diff --git a/.gitattributes b/.gitattributes index a4691b685af8b61190ad2196f6f5295866f498af..1d960b901e81c59c311c952f37f5466a02a70b38 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1142,3 +1142,469 @@ cityscapes/leftImg8bit/train/tubingen/tubingen_000109_000019_leftImg8bit.png fil cityscapes/leftImg8bit/train/tubingen/tubingen_000042_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text cityscapes/leftImg8bit/train/tubingen/tubingen_000030_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text cityscapes/leftImg8bit/train/weimar/weimar_000040_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000025_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000140_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000127_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000125_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000032_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000117_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000000_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000079_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000100_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000081_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000017_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000072_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000065_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000078_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000096_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000101_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000001_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000097_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000080_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000016_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000116_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000033_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000073_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000124_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000041_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000038_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/weimar/weimar_000141_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000024_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000137_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000133_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000056_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000126_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000048_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000031_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000043_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000131_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000014_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000103_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000054_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000026_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000002_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000009_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000095_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000066_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000108_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000088_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000003_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074425_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000008_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000089_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000114_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000070_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000071_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000083_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000115_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000067_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000055_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000102_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/weimar/weimar_000109_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000094_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_067338_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000130_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000015_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000011_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000087_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000027_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000042_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000106_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000063_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000127_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000046_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000090_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000074_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000049_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000068_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_067799_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000111_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000006_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000034_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000128_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000135_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000030_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000029_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000023_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000050_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000051_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000086_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000113_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000122_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_023239_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/weimar/weimar_000028_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_016558_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000134_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_014319_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000176_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000007_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000104_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_030346_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_004646_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_012675_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_035491_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000123_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000004_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000128_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000051_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000092_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000187_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_056601_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000175_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_000712_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000046_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000034_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_040051_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000207_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_046398_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_056457_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_051536_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000211_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_014713_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000122_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000138_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/hanover/hanover_000000_042382_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000082_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_038855_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000010_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_052649_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_030546_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000035_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000210_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_042770_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_020655_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_027007_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000086_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000047_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000110_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000162_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000091_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_052512_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_057710_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_008200_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000075_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_052887_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_046200_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_013205_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_023881_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_053437_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_002357_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_054965_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_049269_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_013094_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_055937_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_027998_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/hanover/hanover_000000_046646_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_031144_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_039470_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_028202_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_004752_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_044085_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_034935_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_009128_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_029455_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_029325_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_044344_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_027481_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_007780_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_017041_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000022_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_036051_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_034141_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_024136_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_035606_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_001620_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_043653_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_031856_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_005599_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_040793_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_043102_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_030889_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_056800_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_025335_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_007342_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_046954_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_040221_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_047870_leftImg8bit.png 
filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_018800_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_023614_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_009420_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000109_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_040294_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000107_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_035768_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_009004_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_041493_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000042_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000067_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000015_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000083_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hanover/hanover_000000_019672_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000094_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000009_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000208_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000002_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000048_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000088_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000154_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000115_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/jena/jena_000070_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000031_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000126_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000131_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000054_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000282_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000266_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000143_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000148_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000314_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/bremen/bremen_000295_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000171_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000214_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000231_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000303_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000289_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000026_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000182_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000195_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000108_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000189_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000308_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000095_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000043_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000254_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000014_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000066_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000003_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000103_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000166_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000008_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000243_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000082_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000071_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000009_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000089_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000114_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000230_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000167_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000015_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000088_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000188_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/bremen/bremen_000001_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000170_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000002_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000238_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000242_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000183_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000115_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000070_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000215_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000102_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000249_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000255_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000067_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000027_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000149_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000227_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000294_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000302_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000142_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000288_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000109_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000202_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000094_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000030_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000267_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000309_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000155_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000005_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000209_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000057_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000127_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000130_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/bremen/bremen_000270_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000042_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000315_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000049_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000283_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000300_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000217_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000139_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000140_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000055_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000296_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000039_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000000_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/bremen/bremen_000265_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000065_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000165_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000181_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000172_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000017_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000167_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000179_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000194_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000083_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000009_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000015_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000067_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000115_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000142_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000070_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000088_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000002_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/stuttgart/stuttgart_000102_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000188_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000049_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000094_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000170_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000149_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000155_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000183_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000042_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000130_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000126_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000127_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000055_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000109_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000030_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000027_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000154_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000143_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000171_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000054_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000014_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000043_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000103_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000048_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000131_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000189_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000026_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000003_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000095_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000066_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/stuttgart/stuttgart_000182_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000008_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000195_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000031_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000108_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000166_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000082_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000111_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000071_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000106_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000114_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000068_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000063_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000074_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/stuttgart/stuttgart_000089_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000011_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000087_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000046_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000090_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000128_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000006_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000034_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000123_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000097_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000051_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000135_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000022_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000134_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000047_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000029_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000023_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/tubingen/tubingen_000028_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000035_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000050_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000129_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000086_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000075_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000007_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000122_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000110_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000107_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000077_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000112_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000098_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000062_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000069_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000091_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000084_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000012_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000019_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000010_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/tubingen/tubingen_000005_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000075_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000007_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000022_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000091_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000107_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000110_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000035_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000119_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000129_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000084_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/weimar/weimar_000010_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000047_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000112_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000052_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000077_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000019_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000069_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000062_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000060_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000105_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000012_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000098_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000059_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000037_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000045_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000005_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000036_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000013_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000121_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000104_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000120_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000093_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000020_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000053_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000058_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000136_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000099_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_036527_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000044_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_052122_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000004_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000018_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text 
+cityscapes/leftImg8bit/train/hamburg/hamburg_000000_065983_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_062710_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000076_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_100300_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_106102_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000061_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_069096_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000118_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000085_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_088783_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_061790_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_027857_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000021_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000092_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_024251_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_014940_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_103856_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_043944_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_077144_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_054029_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_052904_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074694_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/weimar/weimar_000113_000019_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_104857_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_003904_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_103367_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_089696_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_086636_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_047157_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_089491_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text +cityscapes/leftImg8bit/train/hamburg/hamburg_000000_005639_leftImg8bit.png filter=lfs diff=lfs merge=lfs -text diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000000_000019_gtFine_labelIds.png 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000000_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..2c83909ae96fe8fb6ce92f6cbcc59c1cbde589b0 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000000_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000001_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000001_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..48885aa02089642e7c564d3fa51112d54ed6e156 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000001_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000001_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000001_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..8752d7e3b353731fc3de88e8a718960bab41f084 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000001_000019_gtFine_polygons.json @@ -0,0 +1,6137 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 764, + 2 + ], + [ + 1326, + 2 + ], + [ + 1096, + 422 + ], + [ + 1004, + 424 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1, + 532 + ], + [ + 1006, + 431 + ], + [ + 1118, + 429 + ], + [ + 1554, + 455 + ], + [ + 1960, + 557 + ], + [ + 2048, + 561 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 722, + 467 + ], + [ + 751, + 480 + ], + [ + 732, + 497 + ], + [ + 553, + 523 + ], + [ + 0, + 624 + ], + [ + 0, + 549 + ], + [ + 451, + 481 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 722, + 467 + ], + [ + 751, + 480 + ], + [ + 732, + 497 + ], + [ + 553, + 523 + ], + [ + 0, + 624 + ], + [ + 0, + 549 + ], + [ + 451, + 481 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1019, + 410 + ], + [ + 1030, + 411 + ], + [ + 1039, + 411 + ], + [ + 1048, + 411 + ], + [ + 1069, + 410 + ], + [ + 1081, + 416 + ], + [ + 1085, + 436 + ], + [ + 1063, + 438 + ], + [ + 1029, + 438 + ], + [ + 1003, + 439 + ], + [ + 1001, + 431 + ], + [ + 1003, + 419 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 980, + 397 + ], + [ + 976, + 447 + ], + [ + 867, + 460 + ], + [ + 591, + 486 + ], + [ + 0, + 533 + ], + [ + 0, + 2 + ], + [ + 527, + 204 + ], + [ + 831, + 335 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1096, + 394 + ], + [ + 1435, + 174 + ], + [ + 1655, + 0 + ], + [ + 2048, + 2 + ], + [ + 2048, + 535 + ], + [ + 1375, + 462 + ], + [ + 1096, + 432 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 945, + 457 + ], + [ + 884, + 463 + ], + [ + 882, + 457 + ], + [ + 922, + 451 + ], + [ + 947, + 447 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 871, + 425 + ], + [ + 885, + 421 + ], + [ + 915, + 422 + ], + [ + 937, + 440 + ], + [ + 945, + 444 + ], + [ + 948, + 448 + ], + [ + 940, + 454 + ], + [ + 878, + 460 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 833, + 366 + ], + [ + 843, + 280 + ], + [ + 879, + 236 + ], + [ + 905, + 213 + ], + [ + 920, + 207 + ], + [ + 930, + 199 + ], + [ + 937, + 204 + ], + [ + 935, + 214 + ], + [ + 938, + 214 + ], + [ + 942, + 223 + ], + [ + 945, + 223 + ], + [ + 948, + 214 + ], + [ + 952, + 214 + ], + [ + 952, + 225 + ], + [ + 954, + 228 + ], + [ + 957, + 232 + ], + [ + 962, + 236 + ], + [ + 963, + 246 + ], + [ + 959, + 251 + ], + [ + 963, + 253 + ], + [ + 
964, + 257 + ], + [ + 962, + 266 + ], + [ + 967, + 266 + ], + [ + 976, + 265 + ], + [ + 980, + 270 + ], + [ + 980, + 281 + ], + [ + 983, + 284 + ], + [ + 989, + 284 + ], + [ + 994, + 284 + ], + [ + 1000, + 286 + ], + [ + 1005, + 297 + ], + [ + 1009, + 307 + ], + [ + 1016, + 313 + ], + [ + 1021, + 319 + ], + [ + 1023, + 335 + ], + [ + 1029, + 339 + ], + [ + 1033, + 338 + ], + [ + 1038, + 339 + ], + [ + 1039, + 351 + ], + [ + 1041, + 357 + ], + [ + 1043, + 365 + ], + [ + 1041, + 371 + ], + [ + 1043, + 380 + ], + [ + 1042, + 382 + ], + [ + 1038, + 392 + ], + [ + 1036, + 401 + ], + [ + 1036, + 407 + ], + [ + 1031, + 419 + ], + [ + 1019, + 427 + ], + [ + 1003, + 445 + ], + [ + 957, + 434 + ], + [ + 954, + 419 + ], + [ + 954, + 400 + ], + [ + 949, + 397 + ], + [ + 940, + 397 + ], + [ + 938, + 397 + ], + [ + 937, + 442 + ], + [ + 934, + 440 + ], + [ + 934, + 398 + ], + [ + 923, + 401 + ], + [ + 924, + 450 + ], + [ + 922, + 450 + ], + [ + 918, + 396 + ], + [ + 910, + 394 + ], + [ + 908, + 415 + ], + [ + 908, + 455 + ], + [ + 897, + 457 + ], + [ + 893, + 406 + ], + [ + 894, + 389 + ], + [ + 888, + 387 + ], + [ + 865, + 383 + ], + [ + 855, + 383 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 859, + 344 + ], + [ + 857, + 397 + ], + [ + 815, + 396 + ], + [ + 813, + 353 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 840, + 353 + ], + [ + 841, + 423 + ], + [ + 839, + 423 + ], + [ + 836, + 352 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 752, + 435 + ], + [ + 754, + 398 + ], + [ + 755, + 339 + ], + [ + 740, + 344 + ], + [ + 725, + 338 + ], + [ + 715, + 319 + ], + [ + 706, + 256 + ], + [ + 715, + 182 + ], + [ + 842, + 35 + ], + [ + 851, + 42 + ], + [ + 864, + 44 + ], + [ + 865, + 44 + ], + [ + 865, + 50 + ], + [ + 861, + 60 + ], + [ + 862, + 66 + ], + [ + 866, + 71 + ], + [ + 868, + 72 + ], + [ + 874, + 70 + ], + [ + 886, + 64 + ], + [ + 897, + 64 + ], + [ + 898, + 72 + ], + [ + 902, + 75 + ], + [ + 897, + 82 + ], + [ + 884, + 90 + ], + [ + 882, + 99 + ], + [ + 890, + 103 + ], + [ + 901, + 104 + ], + [ + 906, + 110 + ], + [ + 912, + 112 + ], + [ + 916, + 113 + ], + [ + 917, + 124 + ], + [ + 917, + 131 + ], + [ + 920, + 138 + ], + [ + 920, + 152 + ], + [ + 923, + 157 + ], + [ + 925, + 166 + ], + [ + 915, + 175 + ], + [ + 921, + 180 + ], + [ + 926, + 180 + ], + [ + 920, + 191 + ], + [ + 919, + 201 + ], + [ + 920, + 212 + ], + [ + 916, + 226 + ], + [ + 914, + 237 + ], + [ + 915, + 252 + ], + [ + 920, + 268 + ], + [ + 923, + 280 + ], + [ + 927, + 300 + ], + [ + 926, + 318 + ], + [ + 919, + 330 + ], + [ + 912, + 345 + ], + [ + 909, + 356 + ], + [ + 901, + 376 + ], + [ + 885, + 381 + ], + [ + 868, + 382 + ], + [ + 858, + 385 + ], + [ + 858, + 377 + ], + [ + 851, + 367 + ], + [ + 854, + 427 + ], + [ + 849, + 424 + ], + [ + 842, + 366 + ], + [ + 833, + 366 + ], + [ + 830, + 367 + ], + [ + 830, + 410 + ], + [ + 834, + 412 + ], + [ + 834, + 421 + ], + [ + 816, + 425 + ], + [ + 816, + 408 + ], + [ + 820, + 403 + ], + [ + 821, + 369 + ], + [ + 811, + 369 + ], + [ + 805, + 367 + ], + [ + 802, + 361 + ], + [ + 801, + 351 + ], + [ + 800, + 349 + ], + [ + 793, + 357 + ], + [ + 792, + 394 + ], + [ + 795, + 405 + ], + [ + 797, + 406 + ], + [ + 800, + 422 + ], + [ + 779, + 424 + ], + [ + 776, + 406 + ], + [ + 782, + 403 + ], + [ + 779, + 367 + ], + [ + 779, + 356 + ], + [ + 776, + 345 + ], + [ + 769, + 338 + ], + [ + 772, + 401 + ], + [ + 769, + 446 + ], + [ + 755, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 714, + 433 + ], + [ + 730, + 428 + ], + [ + 771, + 434 + ], 
+ [ + 755, + 477 + ], + [ + 720, + 478 + ], + [ + 709, + 456 + ], + [ + 710, + 443 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 570, + 450 + ], + [ + 574, + 441 + ], + [ + 602, + 436 + ], + [ + 606, + 432 + ], + [ + 607, + 426 + ], + [ + 612, + 425 + ], + [ + 614, + 435 + ], + [ + 637, + 433 + ], + [ + 655, + 430 + ], + [ + 670, + 430 + ], + [ + 671, + 427 + ], + [ + 671, + 422 + ], + [ + 678, + 420 + ], + [ + 682, + 427 + ], + [ + 705, + 425 + ], + [ + 700, + 478 + ], + [ + 643, + 487 + ], + [ + 570, + 483 + ], + [ + 567, + 468 + ], + [ + 567, + 460 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 408, + 327 + ], + [ + 410, + 349 + ], + [ + 428, + 353 + ], + [ + 427, + 356 + ], + [ + 411, + 359 + ], + [ + 411, + 435 + ], + [ + 405, + 438 + ], + [ + 404, + 324 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 421, + 339 + ], + [ + 447, + 339 + ], + [ + 447, + 373 + ], + [ + 421, + 371 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 371, + 337 + ], + [ + 399, + 342 + ], + [ + 400, + 374 + ], + [ + 373, + 374 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 356, + 353 + ], + [ + 386, + 353 + ], + [ + 386, + 354 + ], + [ + 360, + 358 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 640, + 451 + ], + [ + 671, + 449 + ], + [ + 683, + 459 + ], + [ + 694, + 461 + ], + [ + 701, + 471 + ], + [ + 699, + 482 + ], + [ + 693, + 488 + ], + [ + 680, + 491 + ], + [ + 677, + 491 + ], + [ + 672, + 491 + ], + [ + 646, + 491 + ], + [ + 614, + 482 + ], + [ + 610, + 471 + ], + [ + 610, + 461 + ], + [ + 610, + 457 + ], + [ + 629, + 454 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 664, + 386 + ], + [ + 663, + 496 + ], + [ + 660, + 496 + ], + [ + 659, + 387 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 673, + 357 + ], + [ + 676, + 396 + ], + [ + 652, + 395 + ], + [ + 652, + 357 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 636, + 313 + ], + [ + 628, + 314 + ], + [ + 618, + 313 + ], + [ + 607, + 321 + ], + [ + 599, + 325 + ], + [ + 588, + 319 + ], + [ + 582, + 310 + ], + [ + 564, + 306 + ], + [ + 560, + 294 + ], + [ + 548, + 250 + ], + [ + 563, + 0 + ], + [ + 886, + 1 + ], + [ + 883, + 7 + ], + [ + 883, + 11 + ], + [ + 886, + 13 + ], + [ + 883, + 24 + ], + [ + 871, + 33 + ], + [ + 857, + 45 + ], + [ + 856, + 56 + ], + [ + 852, + 84 + ], + [ + 852, + 100 + ], + [ + 859, + 118 + ], + [ + 863, + 136 + ], + [ + 858, + 162 + ], + [ + 858, + 178 + ], + [ + 866, + 191 + ], + [ + 875, + 204 + ], + [ + 880, + 218 + ], + [ + 872, + 234 + ], + [ + 853, + 250 + ], + [ + 823, + 268 + ], + [ + 781, + 294 + ], + [ + 759, + 295 + ], + [ + 736, + 313 + ], + [ + 734, + 327 + ], + [ + 728, + 335 + ], + [ + 721, + 339 + ], + [ + 714, + 351 + ], + [ + 716, + 397 + ], + [ + 730, + 398 + ], + [ + 717, + 477 + ], + [ + 703, + 478 + ], + [ + 703, + 409 + ], + [ + 700, + 396 + ], + [ + 704, + 389 + ], + [ + 704, + 358 + ], + [ + 700, + 340 + ], + [ + 700, + 325 + ], + [ + 698, + 315 + ], + [ + 688, + 320 + ], + [ + 679, + 322 + ], + [ + 677, + 316 + ], + [ + 677, + 311 + ], + [ + 667, + 309 + ], + [ + 656, + 320 + ], + [ + 647, + 325 + ], + [ + 648, + 390 + ], + [ + 650, + 394 + ], + [ + 646, + 500 + ], + [ + 630, + 495 + ], + [ + 634, + 385 + ], + [ + 634, + 336 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 603, + 456 + ], + [ + 621, + 459 + ], + [ + 637, + 453 + ], + [ + 653, + 450 + ], + [ + 657, + 453 + ], + [ + 654, + 458 + ], + [ + 650, + 464 + ], + [ + 651, + 470 + ], + [ + 660, + 472 + ], + [ + 666, + 477 + ], + 
[ + 668, + 487 + ], + [ + 668, + 494 + ], + [ + 664, + 499 + ], + [ + 655, + 503 + ], + [ + 643, + 503 + ], + [ + 637, + 498 + ], + [ + 633, + 491 + ], + [ + 631, + 485 + ], + [ + 627, + 486 + ], + [ + 612, + 495 + ], + [ + 605, + 502 + ], + [ + 589, + 503 + ], + [ + 582, + 497 + ], + [ + 575, + 486 + ], + [ + 575, + 479 + ], + [ + 583, + 471 + ], + [ + 594, + 467 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 548, + 438 + ], + [ + 564, + 443 + ], + [ + 575, + 444 + ], + [ + 579, + 452 + ], + [ + 580, + 460 + ], + [ + 570, + 467 + ], + [ + 573, + 472 + ], + [ + 576, + 477 + ], + [ + 577, + 485 + ], + [ + 572, + 496 + ], + [ + 566, + 503 + ], + [ + 554, + 501 + ], + [ + 542, + 493 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 625, + 325 + ], + [ + 628, + 502 + ], + [ + 626, + 502 + ], + [ + 620, + 322 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 620, + 316 + ], + [ + 625, + 318 + ], + [ + 628, + 322 + ], + [ + 631, + 353 + ], + [ + 633, + 363 + ], + [ + 631, + 373 + ], + [ + 628, + 381 + ], + [ + 622, + 385 + ], + [ + 621, + 375 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 528, + 269 + ], + [ + 511, + 267 + ], + [ + 503, + 259 + ], + [ + 487, + 253 + ], + [ + 473, + 242 + ], + [ + 450, + 196 + ], + [ + 423, + 0 + ], + [ + 685, + 0 + ], + [ + 685, + 5 + ], + [ + 684, + 25 + ], + [ + 690, + 33 + ], + [ + 699, + 39 + ], + [ + 701, + 61 + ], + [ + 711, + 72 + ], + [ + 715, + 90 + ], + [ + 734, + 97 + ], + [ + 754, + 106 + ], + [ + 764, + 122 + ], + [ + 753, + 147 + ], + [ + 725, + 167 + ], + [ + 689, + 186 + ], + [ + 669, + 213 + ], + [ + 643, + 235 + ], + [ + 620, + 244 + ], + [ + 584, + 255 + ], + [ + 560, + 260 + ], + [ + 552, + 258 + ], + [ + 547, + 318 + ], + [ + 549, + 377 + ], + [ + 560, + 383 + ], + [ + 549, + 508 + ], + [ + 518, + 503 + ], + [ + 527, + 404 + ], + [ + 533, + 276 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 503, + 466 + ], + [ + 519, + 463 + ], + [ + 533, + 465 + ], + [ + 542, + 478 + ], + [ + 541, + 494 + ], + [ + 538, + 501 + ], + [ + 532, + 506 + ], + [ + 522, + 509 + ], + [ + 515, + 507 + ], + [ + 508, + 504 + ], + [ + 502, + 497 + ], + [ + 499, + 491 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 389, + 436 + ], + [ + 406, + 432 + ], + [ + 448, + 430 + ], + [ + 491, + 436 + ], + [ + 507, + 456 + ], + [ + 514, + 487 + ], + [ + 508, + 508 + ], + [ + 492, + 510 + ], + [ + 486, + 519 + ], + [ + 476, + 524 + ], + [ + 465, + 524 + ], + [ + 452, + 521 + ], + [ + 446, + 513 + ], + [ + 413, + 505 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 355, + 424 + ], + [ + 398, + 431 + ], + [ + 416, + 441 + ], + [ + 436, + 462 + ], + [ + 442, + 489 + ], + [ + 442, + 504 + ], + [ + 435, + 510 + ], + [ + 425, + 514 + ], + [ + 415, + 525 + ], + [ + 332, + 519 + ], + [ + 268, + 440 + ], + [ + 319, + 422 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 330, + 235 + ], + [ + 327, + 225 + ], + [ + 325, + 213 + ], + [ + 319, + 201 + ], + [ + 312, + 192 + ], + [ + 300, + 187 + ], + [ + 281, + 194 + ], + [ + 258, + 198 + ], + [ + 233, + 195 + ], + [ + 217, + 184 + ], + [ + 186, + 186 + ], + [ + 161, + 186 + ], + [ + 160, + 210 + ], + [ + 148, + 213 + ], + [ + 144, + 199 + ], + [ + 142, + 173 + ], + [ + 135, + 158 + ], + [ + 126, + 144 + ], + [ + 113, + 145 + ], + [ + 88, + 152 + ], + [ + 63, + 143 + ], + [ + 43, + 126 + ], + [ + 55, + 0 + ], + [ + 495, + 0 + ], + [ + 507, + 13 + ], + [ + 520, + 25 + ], + [ + 539, + 42 + ], + [ + 552, + 48 + ], + [ + 562, + 60 + ], + [ + 574, + 72 + ], + [ 
+ 593, + 81 + ], + [ + 615, + 91 + ], + [ + 645, + 105 + ], + [ + 657, + 117 + ], + [ + 658, + 141 + ], + [ + 648, + 170 + ], + [ + 625, + 183 + ], + [ + 579, + 187 + ], + [ + 544, + 192 + ], + [ + 540, + 215 + ], + [ + 513, + 228 + ], + [ + 483, + 235 + ], + [ + 473, + 239 + ], + [ + 461, + 244 + ], + [ + 449, + 247 + ], + [ + 433, + 243 + ], + [ + 411, + 241 + ], + [ + 405, + 236 + ], + [ + 396, + 244 + ], + [ + 390, + 256 + ], + [ + 379, + 261 + ], + [ + 365, + 260 + ], + [ + 362, + 260 + ], + [ + 364, + 302 + ], + [ + 362, + 363 + ], + [ + 365, + 370 + ], + [ + 371, + 386 + ], + [ + 375, + 501 + ], + [ + 340, + 500 + ], + [ + 333, + 439 + ], + [ + 325, + 370 + ], + [ + 336, + 360 + ], + [ + 341, + 351 + ], + [ + 337, + 319 + ], + [ + 337, + 292 + ], + [ + 341, + 266 + ], + [ + 338, + 244 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 329, + 442 + ], + [ + 356, + 449 + ], + [ + 378, + 448 + ], + [ + 383, + 447 + ], + [ + 381, + 453 + ], + [ + 368, + 464 + ], + [ + 359, + 470 + ], + [ + 359, + 478 + ], + [ + 363, + 489 + ], + [ + 377, + 491 + ], + [ + 387, + 499 + ], + [ + 394, + 515 + ], + [ + 391, + 533 + ], + [ + 372, + 542 + ], + [ + 357, + 545 + ], + [ + 344, + 539 + ], + [ + 325, + 511 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 42, + 430 + ], + [ + 100, + 412 + ], + [ + 141, + 407 + ], + [ + 209, + 404 + ], + [ + 298, + 409 + ], + [ + 316, + 411 + ], + [ + 346, + 452 + ], + [ + 347, + 473 + ], + [ + 353, + 501 + ], + [ + 356, + 506 + ], + [ + 355, + 524 + ], + [ + 331, + 533 + ], + [ + 311, + 535 + ], + [ + 302, + 543 + ], + [ + 293, + 553 + ], + [ + 284, + 557 + ], + [ + 265, + 558 + ], + [ + 243, + 548 + ], + [ + 233, + 540 + ], + [ + 164, + 542 + ], + [ + 78, + 541 + ], + [ + 50, + 538 + ], + [ + 36, + 487 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 53, + 491 + ], + [ + 62, + 489 + ], + [ + 73, + 488 + ], + [ + 81, + 491 + ], + [ + 96, + 503 + ], + [ + 105, + 522 + ], + [ + 112, + 544 + ], + [ + 115, + 565 + ], + [ + 113, + 574 + ], + [ + 96, + 579 + ], + [ + 76, + 577 + ], + [ + 62, + 568 + ], + [ + 52, + 550 + ], + [ + 48, + 519 + ], + [ + 48, + 503 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 9, + 193 + ], + [ + 8, + 176 + ], + [ + 13, + 155 + ], + [ + 15, + 128 + ], + [ + 18, + 90 + ], + [ + 18, + 70 + ], + [ + 0, + 68 + ], + [ + 0, + 2 + ], + [ + 205, + 0 + ], + [ + 203, + 2 + ], + [ + 167, + 11 + ], + [ + 124, + 23 + ], + [ + 80, + 53 + ], + [ + 65, + 86 + ], + [ + 49, + 210 + ], + [ + 46, + 291 + ], + [ + 44, + 328 + ], + [ + 60, + 332 + ], + [ + 59, + 455 + ], + [ + 66, + 549 + ], + [ + 66, + 572 + ], + [ + 38, + 578 + ], + [ + 0, + 587 + ], + [ + 2, + 461 + ], + [ + 5, + 376 + ], + [ + 7, + 325 + ], + [ + 11, + 258 + ], + [ + 11, + 214 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 990, + 424 + ], + [ + 1003, + 425 + ], + [ + 1011, + 439 + ], + [ + 1012, + 461 + ], + [ + 1012, + 464 + ], + [ + 994, + 463 + ], + [ + 982, + 450 + ], + [ + 976, + 437 + ], + [ + 976, + 430 + ], + [ + 983, + 425 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1002, + 377 + ], + [ + 960, + 385 + ], + [ + 957, + 405 + ], + [ + 961, + 435 + ], + [ + 961, + 436 + ], + [ + 961, + 390 + ], + [ + 985, + 385 + ], + [ + 1002, + 381 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 964, + 403 + ], + [ + 964, + 423 + ], + [ + 955, + 423 + ], + [ + 956, + 402 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 949, + 430 + ], + [ + 959, + 427 + ], + [ + 988, + 428 + ], + [ + 996, + 435 + ], + [ + 1003, + 
456 + ], + [ + 1003, + 472 + ], + [ + 1000, + 474 + ], + [ + 993, + 474 + ], + [ + 991, + 470 + ], + [ + 967, + 470 + ], + [ + 956, + 469 + ], + [ + 955, + 474 + ], + [ + 950, + 476 + ], + [ + 942, + 476 + ], + [ + 941, + 473 + ], + [ + 941, + 461 + ], + [ + 942, + 449 + ], + [ + 939, + 446 + ], + [ + 939, + 443 + ], + [ + 941, + 442 + ], + [ + 943, + 441 + ], + [ + 946, + 439 + ], + [ + 946, + 437 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 774, + 423 + ], + [ + 786, + 420 + ], + [ + 841, + 417 + ], + [ + 871, + 416 + ], + [ + 883, + 426 + ], + [ + 895, + 446 + ], + [ + 902, + 451 + ], + [ + 903, + 454 + ], + [ + 900, + 464 + ], + [ + 906, + 481 + ], + [ + 907, + 505 + ], + [ + 905, + 535 + ], + [ + 902, + 542 + ], + [ + 889, + 547 + ], + [ + 880, + 551 + ], + [ + 871, + 551 + ], + [ + 868, + 544 + ], + [ + 868, + 537 + ], + [ + 823, + 541 + ], + [ + 773, + 541 + ], + [ + 758, + 542 + ], + [ + 753, + 557 + ], + [ + 741, + 558 + ], + [ + 735, + 551 + ], + [ + 731, + 510 + ], + [ + 731, + 491 + ], + [ + 735, + 477 + ], + [ + 740, + 467 + ], + [ + 730, + 465 + ], + [ + 732, + 454 + ], + [ + 748, + 456 + ], + [ + 755, + 443 + ], + [ + 762, + 434 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1059, + 337 + ], + [ + 1061, + 328 + ], + [ + 1057, + 322 + ], + [ + 1056, + 313 + ], + [ + 1067, + 298 + ], + [ + 1070, + 297 + ], + [ + 1075, + 290 + ], + [ + 1076, + 284 + ], + [ + 1080, + 276 + ], + [ + 1087, + 265 + ], + [ + 1091, + 256 + ], + [ + 1097, + 252 + ], + [ + 1101, + 254 + ], + [ + 1106, + 259 + ], + [ + 1111, + 261 + ], + [ + 1119, + 298 + ], + [ + 1124, + 352 + ], + [ + 1128, + 390 + ], + [ + 1124, + 406 + ], + [ + 1124, + 422 + ], + [ + 1116, + 439 + ], + [ + 1103, + 441 + ], + [ + 1084, + 440 + ], + [ + 1077, + 436 + ], + [ + 1072, + 427 + ], + [ + 1070, + 423 + ], + [ + 1063, + 420 + ], + [ + 1057, + 417 + ], + [ + 1049, + 407 + ], + [ + 1048, + 403 + ], + [ + 1048, + 395 + ], + [ + 1047, + 392 + ], + [ + 1043, + 387 + ], + [ + 1043, + 379 + ], + [ + 1044, + 368 + ], + [ + 1045, + 354 + ], + [ + 1049, + 350 + ], + [ + 1056, + 342 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1160, + 411 + ], + [ + 1177, + 424 + ], + [ + 1128, + 446 + ], + [ + 1116, + 447 + ], + [ + 1115, + 438 + ], + [ + 1114, + 426 + ], + [ + 1116, + 416 + ], + [ + 1130, + 414 + ], + [ + 1149, + 413 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1161, + 424 + ], + [ + 1162, + 431 + ], + [ + 1129, + 447 + ], + [ + 1118, + 445 + ], + [ + 1117, + 439 + ], + [ + 1122, + 431 + ], + [ + 1137, + 425 + ], + [ + 1152, + 424 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1128, + 404 + ], + [ + 1114, + 397 + ], + [ + 1109, + 389 + ], + [ + 1100, + 381 + ], + [ + 1094, + 367 + ], + [ + 1100, + 360 + ], + [ + 1111, + 353 + ], + [ + 1111, + 342 + ], + [ + 1108, + 331 + ], + [ + 1102, + 322 + ], + [ + 1100, + 310 + ], + [ + 1107, + 295 + ], + [ + 1109, + 281 + ], + [ + 1108, + 261 + ], + [ + 1119, + 245 + ], + [ + 1135, + 234 + ], + [ + 1161, + 228 + ], + [ + 1214, + 288 + ], + [ + 1214, + 328 + ], + [ + 1195, + 378 + ], + [ + 1167, + 404 + ], + [ + 1155, + 412 + ], + [ + 1155, + 440 + ], + [ + 1153, + 438 + ], + [ + 1152, + 408 + ], + [ + 1146, + 414 + ], + [ + 1146, + 431 + ], + [ + 1142, + 434 + ], + [ + 1137, + 414 + ], + [ + 1134, + 412 + ], + [ + 1134, + 436 + ], + [ + 1130, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1179, + 422 + ], + [ + 1156, + 453 + ], + [ + 1140, + 452 + ], + [ + 1128, + 446 + ], + [ + 1128, + 440 + ], + [ + 1135, + 431 + 
], + [ + 1162, + 424 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1185, + 424 + ], + [ + 1171, + 451 + ], + [ + 1155, + 451 + ], + [ + 1149, + 449 + ], + [ + 1146, + 441 + ], + [ + 1150, + 430 + ], + [ + 1163, + 427 + ], + [ + 1175, + 423 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1153, + 452 + ], + [ + 1145, + 454 + ], + [ + 1228, + 476 + ], + [ + 2046, + 703 + ], + [ + 2047, + 535 + ], + [ + 1195, + 441 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1153, + 452 + ], + [ + 1145, + 454 + ], + [ + 1228, + 476 + ], + [ + 2046, + 703 + ], + [ + 2047, + 535 + ], + [ + 1195, + 441 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1169, + 408 + ], + [ + 1162, + 402 + ], + [ + 1152, + 400 + ], + [ + 1141, + 400 + ], + [ + 1126, + 374 + ], + [ + 1126, + 347 + ], + [ + 1140, + 311 + ], + [ + 1157, + 284 + ], + [ + 1195, + 282 + ], + [ + 1203, + 293 + ], + [ + 1210, + 319 + ], + [ + 1206, + 346 + ], + [ + 1196, + 367 + ], + [ + 1185, + 383 + ], + [ + 1178, + 403 + ], + [ + 1179, + 441 + ], + [ + 1171, + 442 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1169, + 408 + ], + [ + 1162, + 402 + ], + [ + 1152, + 400 + ], + [ + 1141, + 400 + ], + [ + 1126, + 374 + ], + [ + 1126, + 347 + ], + [ + 1140, + 311 + ], + [ + 1157, + 284 + ], + [ + 1195, + 282 + ], + [ + 1203, + 293 + ], + [ + 1210, + 319 + ], + [ + 1206, + 346 + ], + [ + 1196, + 367 + ], + [ + 1185, + 383 + ], + [ + 1178, + 403 + ], + [ + 1179, + 441 + ], + [ + 1171, + 442 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1193, + 414 + ], + [ + 1211, + 415 + ], + [ + 1233, + 422 + ], + [ + 1196, + 455 + ], + [ + 1181, + 457 + ], + [ + 1176, + 455 + ], + [ + 1167, + 453 + ], + [ + 1165, + 446 + ], + [ + 1166, + 441 + ], + [ + 1177, + 435 + ], + [ + 1180, + 426 + ], + [ + 1180, + 417 + ], + [ + 1182, + 416 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1190, + 391 + ], + [ + 1178, + 388 + ], + [ + 1159, + 383 + ], + [ + 1148, + 386 + ], + [ + 1130, + 386 + ], + [ + 1118, + 379 + ], + [ + 1109, + 352 + ], + [ + 1132, + 303 + ], + [ + 1141, + 287 + ], + [ + 1160, + 276 + ], + [ + 1210, + 269 + ], + [ + 1238, + 270 + ], + [ + 1257, + 298 + ], + [ + 1270, + 314 + ], + [ + 1270, + 323 + ], + [ + 1258, + 335 + ], + [ + 1258, + 339 + ], + [ + 1270, + 339 + ], + [ + 1279, + 337 + ], + [ + 1274, + 345 + ], + [ + 1266, + 354 + ], + [ + 1223, + 372 + ], + [ + 1211, + 375 + ], + [ + 1197, + 377 + ], + [ + 1196, + 435 + ], + [ + 1193, + 435 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1243, + 414 + ], + [ + 1242, + 426 + ], + [ + 1202, + 460 + ], + [ + 1189, + 456 + ], + [ + 1189, + 445 + ], + [ + 1193, + 434 + ], + [ + 1198, + 424 + ], + [ + 1211, + 420 + ], + [ + 1233, + 416 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1211, + 379 + ], + [ + 1206, + 375 + ], + [ + 1190, + 373 + ], + [ + 1174, + 373 + ], + [ + 1160, + 371 + ], + [ + 1149, + 360 + ], + [ + 1145, + 339 + ], + [ + 1149, + 305 + ], + [ + 1149, + 289 + ], + [ + 1151, + 274 + ], + [ + 1157, + 261 + ], + [ + 1164, + 247 + ], + [ + 1197, + 249 + ], + [ + 1223, + 244 + ], + [ + 1259, + 250 + ], + [ + 1276, + 267 + ], + [ + 1278, + 308 + ], + [ + 1286, + 318 + ], + [ + 1288, + 325 + ], + [ + 1248, + 343 + ], + [ + 1215, + 368 + ], + [ + 1214, + 432 + ], + [ + 1210, + 436 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1218, + 465 + ], + [ + 1211, + 463 + ], + [ + 1205, + 436 + ], + [ + 1209, + 425 + ], + [ + 1220, + 422 + ], + [ + 1245, + 421 + ], + [ + 1254, + 423 + ], 
+ [ + 1246, + 442 + ], + [ + 1226, + 458 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1238, + 372 + ], + [ + 1235, + 325 + ], + [ + 1231, + 317 + ], + [ + 1222, + 318 + ], + [ + 1212, + 323 + ], + [ + 1190, + 318 + ], + [ + 1179, + 311 + ], + [ + 1162, + 311 + ], + [ + 1142, + 298 + ], + [ + 1141, + 279 + ], + [ + 1155, + 252 + ], + [ + 1160, + 237 + ], + [ + 1146, + 229 + ], + [ + 1138, + 227 + ], + [ + 1124, + 223 + ], + [ + 1112, + 217 + ], + [ + 1106, + 206 + ], + [ + 1117, + 193 + ], + [ + 1140, + 188 + ], + [ + 1173, + 190 + ], + [ + 1218, + 187 + ], + [ + 1280, + 190 + ], + [ + 1322, + 232 + ], + [ + 1335, + 276 + ], + [ + 1355, + 285 + ], + [ + 1370, + 285 + ], + [ + 1380, + 286 + ], + [ + 1378, + 297 + ], + [ + 1377, + 306 + ], + [ + 1369, + 312 + ], + [ + 1350, + 321 + ], + [ + 1334, + 322 + ], + [ + 1325, + 319 + ], + [ + 1317, + 318 + ], + [ + 1308, + 318 + ], + [ + 1306, + 313 + ], + [ + 1303, + 311 + ], + [ + 1294, + 311 + ], + [ + 1287, + 324 + ], + [ + 1271, + 329 + ], + [ + 1251, + 333 + ], + [ + 1248, + 339 + ], + [ + 1249, + 448 + ], + [ + 1238, + 449 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1261, + 433 + ], + [ + 1255, + 464 + ], + [ + 1240, + 464 + ], + [ + 1227, + 466 + ], + [ + 1217, + 466 + ], + [ + 1211, + 464 + ], + [ + 1214, + 456 + ], + [ + 1225, + 450 + ], + [ + 1232, + 443 + ], + [ + 1242, + 440 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1256, + 408 + ], + [ + 1283, + 405 + ], + [ + 1299, + 407 + ], + [ + 1258, + 466 + ], + [ + 1253, + 465 + ], + [ + 1253, + 436 + ], + [ + 1254, + 418 + ], + [ + 1254, + 411 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1303, + 422 + ], + [ + 1278, + 468 + ], + [ + 1261, + 457 + ], + [ + 1261, + 447 + ], + [ + 1261, + 437 + ], + [ + 1270, + 423 + ], + [ + 1289, + 414 + ], + [ + 1299, + 423 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1284, + 308 + ], + [ + 1272, + 302 + ], + [ + 1251, + 298 + ], + [ + 1235, + 298 + ], + [ + 1202, + 290 + ], + [ + 1169, + 264 + ], + [ + 1162, + 248 + ], + [ + 1149, + 211 + ], + [ + 1152, + 202 + ], + [ + 1156, + 190 + ], + [ + 1146, + 185 + ], + [ + 1138, + 175 + ], + [ + 1134, + 159 + ], + [ + 1144, + 159 + ], + [ + 1163, + 148 + ], + [ + 1156, + 143 + ], + [ + 1144, + 142 + ], + [ + 1139, + 133 + ], + [ + 1139, + 126 + ], + [ + 1145, + 124 + ], + [ + 1156, + 123 + ], + [ + 1166, + 118 + ], + [ + 1206, + 112 + ], + [ + 1252, + 104 + ], + [ + 1339, + 128 + ], + [ + 1385, + 165 + ], + [ + 1406, + 188 + ], + [ + 1417, + 254 + ], + [ + 1405, + 267 + ], + [ + 1394, + 275 + ], + [ + 1394, + 278 + ], + [ + 1399, + 283 + ], + [ + 1405, + 286 + ], + [ + 1407, + 293 + ], + [ + 1396, + 296 + ], + [ + 1379, + 296 + ], + [ + 1361, + 302 + ], + [ + 1347, + 291 + ], + [ + 1337, + 287 + ], + [ + 1322, + 290 + ], + [ + 1308, + 299 + ], + [ + 1298, + 304 + ], + [ + 1293, + 311 + ], + [ + 1296, + 353 + ], + [ + 1293, + 427 + ], + [ + 1285, + 432 + ], + [ + 1283, + 371 + ], + [ + 1283, + 328 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1356, + 401 + ], + [ + 1399, + 397 + ], + [ + 1427, + 399 + ], + [ + 1449, + 414 + ], + [ + 1380, + 485 + ], + [ + 1308, + 486 + ], + [ + 1301, + 481 + ], + [ + 1278, + 477 + ], + [ + 1278, + 458 + ], + [ + 1279, + 438 + ], + [ + 1284, + 416 + ], + [ + 1295, + 405 + ], + [ + 1325, + 403 + ], + [ + 1349, + 402 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1361, + 434 + ], + [ + 1380, + 425 + ], + [ + 1409, + 416 + ], + [ + 1457, + 412 + ], + [ + 1428, + 470 + ], + [ + 1368, + 483 + ], + 
[ + 1308, + 488 + ], + [ + 1303, + 478 + ], + [ + 1303, + 464 + ], + [ + 1309, + 452 + ], + [ + 1346, + 440 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1356, + 424 + ], + [ + 1371, + 430 + ], + [ + 1379, + 436 + ], + [ + 1387, + 447 + ], + [ + 1363, + 477 + ], + [ + 1346, + 485 + ], + [ + 1334, + 483 + ], + [ + 1327, + 479 + ], + [ + 1325, + 472 + ], + [ + 1325, + 462 + ], + [ + 1328, + 455 + ], + [ + 1337, + 449 + ], + [ + 1353, + 449 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1350, + 237 + ], + [ + 1354, + 214 + ], + [ + 1342, + 201 + ], + [ + 1329, + 200 + ], + [ + 1314, + 200 + ], + [ + 1300, + 201 + ], + [ + 1286, + 208 + ], + [ + 1267, + 210 + ], + [ + 1240, + 194 + ], + [ + 1227, + 167 + ], + [ + 1223, + 136 + ], + [ + 1211, + 118 + ], + [ + 1193, + 113 + ], + [ + 1183, + 110 + ], + [ + 1182, + 94 + ], + [ + 1217, + 60 + ], + [ + 1414, + 25 + ], + [ + 1495, + 69 + ], + [ + 1507, + 127 + ], + [ + 1509, + 171 + ], + [ + 1505, + 212 + ], + [ + 1499, + 231 + ], + [ + 1489, + 249 + ], + [ + 1479, + 258 + ], + [ + 1460, + 268 + ], + [ + 1453, + 270 + ], + [ + 1447, + 275 + ], + [ + 1435, + 282 + ], + [ + 1419, + 284 + ], + [ + 1413, + 279 + ], + [ + 1402, + 273 + ], + [ + 1385, + 277 + ], + [ + 1359, + 274 + ], + [ + 1359, + 298 + ], + [ + 1363, + 310 + ], + [ + 1366, + 333 + ], + [ + 1363, + 376 + ], + [ + 1361, + 429 + ], + [ + 1363, + 489 + ], + [ + 1347, + 493 + ], + [ + 1350, + 399 + ], + [ + 1348, + 352 + ], + [ + 1346, + 316 + ], + [ + 1344, + 267 + ], + [ + 1344, + 253 + ], + [ + 1346, + 245 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1467, + 412 + ], + [ + 1403, + 500 + ], + [ + 1392, + 493 + ], + [ + 1366, + 487 + ], + [ + 1364, + 484 + ], + [ + 1363, + 475 + ], + [ + 1363, + 466 + ], + [ + 1371, + 448 + ], + [ + 1380, + 436 + ], + [ + 1387, + 428 + ], + [ + 1403, + 419 + ], + [ + 1429, + 414 + ], + [ + 1454, + 409 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1485, + 275 + ], + [ + 1484, + 258 + ], + [ + 1483, + 240 + ], + [ + 1438, + 187 + ], + [ + 1431, + 181 + ], + [ + 1411, + 183 + ], + [ + 1394, + 174 + ], + [ + 1369, + 163 + ], + [ + 1337, + 176 + ], + [ + 1296, + 179 + ], + [ + 1277, + 172 + ], + [ + 1261, + 147 + ], + [ + 1245, + 134 + ], + [ + 1226, + 137 + ], + [ + 1213, + 126 + ], + [ + 1209, + 115 + ], + [ + 1190, + 112 + ], + [ + 1178, + 108 + ], + [ + 1181, + 97 + ], + [ + 1167, + 96 + ], + [ + 1158, + 94 + ], + [ + 1156, + 87 + ], + [ + 1165, + 68 + ], + [ + 1170, + 60 + ], + [ + 1171, + 53 + ], + [ + 1171, + 47 + ], + [ + 1171, + 39 + ], + [ + 1179, + 31 + ], + [ + 1184, + 23 + ], + [ + 1197, + 20 + ], + [ + 1202, + 16 + ], + [ + 1200, + 10 + ], + [ + 1196, + 4 + ], + [ + 1196, + 0 + ], + [ + 1675, + 1 + ], + [ + 1675, + 6 + ], + [ + 1680, + 11 + ], + [ + 1690, + 17 + ], + [ + 1699, + 18 + ], + [ + 1698, + 26 + ], + [ + 1695, + 31 + ], + [ + 1688, + 35 + ], + [ + 1676, + 39 + ], + [ + 1673, + 52 + ], + [ + 1679, + 58 + ], + [ + 1683, + 61 + ], + [ + 1683, + 73 + ], + [ + 1687, + 79 + ], + [ + 1698, + 82 + ], + [ + 1707, + 84 + ], + [ + 1713, + 90 + ], + [ + 1720, + 102 + ], + [ + 1710, + 112 + ], + [ + 1698, + 118 + ], + [ + 1683, + 118 + ], + [ + 1682, + 127 + ], + [ + 1681, + 135 + ], + [ + 1671, + 143 + ], + [ + 1665, + 142 + ], + [ + 1650, + 141 + ], + [ + 1646, + 147 + ], + [ + 1669, + 158 + ], + [ + 1681, + 168 + ], + [ + 1702, + 171 + ], + [ + 1708, + 173 + ], + [ + 1706, + 184 + ], + [ + 1714, + 189 + ], + [ + 1721, + 187 + ], + [ + 1730, + 188 + ], + [ + 1731, + 199 + ], + [ + 1716, + 207 
+ ], + [ + 1703, + 203 + ], + [ + 1670, + 211 + ], + [ + 1648, + 209 + ], + [ + 1632, + 212 + ], + [ + 1620, + 213 + ], + [ + 1608, + 208 + ], + [ + 1594, + 205 + ], + [ + 1580, + 206 + ], + [ + 1555, + 218 + ], + [ + 1543, + 231 + ], + [ + 1534, + 232 + ], + [ + 1524, + 232 + ], + [ + 1512, + 235 + ], + [ + 1505, + 242 + ], + [ + 1502, + 305 + ], + [ + 1501, + 369 + ], + [ + 1504, + 392 + ], + [ + 1497, + 403 + ], + [ + 1483, + 391 + ], + [ + 1483, + 353 + ], + [ + 1484, + 298 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1678, + 397 + ], + [ + 1692, + 392 + ], + [ + 1727, + 394 + ], + [ + 1752, + 404 + ], + [ + 1715, + 463 + ], + [ + 1697, + 430 + ], + [ + 1687, + 416 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1802, + 347 + ], + [ + 1892, + 336 + ], + [ + 2004, + 325 + ], + [ + 2048, + 321 + ], + [ + 2048, + 477 + ], + [ + 1851, + 556 + ], + [ + 1835, + 562 + ], + [ + 1819, + 581 + ], + [ + 1799, + 586 + ], + [ + 1769, + 586 + ], + [ + 1739, + 566 + ], + [ + 1694, + 465 + ], + [ + 1706, + 429 + ], + [ + 1752, + 389 + ], + [ + 1789, + 353 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1917, + 401 + ], + [ + 2016, + 390 + ], + [ + 2048, + 387 + ], + [ + 2048, + 597 + ], + [ + 1962, + 605 + ], + [ + 1916, + 602 + ], + [ + 1880, + 591 + ], + [ + 1873, + 574 + ], + [ + 1826, + 563 + ], + [ + 1820, + 537 + ], + [ + 1819, + 511 + ], + [ + 1824, + 495 + ], + [ + 1828, + 462 + ], + [ + 1844, + 445 + ], + [ + 1885, + 417 + ], + [ + 1901, + 405 + ], + [ + 1913, + 403 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1930, + 446 + ], + [ + 1963, + 425 + ], + [ + 2013, + 408 + ], + [ + 2006, + 394 + ], + [ + 2027, + 389 + ], + [ + 2048, + 385 + ], + [ + 2048, + 643 + ], + [ + 1999, + 627 + ], + [ + 1974, + 613 + ], + [ + 1956, + 598 + ], + [ + 1903, + 587 + ], + [ + 1893, + 568 + ], + [ + 1894, + 539 + ], + [ + 1900, + 522 + ], + [ + 1909, + 505 + ], + [ + 1917, + 463 + ], + [ + 1925, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1521, + 326 + ], + [ + 1519, + 387 + ], + [ + 1515, + 387 + ], + [ + 1515, + 327 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1508, + 330 + ], + [ + 1523, + 325 + ], + [ + 1522, + 368 + ], + [ + 1510, + 368 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1454, + 394 + ], + [ + 1472, + 386 + ], + [ + 1529, + 377 + ], + [ + 1597, + 378 + ], + [ + 1647, + 381 + ], + [ + 1681, + 390 + ], + [ + 1695, + 401 + ], + [ + 1719, + 453 + ], + [ + 1734, + 492 + ], + [ + 1733, + 502 + ], + [ + 1739, + 513 + ], + [ + 1739, + 529 + ], + [ + 1749, + 542 + ], + [ + 1751, + 586 + ], + [ + 1751, + 616 + ], + [ + 1747, + 651 + ], + [ + 1738, + 668 + ], + [ + 1723, + 671 + ], + [ + 1701, + 666 + ], + [ + 1692, + 655 + ], + [ + 1690, + 643 + ], + [ + 1675, + 637 + ], + [ + 1642, + 640 + ], + [ + 1544, + 642 + ], + [ + 1520, + 642 + ], + [ + 1496, + 641 + ], + [ + 1494, + 659 + ], + [ + 1489, + 667 + ], + [ + 1468, + 667 + ], + [ + 1446, + 665 + ], + [ + 1437, + 646 + ], + [ + 1436, + 621 + ], + [ + 1424, + 616 + ], + [ + 1420, + 630 + ], + [ + 1415, + 636 + ], + [ + 1403, + 633 + ], + [ + 1392, + 631 + ], + [ + 1388, + 615 + ], + [ + 1386, + 588 + ], + [ + 1385, + 556 + ], + [ + 1394, + 518 + ], + [ + 1400, + 495 + ], + [ + 1412, + 475 + ], + [ + 1396, + 473 + ], + [ + 1385, + 469 + ], + [ + 1384, + 458 + ], + [ + 1394, + 448 + ], + [ + 1412, + 448 + ], + [ + 1422, + 448 + ], + [ + 1432, + 431 + ], + [ + 1447, + 402 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1562, + 497 + ], + [ + 1663, + 499 + ], + 
[ + 1663, + 521 + ], + [ + 1560, + 519 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1015, + 39 + ], + [ + 1015, + 49 + ], + [ + 985, + 50 + ], + [ + 986, + 40 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1029, + 178 + ], + [ + 1029, + 185 + ], + [ + 1010, + 185 + ], + [ + 1010, + 179 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1035, + 244 + ], + [ + 1035, + 251 + ], + [ + 1020, + 251 + ], + [ + 1020, + 248 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1037, + 271 + ], + [ + 1037, + 276 + ], + [ + 1023, + 276 + ], + [ + 1025, + 271 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1041, + 299 + ], + [ + 1045, + 303 + ], + [ + 1030, + 302 + ], + [ + 1032, + 298 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 779, + 525 + ], + [ + 831, + 525 + ], + [ + 831, + 511 + ], + [ + 779, + 511 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000002_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000002_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..3b40b9d7ef69343e6f7d894b82cce403e42746b8 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000002_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000003_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000003_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..8b19074112304a9e737aae7215dbef85c82a6302 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000003_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000003_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000003_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..777da41cca392b7cdf2e23cb0c20fe3d3a585a70 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000003_000019_gtFine_polygons.json @@ -0,0 +1,4208 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "terrain", + "polygon": [ + [ + 1100, + 443 + ], + [ + 1128, + 437 + ], + [ + 1158, + 437 + ], + [ + 1173, + 437 + ], + [ + 1188, + 435 + ], + [ + 1197, + 435 + ], + [ + 1215, + 436 + ], + [ + 1219, + 437 + ], + [ + 1220, + 446 + ], + [ + 1225, + 450 + ], + [ + 1230, + 450 + ], + [ + 1235, + 450 + ], + [ + 1238, + 441 + ], + [ + 1247, + 438 + ], + [ + 1290, + 435 + ], + [ + 1345, + 435 + ], + [ + 1376, + 430 + ], + [ + 1392, + 428 + ], + [ + 1410, + 433 + ], + [ + 1464, + 435 + ], + [ + 1503, + 433 + ], + [ + 1519, + 431 + ], + [ + 1588, + 436 + ], + [ + 1626, + 436 + ], + [ + 1641, + 433 + ], + 
[ + 1658, + 432 + ], + [ + 1672, + 434 + ], + [ + 1686, + 437 + ], + [ + 1699, + 437 + ], + [ + 1714, + 437 + ], + [ + 1726, + 437 + ], + [ + 1752, + 439 + ], + [ + 1767, + 444 + ], + [ + 1777, + 446 + ], + [ + 1816, + 447 + ], + [ + 1850, + 446 + ], + [ + 1868, + 447 + ], + [ + 1885, + 447 + ], + [ + 1906, + 445 + ], + [ + 1917, + 440 + ], + [ + 1935, + 440 + ], + [ + 1948, + 444 + ], + [ + 1973, + 449 + ], + [ + 2013, + 452 + ], + [ + 2032, + 437 + ], + [ + 2048, + 436 + ], + [ + 2048, + 611 + ], + [ + 2048, + 613 + ], + [ + 1541, + 581 + ], + [ + 1342, + 568 + ], + [ + 1306, + 560 + ], + [ + 1205, + 523 + ], + [ + 1207, + 514 + ], + [ + 1212, + 506 + ], + [ + 1220, + 497 + ], + [ + 1229, + 497 + ], + [ + 1249, + 493 + ], + [ + 1265, + 493 + ], + [ + 1274, + 493 + ], + [ + 1276, + 490 + ], + [ + 1132, + 482 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 25, + 1 + ], + [ + 1203, + 0 + ], + [ + 1198, + 311 + ], + [ + 76, + 243 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 269, + 370 + ], + [ + 717, + 429 + ], + [ + 1412, + 462 + ], + [ + 2048, + 567 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 498 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 2, + 269 + ], + [ + 634, + 302 + ], + [ + 621, + 424 + ], + [ + 498, + 420 + ], + [ + 498, + 408 + ], + [ + 438, + 405 + ], + [ + 409, + 398 + ], + [ + 382, + 397 + ], + [ + 344, + 392 + ], + [ + 332, + 389 + ], + [ + 289, + 388 + ], + [ + 31, + 422 + ], + [ + 0, + 422 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 171, + 125 + ], + [ + 173, + 120 + ], + [ + 173, + 109 + ], + [ + 177, + 105 + ], + [ + 182, + 106 + ], + [ + 185, + 104 + ], + [ + 187, + 99 + ], + [ + 190, + 95 + ], + [ + 194, + 92 + ], + [ + 194, + 88 + ], + [ + 200, + 88 + ], + [ + 200, + 94 + ], + [ + 206, + 95 + ], + [ + 207, + 91 + ], + [ + 207, + 74 + ], + [ + 224, + 73 + ], + [ + 224, + 87 + ], + [ + 230, + 89 + ], + [ + 230, + 75 + ], + [ + 246, + 76 + ], + [ + 247, + 79 + ], + [ + 251, + 80 + ], + [ + 265, + 74 + ], + [ + 263, + 62 + ], + [ + 268, + 62 + ], + [ + 269, + 70 + ], + [ + 275, + 67 + ], + [ + 276, + 52 + ], + [ + 289, + 52 + ], + [ + 293, + 57 + ], + [ + 304, + 65 + ], + [ + 325, + 66 + ], + [ + 326, + 82 + ], + [ + 350, + 100 + ], + [ + 377, + 121 + ], + [ + 400, + 135 + ], + [ + 431, + 150 + ], + [ + 430, + 158 + ], + [ + 173, + 135 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 436, + 412 + ], + [ + 451, + 410 + ], + [ + 473, + 409 + ], + [ + 489, + 425 + ], + [ + 493, + 436 + ], + [ + 493, + 464 + ], + [ + 427, + 446 + ], + [ + 429, + 431 + ], + [ + 433, + 421 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 674, + 0 + ], + [ + 674, + 177 + ], + [ + 652, + 172 + ], + [ + 660, + 0 + ] + ] + }, + { + "label": "bridge", + "polygon": [ + [ + 68, + 115 + ], + [ + 346, + 138 + ], + [ + 380, + 142 + ], + [ + 461, + 146 + ], + [ + 482, + 148 + ], + [ + 998, + 192 + ], + [ + 998, + 202 + ], + [ + 1136, + 213 + ], + [ + 1304, + 226 + ], + [ + 1276, + 443 + ], + [ + 870, + 462 + ], + [ + 616, + 419 + ], + [ + 614, + 394 + ], + [ + 613, + 367 + ], + [ + 610, + 350 + ], + [ + 610, + 320 + ], + [ + 475, + 313 + ], + [ + 445, + 312 + ], + [ + 446, + 463 + ], + [ + 401, + 441 + ], + [ + 405, + 311 + ], + [ + 388, + 309 + ], + [ + 387, + 426 + ], + [ + 353, + 390 + ], + [ + 350, + 310 + ], + [ + 339, + 307 + ], + [ + 339, + 426 + ], + [ + 303, + 420 + ], + [ + 302, + 304 + ], + [ + 300, + 305 + ], + [ + 298, + 428 + ], + [ + 203, + 385 + ], + [ + 201, + 303 + ], + [ + 37, + 298 + ], + [ + 2, + 296 + 
], + [ + 1, + 107 + ], + [ + 50, + 109 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 0, + 2 + ], + [ + 149, + 1 + ], + [ + 147, + 3 + ], + [ + 143, + 9 + ], + [ + 145, + 14 + ], + [ + 146, + 18 + ], + [ + 146, + 24 + ], + [ + 146, + 27 + ], + [ + 141, + 30 + ], + [ + 129, + 35 + ], + [ + 125, + 39 + ], + [ + 121, + 47 + ], + [ + 119, + 60 + ], + [ + 126, + 66 + ], + [ + 132, + 70 + ], + [ + 141, + 71 + ], + [ + 152, + 63 + ], + [ + 159, + 59 + ], + [ + 168, + 54 + ], + [ + 180, + 52 + ], + [ + 181, + 56 + ], + [ + 173, + 67 + ], + [ + 173, + 75 + ], + [ + 176, + 76 + ], + [ + 181, + 80 + ], + [ + 176, + 90 + ], + [ + 167, + 98 + ], + [ + 172, + 104 + ], + [ + 169, + 108 + ], + [ + 156, + 115 + ], + [ + 156, + 120 + ], + [ + 139, + 125 + ], + [ + 121, + 126 + ], + [ + 101, + 138 + ], + [ + 104, + 147 + ], + [ + 110, + 149 + ], + [ + 117, + 153 + ], + [ + 117, + 157 + ], + [ + 115, + 165 + ], + [ + 104, + 174 + ], + [ + 88, + 174 + ], + [ + 72, + 177 + ], + [ + 40, + 180 + ], + [ + 22, + 181 + ], + [ + 0, + 173 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 542, + 398 + ], + [ + 565, + 398 + ], + [ + 584, + 407 + ], + [ + 542, + 438 + ], + [ + 520, + 431 + ], + [ + 521, + 419 + ], + [ + 527, + 408 + ], + [ + 533, + 403 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 581, + 402 + ], + [ + 600, + 404 + ], + [ + 608, + 406 + ], + [ + 621, + 411 + ], + [ + 610, + 430 + ], + [ + 554, + 428 + ], + [ + 562, + 410 + ], + [ + 572, + 402 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 600, + 435 + ], + [ + 607, + 417 + ], + [ + 622, + 408 + ], + [ + 643, + 408 + ], + [ + 630, + 443 + ], + [ + 608, + 440 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 969, + 474 + ], + [ + 906, + 476 + ], + [ + 793, + 467 + ], + [ + 757, + 461 + ], + [ + 790, + 450 + ], + [ + 849, + 454 + ], + [ + 958, + 454 + ], + [ + 973, + 454 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 744, + 414 + ], + [ + 777, + 411 + ], + [ + 782, + 413 + ], + [ + 786, + 424 + ], + [ + 791, + 432 + ], + [ + 791, + 445 + ], + [ + 786, + 453 + ], + [ + 787, + 459 + ], + [ + 780, + 462 + ], + [ + 760, + 463 + ], + [ + 749, + 461 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 818, + 430 + ], + [ + 819, + 461 + ], + [ + 812, + 459 + ], + [ + 814, + 425 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 527, + 461 + ], + [ + 531, + 442 + ], + [ + 543, + 422 + ], + [ + 554, + 419 + ], + [ + 561, + 419 + ], + [ + 586, + 416 + ], + [ + 605, + 418 + ], + [ + 613, + 424 + ], + [ + 616, + 430 + ], + [ + 626, + 446 + ], + [ + 616, + 472 + ], + [ + 609, + 475 + ], + [ + 605, + 477 + ], + [ + 598, + 480 + ], + [ + 592, + 480 + ], + [ + 588, + 474 + ], + [ + 586, + 473 + ], + [ + 574, + 473 + ], + [ + 567, + 473 + ], + [ + 555, + 472 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 360, + 416 + ], + [ + 381, + 418 + ], + [ + 394, + 424 + ], + [ + 406, + 419 + ], + [ + 414, + 420 + ], + [ + 430, + 429 + ], + [ + 436, + 433 + ], + [ + 444, + 436 + ], + [ + 454, + 436 + ], + [ + 462, + 442 + ], + [ + 474, + 443 + ], + [ + 479, + 445 + ], + [ + 484, + 448 + ], + [ + 491, + 447 + ], + [ + 497, + 444 + ], + [ + 508, + 447 + ], + [ + 515, + 449 + ], + [ + 527, + 450 + ], + [ + 535, + 450 + ], + [ + 543, + 452 + ], + [ + 558, + 463 + ], + [ + 564, + 475 + ], + [ + 568, + 481 + ], + [ + 568, + 488 + ], + [ + 572, + 491 + ], + [ + 573, + 500 + ], + [ + 517, + 499 + ], + [ + 431, + 491 + ], + [ + 380, + 484 + ], + [ + 353, + 479 + ] + ] + }, + { + "label": "vegetation", + 
"polygon": [ + [ + 360, + 416 + ], + [ + 381, + 418 + ], + [ + 394, + 424 + ], + [ + 406, + 419 + ], + [ + 414, + 420 + ], + [ + 430, + 429 + ], + [ + 436, + 433 + ], + [ + 444, + 436 + ], + [ + 454, + 436 + ], + [ + 462, + 442 + ], + [ + 474, + 443 + ], + [ + 479, + 445 + ], + [ + 484, + 448 + ], + [ + 491, + 447 + ], + [ + 497, + 444 + ], + [ + 508, + 447 + ], + [ + 515, + 449 + ], + [ + 527, + 450 + ], + [ + 535, + 450 + ], + [ + 543, + 452 + ], + [ + 558, + 463 + ], + [ + 564, + 475 + ], + [ + 568, + 481 + ], + [ + 568, + 488 + ], + [ + 572, + 491 + ], + [ + 573, + 500 + ], + [ + 517, + 499 + ], + [ + 431, + 491 + ], + [ + 380, + 484 + ], + [ + 353, + 479 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 481, + 28 + ], + [ + 473, + 465 + ], + [ + 457, + 461 + ], + [ + 462, + 27 + ], + [ + 468, + 22 + ], + [ + 477, + 22 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 459, + 352 + ], + [ + 461, + 467 + ], + [ + 464, + 470 + ], + [ + 463, + 355 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 480, + 374 + ], + [ + 480, + 396 + ], + [ + 450, + 396 + ], + [ + 450, + 373 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 450, + 338 + ], + [ + 459, + 335 + ], + [ + 469, + 338 + ], + [ + 477, + 348 + ], + [ + 477, + 357 + ], + [ + 474, + 364 + ], + [ + 467, + 368 + ], + [ + 461, + 369 + ], + [ + 454, + 368 + ], + [ + 447, + 364 + ], + [ + 443, + 355 + ], + [ + 442, + 348 + ], + [ + 444, + 343 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 528, + 440 + ], + [ + 528, + 464 + ], + [ + 531, + 466 + ], + [ + 528, + 434 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 513, + 428 + ], + [ + 538, + 425 + ], + [ + 539, + 444 + ], + [ + 515, + 446 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 528, + 400 + ], + [ + 534, + 406 + ], + [ + 539, + 411 + ], + [ + 539, + 421 + ], + [ + 536, + 424 + ], + [ + 525, + 426 + ], + [ + 518, + 426 + ], + [ + 514, + 423 + ], + [ + 511, + 418 + ], + [ + 511, + 408 + ], + [ + 519, + 402 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 382, + 1 + ], + [ + 373, + 519 + ], + [ + 338, + 448 + ], + [ + 345, + 0 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 33, + 331 + ], + [ + 48, + 333 + ], + [ + 63, + 331 + ], + [ + 78, + 342 + ], + [ + 76, + 357 + ], + [ + 88, + 364 + ], + [ + 100, + 364 + ], + [ + 109, + 362 + ], + [ + 112, + 345 + ], + [ + 110, + 332 + ], + [ + 120, + 319 + ], + [ + 125, + 318 + ], + [ + 142, + 319 + ], + [ + 159, + 325 + ], + [ + 180, + 328 + ], + [ + 192, + 330 + ], + [ + 201, + 337 + ], + [ + 208, + 348 + ], + [ + 213, + 359 + ], + [ + 219, + 368 + ], + [ + 228, + 372 + ], + [ + 237, + 374 + ], + [ + 247, + 379 + ], + [ + 255, + 373 + ], + [ + 261, + 365 + ], + [ + 270, + 360 + ], + [ + 281, + 378 + ], + [ + 289, + 399 + ], + [ + 310, + 410 + ], + [ + 324, + 410 + ], + [ + 336, + 413 + ], + [ + 345, + 420 + ], + [ + 353, + 429 + ], + [ + 363, + 441 + ], + [ + 369, + 457 + ], + [ + 370, + 471 + ], + [ + 380, + 496 + ], + [ + 384, + 500 + ], + [ + 392, + 507 + ], + [ + 394, + 516 + ], + [ + 404, + 526 + ], + [ + 412, + 531 + ], + [ + 416, + 538 + ], + [ + 422, + 544 + ], + [ + 429, + 551 + ], + [ + 415, + 560 + ], + [ + 396, + 565 + ], + [ + 379, + 570 + ], + [ + 349, + 577 + ], + [ + 328, + 580 + ], + [ + 306, + 586 + ], + [ + 282, + 584 + ], + [ + 212, + 580 + ], + [ + 142, + 573 + ], + [ + 95, + 576 + ], + [ + 50, + 584 + ], + [ + 0, + 583 + ], + [ + 2, + 327 + ], + [ + 8, + 327 + ], + [ + 18, + 329 + ] + ] + }, + { + "label": "pole", + 
"polygon": [ + [ + 892, + 332 + ], + [ + 888, + 445 + ], + [ + 886, + 443 + ], + [ + 889, + 329 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 860, + 318 + ], + [ + 901, + 319 + ], + [ + 901, + 334 + ], + [ + 859, + 333 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 886, + 346 + ], + [ + 896, + 349 + ], + [ + 900, + 354 + ], + [ + 901, + 364 + ], + [ + 900, + 370 + ], + [ + 895, + 373 + ], + [ + 890, + 374 + ], + [ + 884, + 374 + ], + [ + 879, + 369 + ], + [ + 876, + 360 + ], + [ + 879, + 353 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1043, + 423 + ], + [ + 1055, + 407 + ], + [ + 1068, + 401 + ], + [ + 1078, + 394 + ], + [ + 1078, + 391 + ], + [ + 1065, + 386 + ], + [ + 1054, + 385 + ], + [ + 1048, + 383 + ], + [ + 1045, + 380 + ], + [ + 1044, + 367 + ], + [ + 1055, + 356 + ], + [ + 1067, + 343 + ], + [ + 1086, + 329 + ], + [ + 1501, + 266 + ], + [ + 1511, + 442 + ], + [ + 1053, + 449 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1214, + 392 + ], + [ + 1226, + 388 + ], + [ + 1285, + 368 + ], + [ + 1318, + 351 + ], + [ + 1380, + 332 + ], + [ + 1428, + 319 + ], + [ + 1484, + 298 + ], + [ + 1544, + 276 + ], + [ + 1519, + 441 + ], + [ + 1198, + 447 + ], + [ + 1161, + 443 + ], + [ + 1155, + 438 + ], + [ + 1154, + 426 + ], + [ + 1162, + 406 + ], + [ + 1170, + 398 + ], + [ + 1196, + 395 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1195, + 405 + ], + [ + 1210, + 405 + ], + [ + 1212, + 406 + ], + [ + 1212, + 411 + ], + [ + 1211, + 416 + ], + [ + 1220, + 424 + ], + [ + 1220, + 439 + ], + [ + 1208, + 432 + ], + [ + 1204, + 436 + ], + [ + 1196, + 440 + ], + [ + 1183, + 436 + ], + [ + 1183, + 428 + ], + [ + 1188, + 422 + ], + [ + 1196, + 417 + ], + [ + 1200, + 408 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1406, + 406 + ], + [ + 1486, + 405 + ], + [ + 1506, + 415 + ], + [ + 1500, + 436 + ], + [ + 1401, + 436 + ], + [ + 1400, + 418 + ], + [ + 1409, + 419 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1135, + 340 + ], + [ + 1130, + 437 + ], + [ + 1111, + 440 + ], + [ + 1115, + 340 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1080, + 243 + ], + [ + 1160, + 244 + ], + [ + 1215, + 284 + ], + [ + 1215, + 353 + ], + [ + 1135, + 349 + ], + [ + 1078, + 347 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 995, + 125 + ], + [ + 1003, + 118 + ], + [ + 1007, + 106 + ], + [ + 1005, + 98 + ], + [ + 992, + 98 + ], + [ + 984, + 97 + ], + [ + 988, + 88 + ], + [ + 994, + 83 + ], + [ + 996, + 75 + ], + [ + 1004, + 71 + ], + [ + 1014, + 71 + ], + [ + 1022, + 71 + ], + [ + 1024, + 60 + ], + [ + 1033, + 51 + ], + [ + 1048, + 54 + ], + [ + 1057, + 63 + ], + [ + 1072, + 64 + ], + [ + 1078, + 57 + ], + [ + 1073, + 49 + ], + [ + 1073, + 45 + ], + [ + 1075, + 38 + ], + [ + 1073, + 33 + ], + [ + 1066, + 22 + ], + [ + 1068, + 13 + ], + [ + 1070, + 5 + ], + [ + 1073, + 0 + ], + [ + 1557, + 0 + ], + [ + 1557, + 2 + ], + [ + 1524, + 434 + ], + [ + 1515, + 429 + ], + [ + 1507, + 422 + ], + [ + 1498, + 418 + ], + [ + 1486, + 410 + ], + [ + 1480, + 404 + ], + [ + 1479, + 393 + ], + [ + 1482, + 380 + ], + [ + 1480, + 352 + ], + [ + 1477, + 336 + ], + [ + 1477, + 325 + ], + [ + 1476, + 315 + ], + [ + 1468, + 306 + ], + [ + 1454, + 306 + ], + [ + 1430, + 303 + ], + [ + 1428, + 301 + ], + [ + 1418, + 313 + ], + [ + 1405, + 325 + ], + [ + 1393, + 331 + ], + [ + 1371, + 340 + ], + [ + 1356, + 336 + ], + [ + 1343, + 326 + ], + [ + 1319, + 314 + ], + [ + 1301, + 308 + ], + [ + 1280, + 304 + ], + [ + 1272, 
+ 299 + ], + [ + 1254, + 300 + ], + [ + 1238, + 303 + ], + [ + 1237, + 303 + ], + [ + 1237, + 451 + ], + [ + 1231, + 456 + ], + [ + 1229, + 457 + ], + [ + 1221, + 452 + ], + [ + 1219, + 437 + ], + [ + 1215, + 356 + ], + [ + 1213, + 306 + ], + [ + 1206, + 309 + ], + [ + 1193, + 310 + ], + [ + 1184, + 301 + ], + [ + 1182, + 292 + ], + [ + 1178, + 283 + ], + [ + 1168, + 274 + ], + [ + 1162, + 276 + ], + [ + 1154, + 282 + ], + [ + 1149, + 290 + ], + [ + 1138, + 290 + ], + [ + 1124, + 282 + ], + [ + 1116, + 271 + ], + [ + 1108, + 264 + ], + [ + 1108, + 256 + ], + [ + 1114, + 253 + ], + [ + 1111, + 242 + ], + [ + 1102, + 240 + ], + [ + 1091, + 243 + ], + [ + 1080, + 242 + ], + [ + 1073, + 251 + ], + [ + 1067, + 251 + ], + [ + 1061, + 251 + ], + [ + 1055, + 256 + ], + [ + 1046, + 255 + ], + [ + 1040, + 248 + ], + [ + 1042, + 235 + ], + [ + 1042, + 217 + ], + [ + 1039, + 207 + ], + [ + 1035, + 199 + ], + [ + 1035, + 191 + ], + [ + 1035, + 181 + ], + [ + 1024, + 182 + ], + [ + 1023, + 187 + ], + [ + 1017, + 188 + ], + [ + 1014, + 187 + ], + [ + 1013, + 181 + ], + [ + 1008, + 178 + ], + [ + 1004, + 169 + ], + [ + 999, + 160 + ], + [ + 997, + 151 + ], + [ + 993, + 145 + ], + [ + 992, + 139 + ], + [ + 992, + 131 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1543, + 0 + ], + [ + 2047, + 2 + ], + [ + 2048, + 486 + ], + [ + 1517, + 436 + ], + [ + 1516, + 410 + ], + [ + 1527, + 407 + ], + [ + 1525, + 261 + ], + [ + 1524, + 230 + ], + [ + 1520, + 213 + ], + [ + 1519, + 199 + ], + [ + 1540, + 192 + ], + [ + 1542, + 95 + ], + [ + 1543, + 35 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 981, + 436 + ], + [ + 980, + 450 + ], + [ + 966, + 466 + ], + [ + 934, + 464 + ], + [ + 899, + 476 + ], + [ + 882, + 476 + ], + [ + 856, + 474 + ], + [ + 840, + 470 + ], + [ + 838, + 466 + ], + [ + 850, + 455 + ], + [ + 864, + 446 + ], + [ + 871, + 444 + ], + [ + 888, + 443 + ], + [ + 903, + 441 + ], + [ + 921, + 437 + ], + [ + 939, + 439 + ], + [ + 963, + 439 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 207, + 606 + ], + [ + 264, + 611 + ], + [ + 101, + 651 + ], + [ + 33, + 671 + ], + [ + 0, + 683 + ], + [ + 0, + 608 + ], + [ + 139, + 604 + ], + [ + 183, + 604 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1766, + 684 + ], + [ + 1842, + 682 + ], + [ + 1884, + 685 + ], + [ + 1991, + 702 + ], + [ + 2029, + 710 + ], + [ + 2048, + 717 + ], + [ + 2047, + 823 + ], + [ + 1920, + 774 + ], + [ + 1777, + 720 + ], + [ + 1758, + 708 + ], + [ + 1758, + 691 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1964, + 145 + ], + [ + 2030, + 150 + ], + [ + 2027, + 221 + ], + [ + 1966, + 213 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1958, + 75 + ], + [ + 1958, + 55 + ], + [ + 1965, + 35 + ], + [ + 1979, + 21 + ], + [ + 1993, + 16 + ], + [ + 2012, + 21 + ], + [ + 2033, + 38 + ], + [ + 2044, + 57 + ], + [ + 2048, + 70 + ], + [ + 2045, + 118 + ], + [ + 2030, + 139 + ], + [ + 2006, + 150 + ], + [ + 1981, + 145 + ], + [ + 1966, + 131 + ], + [ + 1957, + 94 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2019, + 84 + ], + [ + 2001, + 713 + ], + [ + 1984, + 712 + ], + [ + 1998, + 86 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 181, + 320 + ], + [ + 185, + 377 + ], + [ + 179, + 376 + ], + [ + 175, + 334 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 153, + 343 + ], + [ + 202, + 343 + ], + [ + 204, + 375 + ], + [ + 156, + 377 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 176, + 289 + ], + [ + 187, + 293 + ], + [ + 
195, + 298 + ], + [ + 200, + 309 + ], + [ + 201, + 325 + ], + [ + 188, + 335 + ], + [ + 172, + 339 + ], + [ + 161, + 334 + ], + [ + 155, + 326 + ], + [ + 153, + 311 + ], + [ + 152, + 302 + ], + [ + 160, + 293 + ], + [ + 170, + 290 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 239, + 239 + ], + [ + 331, + 247 + ], + [ + 333, + 252 + ], + [ + 289, + 318 + ], + [ + 282, + 318 + ], + [ + 255, + 277 + ], + [ + 235, + 245 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 278, + 275 + ], + [ + 290, + 277 + ], + [ + 289, + 417 + ], + [ + 280, + 414 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1028, + 381 + ], + [ + 1058, + 383 + ], + [ + 1053, + 426 + ], + [ + 1045, + 422 + ], + [ + 1030, + 424 + ], + [ + 1025, + 420 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 641, + 402 + ], + [ + 677, + 398 + ], + [ + 716, + 399 + ], + [ + 737, + 400 + ], + [ + 749, + 403 + ], + [ + 755, + 414 + ], + [ + 764, + 435 + ], + [ + 767, + 438 + ], + [ + 775, + 438 + ], + [ + 783, + 439 + ], + [ + 783, + 451 + ], + [ + 771, + 453 + ], + [ + 774, + 471 + ], + [ + 774, + 516 + ], + [ + 772, + 528 + ], + [ + 771, + 536 + ], + [ + 766, + 544 + ], + [ + 742, + 544 + ], + [ + 734, + 531 + ], + [ + 731, + 526 + ], + [ + 711, + 526 + ], + [ + 687, + 526 + ], + [ + 677, + 526 + ], + [ + 667, + 525 + ], + [ + 649, + 528 + ], + [ + 641, + 529 + ], + [ + 637, + 529 + ], + [ + 632, + 530 + ], + [ + 631, + 538 + ], + [ + 629, + 547 + ], + [ + 623, + 546 + ], + [ + 611, + 546 + ], + [ + 607, + 540 + ], + [ + 606, + 522 + ], + [ + 607, + 486 + ], + [ + 608, + 464 + ], + [ + 615, + 450 + ], + [ + 608, + 451 + ], + [ + 599, + 449 + ], + [ + 598, + 441 + ], + [ + 602, + 436 + ], + [ + 615, + 437 + ], + [ + 620, + 438 + ], + [ + 624, + 425 + ], + [ + 629, + 414 + ], + [ + 633, + 408 + ], + [ + 635, + 407 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 660, + 464 + ], + [ + 710, + 464 + ], + [ + 709, + 477 + ], + [ + 658, + 478 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 295, + 607 + ], + [ + 441, + 578 + ], + [ + 493, + 573 + ], + [ + 492, + 569 + ], + [ + 485, + 563 + ], + [ + 477, + 551 + ], + [ + 460, + 537 + ], + [ + 443, + 531 + ], + [ + 428, + 524 + ], + [ + 417, + 521 + ], + [ + 407, + 516 + ], + [ + 381, + 516 + ], + [ + 353, + 521 + ], + [ + 317, + 521 + ], + [ + 293, + 515 + ], + [ + 270, + 516 + ], + [ + 245, + 516 + ], + [ + 214, + 501 + ], + [ + 172, + 502 + ], + [ + 136, + 516 + ], + [ + 79, + 525 + ], + [ + 52, + 520 + ], + [ + 29, + 504 + ], + [ + 3, + 497 + ], + [ + 0, + 496 + ], + [ + 1, + 610 + ], + [ + 71, + 610 + ], + [ + 160, + 610 + ], + [ + 227, + 610 + ], + [ + 262, + 613 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1100, + 443 + ], + [ + 1128, + 437 + ], + [ + 1158, + 437 + ], + [ + 1173, + 437 + ], + [ + 1188, + 435 + ], + [ + 1197, + 435 + ], + [ + 1215, + 436 + ], + [ + 1219, + 437 + ], + [ + 1220, + 446 + ], + [ + 1225, + 450 + ], + [ + 1230, + 450 + ], + [ + 1235, + 450 + ], + [ + 1238, + 441 + ], + [ + 1247, + 438 + ], + [ + 1290, + 435 + ], + [ + 1345, + 435 + ], + [ + 1376, + 430 + ], + [ + 1392, + 428 + ], + [ + 1410, + 433 + ], + [ + 1464, + 435 + ], + [ + 1503, + 433 + ], + [ + 1519, + 431 + ], + [ + 1588, + 436 + ], + [ + 1626, + 436 + ], + [ + 1641, + 433 + ], + [ + 1658, + 432 + ], + [ + 1672, + 434 + ], + [ + 1686, + 437 + ], + [ + 1699, + 437 + ], + [ + 1714, + 437 + ], + [ + 1726, + 437 + ], + [ + 1752, + 439 + ], + [ + 1767, + 444 + ], + [ + 1777, + 446 + ], + [ + 1816, + 447 + ], + [ + 1850, + 446 
+ ], + [ + 1868, + 447 + ], + [ + 1885, + 447 + ], + [ + 1906, + 445 + ], + [ + 1917, + 440 + ], + [ + 1935, + 440 + ], + [ + 1948, + 444 + ], + [ + 1973, + 449 + ], + [ + 2013, + 452 + ], + [ + 2032, + 437 + ], + [ + 2048, + 436 + ], + [ + 2048, + 611 + ], + [ + 2048, + 613 + ], + [ + 1541, + 581 + ], + [ + 1342, + 568 + ], + [ + 1306, + 560 + ], + [ + 1205, + 523 + ], + [ + 1207, + 514 + ], + [ + 1212, + 506 + ], + [ + 1220, + 497 + ], + [ + 1229, + 497 + ], + [ + 1249, + 493 + ], + [ + 1265, + 493 + ], + [ + 1274, + 493 + ], + [ + 1276, + 490 + ], + [ + 1132, + 482 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 973, + 432 + ], + [ + 979, + 426 + ], + [ + 987, + 422 + ], + [ + 1015, + 418 + ], + [ + 1054, + 417 + ], + [ + 1088, + 419 + ], + [ + 1098, + 418 + ], + [ + 1106, + 422 + ], + [ + 1120, + 445 + ], + [ + 1127, + 453 + ], + [ + 1133, + 454 + ], + [ + 1138, + 457 + ], + [ + 1140, + 463 + ], + [ + 1140, + 468 + ], + [ + 1139, + 472 + ], + [ + 1144, + 490 + ], + [ + 1148, + 505 + ], + [ + 1146, + 542 + ], + [ + 1146, + 558 + ], + [ + 1146, + 570 + ], + [ + 1143, + 575 + ], + [ + 1130, + 577 + ], + [ + 1117, + 574 + ], + [ + 1115, + 563 + ], + [ + 1114, + 558 + ], + [ + 1069, + 560 + ], + [ + 1025, + 562 + ], + [ + 1006, + 561 + ], + [ + 985, + 561 + ], + [ + 977, + 561 + ], + [ + 974, + 560 + ], + [ + 973, + 578 + ], + [ + 964, + 579 + ], + [ + 954, + 579 + ], + [ + 948, + 576 + ], + [ + 946, + 566 + ], + [ + 943, + 557 + ], + [ + 942, + 548 + ], + [ + 943, + 518 + ], + [ + 943, + 495 + ], + [ + 948, + 482 + ], + [ + 956, + 467 + ], + [ + 933, + 466 + ], + [ + 933, + 464 + ], + [ + 938, + 452 + ], + [ + 954, + 452 + ], + [ + 955, + 459 + ], + [ + 962, + 451 + ], + [ + 967, + 440 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1019, + 479 + ], + [ + 1080, + 480 + ], + [ + 1083, + 495 + ], + [ + 1018, + 494 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000004_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000004_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..8afac4187eef2de9b9472986c0a433d15caa4614 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000004_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000005_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000005_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..6c34f7853da3d00d1e74f718bf4766bd0d852c0d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000005_000019_gtFine_instanceIds.png differ diff --git 
a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000005_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000005_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..06e0752891f63d4279368e2413128ac63fdd57a2 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000005_000019_gtFine_polygons.json @@ -0,0 +1,5677 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 670, + 2 + ], + [ + 1197, + 1 + ], + [ + 1168, + 413 + ], + [ + 755, + 441 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 0, + 595 + ], + [ + 940, + 425 + ], + [ + 1005, + 424 + ], + [ + 1118, + 427 + ], + [ + 1384, + 491 + ], + [ + 1565, + 634 + ], + [ + 1795, + 781 + ], + [ + 2048, + 901 + ], + [ + 2046, + 1024 + ], + [ + 0, + 1024 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 874, + 425 + ], + [ + 998, + 422 + ], + [ + 983, + 434 + ], + [ + 962, + 436 + ], + [ + 944, + 437 + ], + [ + 933, + 440 + ], + [ + 914, + 439 + ], + [ + 895, + 446 + ], + [ + 875, + 440 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1047, + 381 + ], + [ + 1068, + 382 + ], + [ + 1085, + 435 + ], + [ + 1015, + 437 + ], + [ + 1008, + 436 + ], + [ + 996, + 436 + ], + [ + 994, + 429 + ], + [ + 992, + 421 + ], + [ + 992, + 408 + ], + [ + 1001, + 400 + ], + [ + 1011, + 400 + ], + [ + 1022, + 399 + ], + [ + 1030, + 396 + ], + [ + 1038, + 395 + ], + [ + 1043, + 391 + ], + [ + 1045, + 384 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 716, + 461 + ], + [ + 797, + 453 + ], + [ + 793, + 463 + ], + [ + 768, + 467 + ], + [ + 740, + 468 + ], + [ + 720, + 470 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 700, + 459 + ], + [ + 729, + 477 + ], + [ + 769, + 481 + ], + [ + 789, + 484 + ], + [ + 789, + 486 + ], + [ + 766, + 493 + ], + [ + 713, + 503 + ], + [ + 503, + 545 + ], + [ + 399, + 567 + ], + [ + 339, + 584 + ], + [ + 200, + 606 + ], + [ + 94, + 629 + ], + [ + 0, + 648 + ], + [ + 0, + 588 + ], + [ + 453, + 502 + ], + [ + 551, + 483 + ], + [ + 637, + 470 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 897, + 179 + ], + [ + 959, + 182 + ], + [ + 982, + 182 + ], + [ + 983, + 187 + ], + [ + 1015, + 187 + ], + [ + 1019, + 339 + ], + [ + 1037, + 339 + ], + [ + 1040, + 345 + ], + [ + 1046, + 348 + ], + [ + 1045, + 397 + ], + [ + 1052, + 430 + ], + [ + 1025, + 425 + ], + [ + 1003, + 427 + ], + [ + 991, + 425 + ], + [ + 882, + 435 + ], + [ + 766, + 433 + ], + [ + 741, + 347 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 745, + 51 + ], + [ + 760, + 51 + ], + [ + 770, + 51 + ], + [ + 778, + 56 + ], + [ + 782, + 61 + ], + [ + 786, + 70 + ], + [ + 787, + 80 + ], + [ + 796, + 84 + ], + [ + 807, + 82 + ], + [ + 808, + 73 + ], + [ + 811, + 65 + ], + [ + 816, + 63 + ], + [ + 833, + 62 + ], + [ + 835, + 64 + ], + [ + 838, + 76 + ], + [ + 844, + 86 + ], + [ + 851, + 92 + ], + [ + 870, + 104 + ], + [ + 879, + 112 + ], + [ + 885, + 118 + ], + [ + 893, + 128 + ], + [ + 900, + 133 + ], + [ + 910, + 140 + ], + [ + 920, + 145 + ], + [ + 932, + 154 + ], + [ + 945, + 159 + ], + [ + 952, + 160 + ], + [ + 957, + 172 + ], + [ + 951, + 181 + ], + [ + 959, + 187 + ], + [ + 964, + 193 + ], + [ + 960, + 202 + ], + [ + 957, + 220 + ], + [ + 965, + 229 + ], + [ + 968, + 251 + ], + [ + 967, + 264 + ], + [ + 973, + 269 + ], + [ + 976, + 270 + ], + [ + 978, + 279 + ], + [ + 979, + 291 + ], + [ + 981, + 297 + ], + [ + 983, + 309 + ], + [ + 984, + 336 + ], + [ + 973, + 349 + ], + [ + 
964, + 352 + ], + [ + 945, + 356 + ], + [ + 928, + 367 + ], + [ + 930, + 429 + ], + [ + 924, + 428 + ], + [ + 923, + 362 + ], + [ + 903, + 368 + ], + [ + 908, + 439 + ], + [ + 888, + 436 + ], + [ + 843, + 444 + ], + [ + 782, + 458 + ], + [ + 717, + 463 + ], + [ + 716, + 351 + ], + [ + 746, + 89 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 0, + 2 + ], + [ + 750, + 1 + ], + [ + 751, + 27 + ], + [ + 753, + 33 + ], + [ + 762, + 327 + ], + [ + 767, + 334 + ], + [ + 768, + 377 + ], + [ + 761, + 374 + ], + [ + 758, + 377 + ], + [ + 734, + 380 + ], + [ + 735, + 387 + ], + [ + 726, + 390 + ], + [ + 728, + 474 + ], + [ + 728, + 482 + ], + [ + 643, + 491 + ], + [ + 442, + 511 + ], + [ + 371, + 527 + ], + [ + 331, + 535 + ], + [ + 51, + 574 + ], + [ + 0, + 580 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 427, + 344 + ], + [ + 432, + 542 + ], + [ + 430, + 542 + ], + [ + 418, + 342 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 394, + 473 + ], + [ + 403, + 475 + ], + [ + 406, + 483 + ], + [ + 411, + 487 + ], + [ + 426, + 485 + ], + [ + 425, + 480 + ], + [ + 421, + 478 + ], + [ + 428, + 469 + ], + [ + 440, + 461 + ], + [ + 451, + 471 + ], + [ + 458, + 510 + ], + [ + 441, + 538 + ], + [ + 435, + 546 + ], + [ + 419, + 548 + ], + [ + 403, + 541 + ], + [ + 388, + 528 + ], + [ + 384, + 514 + ], + [ + 387, + 505 + ], + [ + 379, + 503 + ], + [ + 379, + 499 + ], + [ + 392, + 492 + ], + [ + 390, + 487 + ], + [ + 387, + 485 + ], + [ + 387, + 477 + ], + [ + 389, + 473 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 412, + 371 + ], + [ + 441, + 368 + ], + [ + 439, + 384 + ], + [ + 432, + 384 + ], + [ + 430, + 394 + ], + [ + 415, + 394 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 434, + 325 + ], + [ + 441, + 325 + ], + [ + 450, + 326 + ], + [ + 457, + 333 + ], + [ + 443, + 346 + ], + [ + 435, + 351 + ], + [ + 430, + 345 + ], + [ + 432, + 329 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 460, + 336 + ], + [ + 465, + 340 + ], + [ + 469, + 350 + ], + [ + 469, + 354 + ], + [ + 465, + 363 + ], + [ + 459, + 367 + ], + [ + 449, + 368 + ], + [ + 440, + 365 + ], + [ + 437, + 357 + ], + [ + 436, + 352 + ], + [ + 437, + 343 + ], + [ + 441, + 338 + ], + [ + 446, + 335 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 408, + 347 + ], + [ + 444, + 346 + ], + [ + 441, + 367 + ], + [ + 409, + 367 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 418, + 315 + ], + [ + 424, + 315 + ], + [ + 431, + 316 + ], + [ + 437, + 323 + ], + [ + 441, + 334 + ], + [ + 440, + 344 + ], + [ + 436, + 347 + ], + [ + 424, + 348 + ], + [ + 414, + 348 + ], + [ + 408, + 344 + ], + [ + 407, + 336 + ], + [ + 407, + 328 + ], + [ + 409, + 322 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 406, + 293 + ], + [ + 441, + 294 + ], + [ + 438, + 318 + ], + [ + 408, + 316 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 426, + 263 + ], + [ + 436, + 270 + ], + [ + 441, + 275 + ], + [ + 439, + 284 + ], + [ + 436, + 291 + ], + [ + 432, + 294 + ], + [ + 424, + 294 + ], + [ + 418, + 294 + ], + [ + 412, + 289 + ], + [ + 407, + 284 + ], + [ + 408, + 274 + ], + [ + 412, + 267 + ], + [ + 421, + 262 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 534, + 330 + ], + [ + 534, + 367 + ], + [ + 505, + 367 + ], + [ + 506, + 326 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 649, + 379 + ], + [ + 646, + 422 + ], + [ + 643, + 420 + ], + [ + 643, + 372 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 
644, + 356 + ], + [ + 651, + 361 + ], + [ + 654, + 367 + ], + [ + 655, + 374 + ], + [ + 653, + 379 + ], + [ + 651, + 381 + ], + [ + 644, + 383 + ], + [ + 639, + 383 + ], + [ + 637, + 377 + ], + [ + 636, + 367 + ], + [ + 639, + 359 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 823, + 174 + ], + [ + 831, + 173 + ], + [ + 850, + 445 + ], + [ + 838, + 426 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 884, + 403 + ], + [ + 887, + 455 + ], + [ + 857, + 457 + ], + [ + 852, + 402 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 869, + 432 + ], + [ + 867, + 457 + ], + [ + 865, + 458 + ], + [ + 865, + 432 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 891, + 421 + ], + [ + 892, + 455 + ], + [ + 894, + 455 + ], + [ + 893, + 426 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 754, + 428 + ], + [ + 771, + 427 + ], + [ + 801, + 428 + ], + [ + 800, + 455 + ], + [ + 781, + 468 + ], + [ + 754, + 469 + ], + [ + 754, + 463 + ], + [ + 754, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 889, + 260 + ], + [ + 868, + 268 + ], + [ + 797, + 286 + ], + [ + 781, + 296 + ], + [ + 778, + 308 + ], + [ + 783, + 486 + ], + [ + 779, + 485 + ], + [ + 772, + 310 + ], + [ + 775, + 298 + ], + [ + 784, + 288 + ], + [ + 798, + 284 + ], + [ + 848, + 270 + ], + [ + 877, + 262 + ], + [ + 884, + 260 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 893, + 250 + ], + [ + 897, + 256 + ], + [ + 898, + 284 + ], + [ + 895, + 290 + ], + [ + 884, + 290 + ], + [ + 879, + 286 + ], + [ + 877, + 258 + ], + [ + 879, + 252 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 786, + 333 + ], + [ + 785, + 361 + ], + [ + 772, + 361 + ], + [ + 772, + 332 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 784, + 370 + ], + [ + 782, + 380 + ], + [ + 792, + 380 + ], + [ + 790, + 401 + ], + [ + 773, + 400 + ], + [ + 771, + 371 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 916, + 419 + ], + [ + 928, + 419 + ], + [ + 944, + 420 + ], + [ + 955, + 423 + ], + [ + 959, + 434 + ], + [ + 954, + 439 + ], + [ + 934, + 441 + ], + [ + 922, + 442 + ], + [ + 906, + 437 + ], + [ + 904, + 426 + ], + [ + 908, + 419 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1077, + 437 + ], + [ + 1066, + 421 + ], + [ + 1061, + 411 + ], + [ + 1057, + 394 + ], + [ + 1052, + 388 + ], + [ + 1050, + 383 + ], + [ + 1048, + 381 + ], + [ + 1047, + 376 + ], + [ + 1053, + 371 + ], + [ + 1057, + 370 + ], + [ + 1062, + 364 + ], + [ + 1060, + 357 + ], + [ + 1059, + 353 + ], + [ + 1064, + 347 + ], + [ + 1069, + 339 + ], + [ + 1070, + 330 + ], + [ + 1066, + 327 + ], + [ + 1066, + 325 + ], + [ + 1072, + 320 + ], + [ + 1076, + 311 + ], + [ + 1069, + 307 + ], + [ + 1062, + 311 + ], + [ + 1060, + 311 + ], + [ + 1061, + 303 + ], + [ + 1065, + 297 + ], + [ + 1068, + 289 + ], + [ + 1065, + 279 + ], + [ + 1072, + 272 + ], + [ + 1082, + 268 + ], + [ + 1092, + 260 + ], + [ + 1190, + 238 + ], + [ + 1213, + 284 + ], + [ + 1214, + 435 + ], + [ + 1129, + 440 + ], + [ + 1080, + 447 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1167, + 178 + ], + [ + 1176, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 652 + ], + [ + 1482, + 507 + ], + [ + 1162, + 409 + ], + [ + 1160, + 279 + ], + [ + 1164, + 214 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1102, + 372 + ], + [ + 1139, + 373 + ], + [ + 1141, + 386 + ], + [ + 1102, + 390 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1129, + 396 + ], + [ + 1146, + 395 + ], + [ + 1146, + 421 + ], + [ 
+ 1133, + 443 + ], + [ + 1122, + 440 + ], + [ + 1118, + 420 + ], + [ + 1118, + 406 + ], + [ + 1123, + 399 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1122, + 396 + ], + [ + 1118, + 433 + ], + [ + 1117, + 428 + ], + [ + 1119, + 392 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1111, + 401 + ], + [ + 1114, + 381 + ], + [ + 1115, + 375 + ], + [ + 1120, + 375 + ], + [ + 1123, + 381 + ], + [ + 1124, + 398 + ], + [ + 1124, + 410 + ], + [ + 1114, + 410 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1139, + 368 + ], + [ + 1138, + 408 + ], + [ + 1136, + 408 + ], + [ + 1137, + 364 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1145, + 343 + ], + [ + 1144, + 369 + ], + [ + 1130, + 369 + ], + [ + 1129, + 346 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1146, + 392 + ], + [ + 1146, + 400 + ], + [ + 1141, + 401 + ], + [ + 1136, + 401 + ], + [ + 1133, + 397 + ], + [ + 1132, + 393 + ], + [ + 1133, + 388 + ], + [ + 1135, + 388 + ], + [ + 1142, + 388 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1096, + 450 + ], + [ + 1102, + 420 + ], + [ + 1109, + 414 + ], + [ + 1118, + 419 + ], + [ + 1121, + 420 + ], + [ + 1128, + 422 + ], + [ + 1129, + 430 + ], + [ + 1132, + 440 + ], + [ + 1132, + 454 + ], + [ + 1123, + 454 + ], + [ + 1104, + 455 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1186, + 355 + ], + [ + 1187, + 384 + ], + [ + 1171, + 384 + ], + [ + 1170, + 367 + ], + [ + 1177, + 367 + ], + [ + 1177, + 355 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1178, + 492 + ], + [ + 1159, + 493 + ], + [ + 1204, + 523 + ], + [ + 1205, + 514 + ], + [ + 1209, + 510 + ], + [ + 1228, + 503 + ], + [ + 1209, + 481 + ], + [ + 1194, + 480 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1207, + 344 + ], + [ + 1211, + 414 + ], + [ + 1211, + 414 + ], + [ + 1207, + 339 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 800, + 425 + ], + [ + 822, + 422 + ], + [ + 841, + 423 + ], + [ + 856, + 435 + ], + [ + 861, + 437 + ], + [ + 864, + 440 + ], + [ + 862, + 444 + ], + [ + 866, + 455 + ], + [ + 866, + 472 + ], + [ + 865, + 481 + ], + [ + 855, + 483 + ], + [ + 848, + 485 + ], + [ + 847, + 478 + ], + [ + 833, + 478 + ], + [ + 810, + 481 + ], + [ + 808, + 487 + ], + [ + 801, + 488 + ], + [ + 787, + 489 + ], + [ + 784, + 477 + ], + [ + 783, + 456 + ], + [ + 789, + 436 + ], + [ + 794, + 428 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 549, + 435 + ], + [ + 559, + 419 + ], + [ + 570, + 412 + ], + [ + 584, + 409 + ], + [ + 594, + 409 + ], + [ + 637, + 407 + ], + [ + 650, + 404 + ], + [ + 671, + 408 + ], + [ + 685, + 416 + ], + [ + 701, + 434 + ], + [ + 709, + 441 + ], + [ + 710, + 446 + ], + [ + 717, + 456 + ], + [ + 726, + 474 + ], + [ + 726, + 508 + ], + [ + 724, + 521 + ], + [ + 716, + 523 + ], + [ + 707, + 520 + ], + [ + 704, + 513 + ], + [ + 703, + 508 + ], + [ + 690, + 510 + ], + [ + 689, + 519 + ], + [ + 685, + 528 + ], + [ + 675, + 531 + ], + [ + 665, + 526 + ], + [ + 661, + 519 + ], + [ + 657, + 514 + ], + [ + 623, + 517 + ], + [ + 597, + 514 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 462, + 438 + ], + [ + 498, + 432 + ], + [ + 534, + 430 + ], + [ + 570, + 430 + ], + [ + 590, + 446 + ], + [ + 605, + 458 + ], + [ + 616, + 458 + ], + [ + 617, + 464 + ], + [ + 617, + 470 + ], + [ + 614, + 473 + ], + [ + 621, + 492 + ], + [ + 626, + 522 + ], + [ + 624, + 538 + ], + [ + 619, + 541 + ], + [ + 609, + 543 + ], + [ + 605, + 538 + ], + [ + 593, + 538 + ], + [ + 588, + 538 + ], 
+ [ + 588, + 545 + ], + [ + 585, + 548 + ], + [ + 575, + 552 + ], + [ + 565, + 550 + ], + [ + 561, + 543 + ], + [ + 541, + 543 + ], + [ + 515, + 543 + ], + [ + 512, + 548 + ], + [ + 506, + 551 + ], + [ + 498, + 551 + ], + [ + 491, + 544 + ], + [ + 475, + 544 + ], + [ + 468, + 544 + ], + [ + 467, + 551 + ], + [ + 464, + 560 + ], + [ + 452, + 561 + ], + [ + 438, + 553 + ], + [ + 435, + 510 + ], + [ + 436, + 483 + ], + [ + 444, + 460 + ], + [ + 452, + 447 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 87, + 370 + ], + [ + 150, + 370 + ], + [ + 216, + 377 + ], + [ + 243, + 385 + ], + [ + 274, + 418 + ], + [ + 293, + 448 + ], + [ + 302, + 463 + ], + [ + 308, + 457 + ], + [ + 319, + 456 + ], + [ + 330, + 466 + ], + [ + 329, + 479 + ], + [ + 326, + 482 + ], + [ + 339, + 496 + ], + [ + 349, + 524 + ], + [ + 351, + 562 + ], + [ + 350, + 592 + ], + [ + 345, + 608 + ], + [ + 330, + 613 + ], + [ + 319, + 610 + ], + [ + 310, + 602 + ], + [ + 303, + 591 + ], + [ + 276, + 597 + ], + [ + 228, + 601 + ], + [ + 228, + 616 + ], + [ + 222, + 634 + ], + [ + 208, + 644 + ], + [ + 191, + 642 + ], + [ + 176, + 632 + ], + [ + 168, + 615 + ], + [ + 164, + 615 + ], + [ + 156, + 623 + ], + [ + 137, + 625 + ], + [ + 123, + 620 + ], + [ + 116, + 613 + ], + [ + 67, + 614 + ], + [ + 29, + 620 + ], + [ + 12, + 622 + ], + [ + 11, + 628 + ], + [ + 5, + 652 + ], + [ + 0, + 651 + ], + [ + 1, + 379 + ], + [ + 21, + 375 + ], + [ + 67, + 371 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 938, + 452 + ], + [ + 938, + 447 + ], + [ + 934, + 445 + ], + [ + 939, + 432 + ], + [ + 937, + 425 + ], + [ + 939, + 422 + ], + [ + 946, + 423 + ], + [ + 951, + 428 + ], + [ + 954, + 434 + ], + [ + 954, + 445 + ], + [ + 953, + 456 + ], + [ + 952, + 460 + ], + [ + 960, + 473 + ], + [ + 960, + 477 + ], + [ + 955, + 483 + ], + [ + 951, + 480 + ], + [ + 952, + 476 + ], + [ + 951, + 471 + ], + [ + 947, + 467 + ], + [ + 942, + 474 + ], + [ + 939, + 480 + ], + [ + 938, + 482 + ], + [ + 933, + 482 + ], + [ + 930, + 481 + ], + [ + 927, + 475 + ], + [ + 936, + 473 + ], + [ + 937, + 464 + ], + [ + 937, + 459 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1005, + 457 + ], + [ + 1007, + 448 + ], + [ + 999, + 446 + ], + [ + 999, + 440 + ], + [ + 1007, + 438 + ], + [ + 1013, + 435 + ], + [ + 1017, + 427 + ], + [ + 1024, + 420 + ], + [ + 1051, + 418 + ], + [ + 1079, + 419 + ], + [ + 1094, + 423 + ], + [ + 1103, + 435 + ], + [ + 1112, + 439 + ], + [ + 1113, + 443 + ], + [ + 1109, + 449 + ], + [ + 1108, + 468 + ], + [ + 1109, + 492 + ], + [ + 1111, + 501 + ], + [ + 1102, + 504 + ], + [ + 1093, + 500 + ], + [ + 1092, + 495 + ], + [ + 1062, + 492 + ], + [ + 1037, + 494 + ], + [ + 1024, + 494 + ], + [ + 1020, + 495 + ], + [ + 1020, + 504 + ], + [ + 1017, + 504 + ], + [ + 1009, + 504 + ], + [ + 1005, + 501 + ], + [ + 1005, + 486 + ], + [ + 1004, + 467 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1183, + 336 + ], + [ + 1207, + 336 + ], + [ + 1208, + 347 + ], + [ + 1183, + 347 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1055, + 249 + ], + [ + 1069, + 249 + ], + [ + 1072, + 252 + ], + [ + 1071, + 282 + ], + [ + 1067, + 286 + ], + [ + 1056, + 286 + ], + [ + 1050, + 282 + ], + [ + 1050, + 251 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1178, + 492 + ], + [ + 1159, + 493 + ], + [ + 1204, + 523 + ], + [ + 1205, + 514 + ], + [ + 1209, + 510 + ], + [ + 1228, + 503 + ], + [ + 1209, + 481 + ], + [ + 1194, + 480 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1202, + 332 + ], + [ + 
1211, + 334 + ], + [ + 1209, + 386 + ], + [ + 1199, + 388 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1155, + 311 + ], + [ + 1146, + 308 + ], + [ + 1134, + 311 + ], + [ + 1120, + 312 + ], + [ + 1107, + 307 + ], + [ + 1096, + 305 + ], + [ + 1076, + 304 + ], + [ + 1073, + 292 + ], + [ + 1085, + 272 + ], + [ + 1084, + 262 + ], + [ + 1077, + 253 + ], + [ + 1077, + 246 + ], + [ + 1080, + 245 + ], + [ + 1089, + 243 + ], + [ + 1098, + 243 + ], + [ + 1098, + 238 + ], + [ + 1094, + 230 + ], + [ + 1090, + 223 + ], + [ + 1084, + 223 + ], + [ + 1083, + 217 + ], + [ + 1086, + 204 + ], + [ + 1080, + 193 + ], + [ + 1074, + 188 + ], + [ + 1069, + 180 + ], + [ + 1069, + 170 + ], + [ + 1076, + 164 + ], + [ + 1072, + 157 + ], + [ + 1070, + 151 + ], + [ + 1069, + 149 + ], + [ + 1077, + 146 + ], + [ + 1085, + 149 + ], + [ + 1096, + 144 + ], + [ + 1095, + 137 + ], + [ + 1097, + 136 + ], + [ + 1107, + 133 + ], + [ + 1114, + 130 + ], + [ + 1142, + 119 + ], + [ + 1195, + 108 + ], + [ + 1239, + 122 + ], + [ + 1278, + 137 + ], + [ + 1281, + 147 + ], + [ + 1296, + 159 + ], + [ + 1305, + 168 + ], + [ + 1311, + 178 + ], + [ + 1316, + 193 + ], + [ + 1321, + 201 + ], + [ + 1322, + 203 + ], + [ + 1320, + 215 + ], + [ + 1311, + 230 + ], + [ + 1308, + 247 + ], + [ + 1293, + 249 + ], + [ + 1286, + 254 + ], + [ + 1278, + 253 + ], + [ + 1273, + 247 + ], + [ + 1265, + 242 + ], + [ + 1253, + 244 + ], + [ + 1236, + 242 + ], + [ + 1228, + 240 + ], + [ + 1226, + 248 + ], + [ + 1223, + 253 + ], + [ + 1219, + 258 + ], + [ + 1219, + 263 + ], + [ + 1220, + 271 + ], + [ + 1223, + 278 + ], + [ + 1218, + 284 + ], + [ + 1219, + 287 + ], + [ + 1225, + 285 + ], + [ + 1233, + 285 + ], + [ + 1235, + 287 + ], + [ + 1225, + 301 + ], + [ + 1224, + 310 + ], + [ + 1225, + 318 + ], + [ + 1218, + 322 + ], + [ + 1221, + 372 + ], + [ + 1237, + 368 + ], + [ + 1245, + 368 + ], + [ + 1249, + 373 + ], + [ + 1242, + 394 + ], + [ + 1234, + 407 + ], + [ + 1215, + 419 + ], + [ + 1207, + 417 + ], + [ + 1203, + 402 + ], + [ + 1204, + 385 + ], + [ + 1200, + 377 + ], + [ + 1197, + 372 + ], + [ + 1201, + 366 + ], + [ + 1207, + 360 + ], + [ + 1209, + 343 + ], + [ + 1210, + 329 + ], + [ + 1210, + 321 + ], + [ + 1204, + 316 + ], + [ + 1201, + 321 + ], + [ + 1196, + 327 + ], + [ + 1184, + 329 + ], + [ + 1175, + 332 + ], + [ + 1174, + 324 + ], + [ + 1172, + 317 + ], + [ + 1167, + 315 + ], + [ + 1160, + 318 + ], + [ + 1165, + 407 + ], + [ + 1159, + 408 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1413, + 263 + ], + [ + 1413, + 291 + ], + [ + 1349, + 313 + ], + [ + 1349, + 304 + ], + [ + 1349, + 287 + ], + [ + 1378, + 278 + ], + [ + 1406, + 266 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1317, + 286 + ], + [ + 1335, + 283 + ], + [ + 1364, + 266 + ], + [ + 1403, + 263 + ], + [ + 1364, + 285 + ], + [ + 1339, + 293 + ], + [ + 1318, + 293 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1152, + 400 + ], + [ + 1179, + 396 + ], + [ + 1203, + 397 + ], + [ + 1209, + 401 + ], + [ + 1212, + 423 + ], + [ + 1147, + 478 + ], + [ + 1138, + 480 + ], + [ + 1131, + 477 + ], + [ + 1129, + 464 + ], + [ + 1127, + 444 + ], + [ + 1129, + 420 + ], + [ + 1131, + 406 + ], + [ + 1140, + 400 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1281, + 565 + ], + [ + 1405, + 641 + ], + [ + 1405, + 624 + ], + [ + 1451, + 608 + ], + [ + 1508, + 586 + ], + [ + 1453, + 486 + ], + [ + 1306, + 533 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1281, + 565 + ], + [ + 1405, + 641 + ], + [ + 1405, + 624 + ], + [ + 1451, + 608 + ], + [ 
+ 1508, + 586 + ], + [ + 1453, + 486 + ], + [ + 1306, + 533 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1159, + 422 + ], + [ + 1182, + 418 + ], + [ + 1212, + 417 + ], + [ + 1191, + 468 + ], + [ + 1163, + 486 + ], + [ + 1149, + 485 + ], + [ + 1147, + 483 + ], + [ + 1144, + 460 + ], + [ + 1147, + 441 + ], + [ + 1152, + 431 + ], + [ + 1155, + 424 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1185, + 491 + ], + [ + 1177, + 494 + ], + [ + 1156, + 494 + ], + [ + 1151, + 490 + ], + [ + 1151, + 481 + ], + [ + 1164, + 476 + ], + [ + 1183, + 474 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1204, + 413 + ], + [ + 1216, + 409 + ], + [ + 1262, + 408 + ], + [ + 1266, + 409 + ], + [ + 1275, + 417 + ], + [ + 1207, + 496 + ], + [ + 1197, + 499 + ], + [ + 1186, + 499 + ], + [ + 1178, + 495 + ], + [ + 1175, + 477 + ], + [ + 1177, + 461 + ], + [ + 1175, + 454 + ], + [ + 1168, + 452 + ], + [ + 1169, + 445 + ], + [ + 1175, + 442 + ], + [ + 1181, + 444 + ], + [ + 1183, + 446 + ], + [ + 1190, + 427 + ], + [ + 1197, + 416 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1210, + 437 + ], + [ + 1215, + 428 + ], + [ + 1227, + 418 + ], + [ + 1255, + 414 + ], + [ + 1288, + 411 + ], + [ + 1311, + 419 + ], + [ + 1281, + 496 + ], + [ + 1228, + 507 + ], + [ + 1213, + 507 + ], + [ + 1205, + 504 + ], + [ + 1200, + 494 + ], + [ + 1200, + 475 + ], + [ + 1202, + 452 + ], + [ + 1197, + 451 + ], + [ + 1193, + 448 + ], + [ + 1196, + 441 + ], + [ + 1201, + 440 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1290, + 315 + ], + [ + 1294, + 448 + ], + [ + 1286, + 456 + ], + [ + 1284, + 318 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1287, + 282 + ], + [ + 1295, + 288 + ], + [ + 1300, + 298 + ], + [ + 1300, + 309 + ], + [ + 1294, + 321 + ], + [ + 1285, + 324 + ], + [ + 1277, + 322 + ], + [ + 1268, + 313 + ], + [ + 1267, + 300 + ], + [ + 1273, + 286 + ], + [ + 1280, + 284 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1294, + 561 + ], + [ + 1278, + 566 + ], + [ + 1264, + 561 + ], + [ + 1239, + 545 + ], + [ + 1213, + 528 + ], + [ + 1203, + 523 + ], + [ + 1203, + 517 + ], + [ + 1211, + 509 + ], + [ + 1236, + 504 + ], + [ + 1294, + 499 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1321, + 437 + ], + [ + 1287, + 519 + ], + [ + 1241, + 526 + ], + [ + 1223, + 519 + ], + [ + 1221, + 512 + ], + [ + 1225, + 495 + ], + [ + 1232, + 484 + ], + [ + 1245, + 468 + ], + [ + 1254, + 454 + ], + [ + 1264, + 447 + ], + [ + 1299, + 439 + ], + [ + 1312, + 407 + ], + [ + 1308, + 356 + ], + [ + 1308, + 252 + ], + [ + 1302, + 141 + ], + [ + 1296, + 132 + ], + [ + 1284, + 139 + ], + [ + 1265, + 153 + ], + [ + 1242, + 147 + ], + [ + 1230, + 147 + ], + [ + 1211, + 166 + ], + [ + 1189, + 182 + ], + [ + 1164, + 167 + ], + [ + 1155, + 147 + ], + [ + 1141, + 136 + ], + [ + 1137, + 130 + ], + [ + 1132, + 126 + ], + [ + 1123, + 127 + ], + [ + 1117, + 120 + ], + [ + 1111, + 122 + ], + [ + 1106, + 123 + ], + [ + 1102, + 117 + ], + [ + 1097, + 113 + ], + [ + 1091, + 112 + ], + [ + 1087, + 104 + ], + [ + 1091, + 93 + ], + [ + 1109, + 78 + ], + [ + 1120, + 71 + ], + [ + 1119, + 64 + ], + [ + 1113, + 55 + ], + [ + 1112, + 50 + ], + [ + 1114, + 45 + ], + [ + 1118, + 43 + ], + [ + 1125, + 40 + ], + [ + 1128, + 33 + ], + [ + 1142, + 27 + ], + [ + 1145, + 18 + ], + [ + 1139, + 14 + ], + [ + 1132, + 12 + ], + [ + 1130, + 0 + ], + [ + 1146, + 0 + ], + [ + 1430, + 0 + ], + [ + 1430, + 3 + ], + [ + 1427, + 10 + ], + [ + 1426, + 20 + ], + [ + 1420, + 30 + ], + [ + 1423, + 34 + ], + 
[ + 1422, + 39 + ], + [ + 1418, + 46 + ], + [ + 1417, + 50 + ], + [ + 1391, + 53 + ], + [ + 1386, + 57 + ], + [ + 1391, + 60 + ], + [ + 1398, + 62 + ], + [ + 1404, + 59 + ], + [ + 1411, + 57 + ], + [ + 1419, + 56 + ], + [ + 1431, + 51 + ], + [ + 1439, + 51 + ], + [ + 1445, + 55 + ], + [ + 1443, + 63 + ], + [ + 1429, + 75 + ], + [ + 1426, + 78 + ], + [ + 1433, + 85 + ], + [ + 1427, + 98 + ], + [ + 1413, + 99 + ], + [ + 1413, + 110 + ], + [ + 1414, + 123 + ], + [ + 1400, + 131 + ], + [ + 1391, + 122 + ], + [ + 1378, + 119 + ], + [ + 1363, + 119 + ], + [ + 1349, + 119 + ], + [ + 1347, + 126 + ], + [ + 1342, + 131 + ], + [ + 1332, + 137 + ], + [ + 1333, + 145 + ], + [ + 1326, + 152 + ], + [ + 1318, + 154 + ], + [ + 1320, + 201 + ], + [ + 1321, + 257 + ], + [ + 1324, + 340 + ], + [ + 1324, + 418 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1377, + 336 + ], + [ + 1384, + 339 + ], + [ + 1411, + 338 + ], + [ + 1445, + 333 + ], + [ + 1491, + 332 + ], + [ + 1523, + 329 + ], + [ + 1547, + 327 + ], + [ + 1567, + 333 + ], + [ + 1590, + 349 + ], + [ + 1609, + 381 + ], + [ + 1628, + 421 + ], + [ + 1640, + 491 + ], + [ + 1634, + 536 + ], + [ + 1576, + 561 + ], + [ + 1445, + 574 + ], + [ + 1384, + 583 + ], + [ + 1383, + 594 + ], + [ + 1376, + 612 + ], + [ + 1365, + 615 + ], + [ + 1341, + 614 + ], + [ + 1333, + 611 + ], + [ + 1329, + 588 + ], + [ + 1328, + 568 + ], + [ + 1323, + 564 + ], + [ + 1320, + 577 + ], + [ + 1313, + 587 + ], + [ + 1295, + 584 + ], + [ + 1286, + 581 + ], + [ + 1280, + 553 + ], + [ + 1277, + 519 + ], + [ + 1278, + 487 + ], + [ + 1284, + 456 + ], + [ + 1285, + 446 + ], + [ + 1280, + 442 + ], + [ + 1279, + 434 + ], + [ + 1287, + 424 + ], + [ + 1298, + 424 + ], + [ + 1304, + 406 + ], + [ + 1315, + 377 + ], + [ + 1326, + 357 + ], + [ + 1342, + 345 + ], + [ + 1363, + 337 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1451, + 475 + ], + [ + 1543, + 468 + ], + [ + 1541, + 489 + ], + [ + 1456, + 496 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1434, + 608 + ], + [ + 1474, + 597 + ], + [ + 1668, + 567 + ], + [ + 1900, + 545 + ], + [ + 2048, + 586 + ], + [ + 2048, + 687 + ], + [ + 1708, + 704 + ], + [ + 1575, + 708 + ], + [ + 1529, + 728 + ], + [ + 1419, + 664 + ], + [ + 1401, + 649 + ], + [ + 1406, + 625 + ], + [ + 1418, + 615 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1686, + 0 + ], + [ + 1729, + 0 + ], + [ + 1738, + 135 + ], + [ + 1749, + 290 + ], + [ + 1760, + 497 + ], + [ + 1760, + 567 + ], + [ + 1705, + 568 + ], + [ + 1703, + 255 + ], + [ + 1695, + 161 + ], + [ + 1695, + 76 + ], + [ + 1692, + 32 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1681, + 428 + ], + [ + 1681, + 449 + ], + [ + 1671, + 450 + ], + [ + 1670, + 427 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 1603, + 378 + ], + [ + 1615, + 371 + ], + [ + 1629, + 372 + ], + [ + 1641, + 381 + ], + [ + 1645, + 390 + ], + [ + 1638, + 402 + ], + [ + 1628, + 421 + ], + [ + 1631, + 436 + ], + [ + 1644, + 453 + ], + [ + 1658, + 441 + ], + [ + 1677, + 441 + ], + [ + 1687, + 441 + ], + [ + 1700, + 451 + ], + [ + 1721, + 478 + ], + [ + 1764, + 472 + ], + [ + 1790, + 471 + ], + [ + 1803, + 477 + ], + [ + 1819, + 495 + ], + [ + 1783, + 588 + ], + [ + 1722, + 588 + ], + [ + 1692, + 579 + ], + [ + 1676, + 560 + ], + [ + 1662, + 554 + ], + [ + 1648, + 548 + ], + [ + 1649, + 564 + ], + [ + 1625, + 593 + ], + [ + 1605, + 602 + ], + [ + 1579, + 603 + ], + [ + 1559, + 597 + ], + [ + 1537, + 619 + ], + [ + 1509, + 626 + ], + [ + 1494, + 627 + ], + [ + 1485, + 
622 + ], + [ + 1478, + 608 + ], + [ + 1475, + 589 + ], + [ + 1474, + 577 + ], + [ + 1474, + 565 + ], + [ + 1488, + 555 + ], + [ + 1505, + 536 + ], + [ + 1514, + 530 + ], + [ + 1514, + 517 + ], + [ + 1520, + 500 + ], + [ + 1536, + 484 + ], + [ + 1571, + 470 + ], + [ + 1584, + 466 + ], + [ + 1580, + 458 + ], + [ + 1570, + 455 + ], + [ + 1559, + 446 + ], + [ + 1556, + 440 + ], + [ + 1556, + 399 + ], + [ + 1567, + 384 + ], + [ + 1578, + 377 + ], + [ + 1594, + 377 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1696, + 428 + ], + [ + 1716, + 422 + ], + [ + 1743, + 426 + ], + [ + 1770, + 426 + ], + [ + 1775, + 429 + ], + [ + 1768, + 439 + ], + [ + 1744, + 449 + ], + [ + 1747, + 458 + ], + [ + 1836, + 445 + ], + [ + 1880, + 422 + ], + [ + 1903, + 472 + ], + [ + 1881, + 580 + ], + [ + 1780, + 630 + ], + [ + 1708, + 644 + ], + [ + 1640, + 649 + ], + [ + 1616, + 643 + ], + [ + 1589, + 609 + ], + [ + 1586, + 565 + ], + [ + 1593, + 533 + ], + [ + 1596, + 520 + ], + [ + 1610, + 505 + ], + [ + 1625, + 487 + ], + [ + 1639, + 477 + ], + [ + 1709, + 476 + ], + [ + 1700, + 464 + ], + [ + 1681, + 458 + ], + [ + 1674, + 451 + ], + [ + 1680, + 439 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1730, + 408 + ], + [ + 1752, + 412 + ], + [ + 1767, + 408 + ], + [ + 1765, + 402 + ], + [ + 1753, + 392 + ], + [ + 1765, + 380 + ], + [ + 1780, + 377 + ], + [ + 1789, + 373 + ], + [ + 1793, + 367 + ], + [ + 1805, + 353 + ], + [ + 1786, + 342 + ], + [ + 1749, + 337 + ], + [ + 1727, + 331 + ], + [ + 1713, + 323 + ], + [ + 1708, + 311 + ], + [ + 1725, + 302 + ], + [ + 1750, + 304 + ], + [ + 1779, + 315 + ], + [ + 1806, + 320 + ], + [ + 1820, + 322 + ], + [ + 1863, + 317 + ], + [ + 1857, + 309 + ], + [ + 1860, + 291 + ], + [ + 1869, + 279 + ], + [ + 1870, + 244 + ], + [ + 1817, + 235 + ], + [ + 1795, + 228 + ], + [ + 1786, + 222 + ], + [ + 1827, + 214 + ], + [ + 1882, + 218 + ], + [ + 1912, + 227 + ], + [ + 1943, + 241 + ], + [ + 1966, + 245 + ], + [ + 1971, + 222 + ], + [ + 1968, + 200 + ], + [ + 1976, + 189 + ], + [ + 2009, + 205 + ], + [ + 2032, + 219 + ], + [ + 2048, + 242 + ], + [ + 2048, + 474 + ], + [ + 2000, + 519 + ], + [ + 1884, + 554 + ], + [ + 1780, + 518 + ], + [ + 1756, + 495 + ], + [ + 1694, + 487 + ], + [ + 1670, + 497 + ], + [ + 1653, + 510 + ], + [ + 1626, + 516 + ], + [ + 1603, + 514 + ], + [ + 1575, + 505 + ], + [ + 1570, + 502 + ], + [ + 1578, + 491 + ], + [ + 1608, + 490 + ], + [ + 1626, + 487 + ], + [ + 1625, + 471 + ], + [ + 1609, + 455 + ], + [ + 1601, + 437 + ], + [ + 1591, + 422 + ], + [ + 1616, + 418 + ], + [ + 1644, + 427 + ], + [ + 1669, + 434 + ], + [ + 1699, + 425 + ], + [ + 1710, + 418 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1890, + 66 + ], + [ + 1900, + 322 + ], + [ + 1879, + 326 + ], + [ + 1861, + 11 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1790, + 0 + ], + [ + 1965, + 2 + ], + [ + 1966, + 65 + ], + [ + 1958, + 72 + ], + [ + 1808, + 85 + ], + [ + 1795, + 79 + ], + [ + 1794, + 47 + ], + [ + 1793, + 22 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1474, + 560 + ], + [ + 1488, + 565 + ], + [ + 1493, + 583 + ], + [ + 1530, + 597 + ], + [ + 1529, + 578 + ], + [ + 1551, + 578 + ], + [ + 1552, + 601 + ], + [ + 1613, + 618 + ], + [ + 1614, + 595 + ], + [ + 1632, + 598 + ], + [ + 1638, + 686 + ], + [ + 1613, + 683 + ], + [ + 1611, + 635 + ], + [ + 1555, + 616 + ], + [ + 1553, + 653 + ], + [ + 1536, + 648 + ], + [ + 1534, + 613 + ], + [ + 1499, + 602 + ], + [ + 1496, + 632 + ], + [ + 1477, + 628 + ] + ] + }, + { + 
"label": "sidewalk", + "polygon": [ + [ + 1583, + 704 + ], + [ + 2048, + 685 + ], + [ + 2046, + 1024 + ], + [ + 1950, + 1020 + ], + [ + 1720, + 860 + ], + [ + 1530, + 729 + ], + [ + 1571, + 706 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 1846, + 403 + ], + [ + 1858, + 406 + ], + [ + 1867, + 413 + ], + [ + 1880, + 418 + ], + [ + 1900, + 420 + ], + [ + 1911, + 408 + ], + [ + 1928, + 404 + ], + [ + 1944, + 406 + ], + [ + 1965, + 417 + ], + [ + 1980, + 418 + ], + [ + 1990, + 415 + ], + [ + 2005, + 409 + ], + [ + 2016, + 410 + ], + [ + 2029, + 414 + ], + [ + 2027, + 428 + ], + [ + 2018, + 433 + ], + [ + 2000, + 435 + ], + [ + 1976, + 442 + ], + [ + 1985, + 450 + ], + [ + 2001, + 451 + ], + [ + 2026, + 456 + ], + [ + 2048, + 461 + ], + [ + 2048, + 661 + ], + [ + 1984, + 675 + ], + [ + 1894, + 672 + ], + [ + 1873, + 658 + ], + [ + 1866, + 638 + ], + [ + 1859, + 616 + ], + [ + 1842, + 609 + ], + [ + 1836, + 615 + ], + [ + 1828, + 646 + ], + [ + 1796, + 694 + ], + [ + 1778, + 710 + ], + [ + 1753, + 726 + ], + [ + 1724, + 728 + ], + [ + 1699, + 693 + ], + [ + 1701, + 664 + ], + [ + 1713, + 629 + ], + [ + 1727, + 602 + ], + [ + 1735, + 582 + ], + [ + 1779, + 549 + ], + [ + 1788, + 527 + ], + [ + 1802, + 499 + ], + [ + 1799, + 485 + ], + [ + 1799, + 462 + ], + [ + 1801, + 452 + ], + [ + 1819, + 451 + ], + [ + 1832, + 449 + ], + [ + 1832, + 441 + ], + [ + 1823, + 432 + ], + [ + 1823, + 418 + ], + [ + 1837, + 402 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000006_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000006_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..68d8003a2fb395376d47c410f993920986046793 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000006_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000007_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000007_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..210b8e57cbcbd5e203994635d8d36aab4ec9c5f0 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000007_000019_gtFine_polygons.json @@ -0,0 +1,4068 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 1186, + 451 + ], + [ + 1315, + 452 + ], + [ + 1579, + 518 + ], + [ + 1914, + 619 + ], + [ + 1987, + 639 + ], + [ + 2048, + 654 + ], + [ + 2048, + 1024 + ], + [ + 2, + 1024 + ], + [ + 1, + 739 + ], + [ + 855, + 519 + ], + [ + 1137, + 456 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1165, + 309 + ], + [ + 1156, + 424 + ], + [ + 728, + 503 + ], + [ + 0, + 654 + ], + [ + 0, + 0 + ], + [ + 891, + 0 + ], 
+ [ + 1059, + 172 + ], + [ + 1126, + 249 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1313, + 450 + ], + [ + 1311, + 283 + ], + [ + 1466, + 84 + ], + [ + 1556, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 597 + ], + [ + 1405, + 484 + ], + [ + 1327, + 465 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1149, + 2 + ], + [ + 1323, + 1 + ], + [ + 1309, + 456 + ], + [ + 1244, + 459 + ], + [ + 1211, + 454 + ], + [ + 1148, + 441 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1263, + 442 + ], + [ + 1287, + 444 + ], + [ + 1294, + 449 + ], + [ + 1286, + 465 + ], + [ + 1274, + 466 + ], + [ + 1269, + 462 + ], + [ + 1259, + 462 + ], + [ + 1252, + 464 + ], + [ + 1242, + 465 + ], + [ + 1237, + 459 + ], + [ + 1238, + 452 + ], + [ + 1250, + 448 + ], + [ + 1255, + 446 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1303, + 421 + ], + [ + 1298, + 421 + ], + [ + 1292, + 414 + ], + [ + 1287, + 405 + ], + [ + 1277, + 399 + ], + [ + 1262, + 391 + ], + [ + 1244, + 381 + ], + [ + 1231, + 357 + ], + [ + 1228, + 332 + ], + [ + 1246, + 299 + ], + [ + 1314, + 234 + ], + [ + 1338, + 228 + ], + [ + 1363, + 302 + ], + [ + 1354, + 343 + ], + [ + 1360, + 353 + ], + [ + 1359, + 362 + ], + [ + 1347, + 373 + ], + [ + 1348, + 379 + ], + [ + 1353, + 384 + ], + [ + 1355, + 389 + ], + [ + 1355, + 396 + ], + [ + 1350, + 404 + ], + [ + 1345, + 408 + ], + [ + 1340, + 419 + ], + [ + 1334, + 444 + ], + [ + 1314, + 453 + ], + [ + 1306, + 441 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1211, + 437 + ], + [ + 1217, + 442 + ], + [ + 1221, + 447 + ], + [ + 1222, + 460 + ], + [ + 1218, + 467 + ], + [ + 1210, + 470 + ], + [ + 1195, + 468 + ], + [ + 1193, + 453 + ], + [ + 1195, + 443 + ], + [ + 1199, + 440 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1159, + 422 + ], + [ + 1172, + 420 + ], + [ + 1194, + 422 + ], + [ + 1197, + 429 + ], + [ + 1199, + 442 + ], + [ + 1201, + 460 + ], + [ + 1201, + 468 + ], + [ + 1198, + 475 + ], + [ + 1187, + 473 + ], + [ + 1182, + 472 + ], + [ + 1161, + 455 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1142, + 408 + ], + [ + 1138, + 379 + ], + [ + 1137, + 362 + ], + [ + 1135, + 348 + ], + [ + 1131, + 344 + ], + [ + 1124, + 340 + ], + [ + 1117, + 336 + ], + [ + 1109, + 326 + ], + [ + 1101, + 317 + ], + [ + 1095, + 303 + ], + [ + 1089, + 286 + ], + [ + 1080, + 273 + ], + [ + 1072, + 258 + ], + [ + 1069, + 222 + ], + [ + 1096, + 133 + ], + [ + 1142, + 102 + ], + [ + 1215, + 99 + ], + [ + 1254, + 127 + ], + [ + 1278, + 155 + ], + [ + 1303, + 188 + ], + [ + 1308, + 228 + ], + [ + 1304, + 268 + ], + [ + 1308, + 285 + ], + [ + 1301, + 303 + ], + [ + 1277, + 325 + ], + [ + 1267, + 344 + ], + [ + 1248, + 360 + ], + [ + 1224, + 372 + ], + [ + 1199, + 376 + ], + [ + 1187, + 378 + ], + [ + 1179, + 385 + ], + [ + 1173, + 413 + ], + [ + 1173, + 435 + ], + [ + 1158, + 445 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1178, + 434 + ], + [ + 1185, + 441 + ], + [ + 1189, + 443 + ], + [ + 1190, + 448 + ], + [ + 1187, + 454 + ], + [ + 1186, + 465 + ], + [ + 1187, + 471 + ], + [ + 1183, + 478 + ], + [ + 1173, + 480 + ], + [ + 1164, + 471 + ], + [ + 1158, + 449 + ], + [ + 1160, + 436 + ], + [ + 1166, + 434 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1126, + 373 + ], + [ + 1124, + 404 + ], + [ + 1101, + 403 + ], + [ + 1100, + 372 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1100, + 405 + ], + [ + 1097, + 381 + ], + [ + 1100, + 381 + ], + [ + 1102, + 386 + ], + [ + 1149, + 387 + ], + [ + 1149, + 
380 + ], + [ + 1155, + 379 + ], + [ + 1156, + 402 + ], + [ + 1163, + 410 + ], + [ + 1169, + 436 + ], + [ + 1172, + 445 + ], + [ + 1173, + 464 + ], + [ + 1173, + 476 + ], + [ + 1173, + 484 + ], + [ + 1170, + 488 + ], + [ + 1159, + 494 + ], + [ + 1151, + 494 + ], + [ + 1152, + 486 + ], + [ + 1152, + 482 + ], + [ + 1130, + 480 + ], + [ + 1111, + 459 + ], + [ + 1100, + 414 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1021, + 337 + ], + [ + 1059, + 339 + ], + [ + 1057, + 382 + ], + [ + 1020, + 382 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1024, + 420 + ], + [ + 1028, + 408 + ], + [ + 1032, + 399 + ], + [ + 1039, + 394 + ], + [ + 1061, + 394 + ], + [ + 1095, + 393 + ], + [ + 1107, + 399 + ], + [ + 1114, + 420 + ], + [ + 1124, + 437 + ], + [ + 1132, + 436 + ], + [ + 1135, + 444 + ], + [ + 1134, + 455 + ], + [ + 1126, + 456 + ], + [ + 1130, + 485 + ], + [ + 1130, + 499 + ], + [ + 1127, + 503 + ], + [ + 1076, + 505 + ], + [ + 1046, + 473 + ], + [ + 1031, + 436 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1079, + 477 + ], + [ + 1090, + 476 + ], + [ + 1100, + 472 + ], + [ + 1107, + 472 + ], + [ + 1115, + 473 + ], + [ + 1116, + 485 + ], + [ + 1112, + 491 + ], + [ + 1112, + 496 + ], + [ + 1116, + 501 + ], + [ + 1121, + 504 + ], + [ + 1113, + 511 + ], + [ + 1102, + 512 + ], + [ + 1084, + 512 + ], + [ + 1080, + 497 + ], + [ + 1077, + 488 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1057, + 236 + ], + [ + 1057, + 229 + ], + [ + 1046, + 229 + ], + [ + 1042, + 224 + ], + [ + 1032, + 223 + ], + [ + 1020, + 223 + ], + [ + 1011, + 216 + ], + [ + 1003, + 210 + ], + [ + 996, + 217 + ], + [ + 979, + 212 + ], + [ + 976, + 201 + ], + [ + 973, + 193 + ], + [ + 962, + 194 + ], + [ + 956, + 187 + ], + [ + 946, + 181 + ], + [ + 939, + 176 + ], + [ + 936, + 170 + ], + [ + 931, + 156 + ], + [ + 925, + 151 + ], + [ + 910, + 154 + ], + [ + 897, + 154 + ], + [ + 886, + 149 + ], + [ + 882, + 139 + ], + [ + 882, + 132 + ], + [ + 875, + 128 + ], + [ + 870, + 120 + ], + [ + 869, + 110 + ], + [ + 879, + 102 + ], + [ + 875, + 97 + ], + [ + 871, + 89 + ], + [ + 864, + 86 + ], + [ + 855, + 85 + ], + [ + 840, + 80 + ], + [ + 830, + 66 + ], + [ + 828, + 56 + ], + [ + 830, + 40 + ], + [ + 825, + 21 + ], + [ + 827, + 13 + ], + [ + 825, + 0 + ], + [ + 1360, + 0 + ], + [ + 1369, + 52 + ], + [ + 1357, + 78 + ], + [ + 1338, + 95 + ], + [ + 1301, + 120 + ], + [ + 1266, + 138 + ], + [ + 1202, + 169 + ], + [ + 1161, + 185 + ], + [ + 1153, + 204 + ], + [ + 1139, + 219 + ], + [ + 1123, + 229 + ], + [ + 1101, + 246 + ], + [ + 1089, + 251 + ], + [ + 1077, + 254 + ], + [ + 1072, + 320 + ], + [ + 1075, + 348 + ], + [ + 1069, + 404 + ], + [ + 1071, + 427 + ], + [ + 1048, + 424 + ], + [ + 1055, + 308 + ], + [ + 1057, + 260 + ], + [ + 1051, + 251 + ], + [ + 1052, + 243 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1079, + 477 + ], + [ + 1090, + 476 + ], + [ + 1100, + 472 + ], + [ + 1107, + 472 + ], + [ + 1115, + 473 + ], + [ + 1116, + 485 + ], + [ + 1112, + 491 + ], + [ + 1112, + 496 + ], + [ + 1116, + 501 + ], + [ + 1121, + 504 + ], + [ + 1113, + 511 + ], + [ + 1102, + 512 + ], + [ + 1084, + 512 + ], + [ + 1080, + 497 + ], + [ + 1077, + 488 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1000, + 419 + ], + [ + 1003, + 414 + ], + [ + 1016, + 413 + ], + [ + 1045, + 415 + ], + [ + 1065, + 421 + ], + [ + 1077, + 436 + ], + [ + 1092, + 445 + ], + [ + 1098, + 452 + ], + [ + 1097, + 464 + ], + [ + 1097, + 478 + ], + [ + 1095, + 505 + ], + [ + 1094, + 513 + ], + [ + 1089, + 
517 + ], + [ + 1074, + 514 + ], + [ + 1020, + 454 + ], + [ + 1005, + 440 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 938, + 422 + ], + [ + 963, + 413 + ], + [ + 1000, + 413 + ], + [ + 1033, + 418 + ], + [ + 1051, + 423 + ], + [ + 1062, + 439 + ], + [ + 1068, + 449 + ], + [ + 1073, + 449 + ], + [ + 1079, + 449 + ], + [ + 1086, + 455 + ], + [ + 1082, + 460 + ], + [ + 1084, + 474 + ], + [ + 1084, + 494 + ], + [ + 1084, + 514 + ], + [ + 1079, + 525 + ], + [ + 1074, + 531 + ], + [ + 1072, + 537 + ], + [ + 1066, + 541 + ], + [ + 1057, + 542 + ], + [ + 1051, + 534 + ], + [ + 1049, + 526 + ], + [ + 1028, + 527 + ], + [ + 968, + 526 + ], + [ + 962, + 526 + ], + [ + 960, + 536 + ], + [ + 948, + 541 + ], + [ + 936, + 537 + ], + [ + 930, + 515 + ], + [ + 929, + 474 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 732, + 246 + ], + [ + 742, + 236 + ], + [ + 753, + 225 + ], + [ + 762, + 220 + ], + [ + 779, + 221 + ], + [ + 795, + 224 + ], + [ + 810, + 234 + ], + [ + 815, + 235 + ], + [ + 824, + 220 + ], + [ + 832, + 214 + ], + [ + 841, + 212 + ], + [ + 856, + 207 + ], + [ + 866, + 211 + ], + [ + 864, + 218 + ], + [ + 863, + 226 + ], + [ + 864, + 234 + ], + [ + 874, + 235 + ], + [ + 887, + 237 + ], + [ + 901, + 243 + ], + [ + 912, + 242 + ], + [ + 922, + 238 + ], + [ + 937, + 232 + ], + [ + 948, + 230 + ], + [ + 962, + 229 + ], + [ + 980, + 229 + ], + [ + 975, + 234 + ], + [ + 965, + 243 + ], + [ + 976, + 245 + ], + [ + 990, + 248 + ], + [ + 1010, + 257 + ], + [ + 1019, + 260 + ], + [ + 1030, + 261 + ], + [ + 1030, + 271 + ], + [ + 1027, + 276 + ], + [ + 1035, + 281 + ], + [ + 1039, + 284 + ], + [ + 1031, + 290 + ], + [ + 1003, + 296 + ], + [ + 1027, + 305 + ], + [ + 1030, + 309 + ], + [ + 1026, + 317 + ], + [ + 1009, + 320 + ], + [ + 987, + 313 + ], + [ + 963, + 314 + ], + [ + 981, + 336 + ], + [ + 998, + 349 + ], + [ + 1009, + 354 + ], + [ + 1003, + 367 + ], + [ + 991, + 367 + ], + [ + 1011, + 383 + ], + [ + 1015, + 394 + ], + [ + 1010, + 398 + ], + [ + 996, + 394 + ], + [ + 979, + 391 + ], + [ + 970, + 385 + ], + [ + 956, + 382 + ], + [ + 940, + 387 + ], + [ + 935, + 390 + ], + [ + 932, + 390 + ], + [ + 944, + 403 + ], + [ + 960, + 408 + ], + [ + 974, + 420 + ], + [ + 989, + 425 + ], + [ + 1006, + 425 + ], + [ + 1018, + 426 + ], + [ + 1027, + 437 + ], + [ + 1040, + 439 + ], + [ + 1045, + 442 + ], + [ + 1028, + 459 + ], + [ + 1013, + 464 + ], + [ + 1018, + 474 + ], + [ + 1032, + 488 + ], + [ + 1027, + 501 + ], + [ + 1014, + 503 + ], + [ + 999, + 504 + ], + [ + 988, + 514 + ], + [ + 989, + 522 + ], + [ + 986, + 530 + ], + [ + 968, + 539 + ], + [ + 962, + 544 + ], + [ + 968, + 552 + ], + [ + 980, + 556 + ], + [ + 981, + 572 + ], + [ + 948, + 580 + ], + [ + 888, + 603 + ], + [ + 827, + 628 + ], + [ + 813, + 633 + ], + [ + 791, + 649 + ], + [ + 771, + 671 + ], + [ + 742, + 683 + ], + [ + 676, + 691 + ], + [ + 545, + 679 + ], + [ + 477, + 412 + ], + [ + 488, + 400 + ], + [ + 492, + 388 + ], + [ + 490, + 376 + ], + [ + 467, + 375 + ], + [ + 450, + 376 + ], + [ + 445, + 365 + ], + [ + 444, + 351 + ], + [ + 463, + 340 + ], + [ + 484, + 344 + ], + [ + 503, + 350 + ], + [ + 524, + 346 + ], + [ + 527, + 340 + ], + [ + 517, + 326 + ], + [ + 509, + 313 + ], + [ + 503, + 293 + ], + [ + 509, + 280 + ], + [ + 523, + 277 + ], + [ + 549, + 284 + ], + [ + 561, + 286 + ], + [ + 568, + 298 + ], + [ + 569, + 312 + ], + [ + 574, + 313 + ], + [ + 592, + 311 + ], + [ + 618, + 285 + ], + [ + 645, + 260 + ], + [ + 674, + 244 + ], + [ + 691, + 246 + ], + [ + 699, + 254 + ], + [ + 709, + 258 + ], + [ + 
718, + 256 + ], + [ + 728, + 249 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 33, + 353 + ], + [ + 129, + 353 + ], + [ + 211, + 352 + ], + [ + 318, + 356 + ], + [ + 417, + 367 + ], + [ + 456, + 377 + ], + [ + 502, + 411 + ], + [ + 552, + 461 + ], + [ + 579, + 467 + ], + [ + 596, + 468 + ], + [ + 605, + 475 + ], + [ + 615, + 488 + ], + [ + 605, + 504 + ], + [ + 590, + 509 + ], + [ + 604, + 521 + ], + [ + 645, + 547 + ], + [ + 657, + 564 + ], + [ + 666, + 597 + ], + [ + 668, + 659 + ], + [ + 666, + 692 + ], + [ + 659, + 721 + ], + [ + 650, + 740 + ], + [ + 628, + 748 + ], + [ + 591, + 749 + ], + [ + 580, + 741 + ], + [ + 575, + 730 + ], + [ + 553, + 738 + ], + [ + 483, + 761 + ], + [ + 450, + 775 + ], + [ + 422, + 788 + ], + [ + 412, + 810 + ], + [ + 404, + 841 + ], + [ + 387, + 877 + ], + [ + 365, + 888 + ], + [ + 329, + 897 + ], + [ + 287, + 893 + ], + [ + 256, + 873 + ], + [ + 237, + 818 + ], + [ + 68, + 817 + ], + [ + 0, + 821 + ], + [ + 0, + 355 + ], + [ + 6, + 353 + ], + [ + 22, + 353 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 588, + 439 + ], + [ + 596, + 331 + ], + [ + 594, + 232 + ], + [ + 588, + 166 + ], + [ + 596, + 29 + ], + [ + 595, + 0 + ], + [ + 666, + 1 + ], + [ + 665, + 81 + ], + [ + 663, + 145 + ], + [ + 669, + 198 + ], + [ + 670, + 243 + ], + [ + 667, + 286 + ], + [ + 669, + 381 + ], + [ + 669, + 422 + ], + [ + 661, + 442 + ], + [ + 630, + 443 + ], + [ + 601, + 441 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 756, + 2 + ], + [ + 756, + 302 + ], + [ + 741, + 299 + ], + [ + 744, + 0 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1334, + 419 + ], + [ + 1333, + 444 + ], + [ + 1316, + 444 + ], + [ + 1317, + 420 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1298, + 439 + ], + [ + 1306, + 437 + ], + [ + 1335, + 440 + ], + [ + 1329, + 477 + ], + [ + 1317, + 479 + ], + [ + 1309, + 479 + ], + [ + 1308, + 477 + ], + [ + 1305, + 477 + ], + [ + 1302, + 479 + ], + [ + 1294, + 479 + ], + [ + 1288, + 477 + ], + [ + 1286, + 466 + ], + [ + 1288, + 454 + ], + [ + 1292, + 445 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1361, + 421 + ], + [ + 1360, + 406 + ], + [ + 1364, + 396 + ], + [ + 1371, + 396 + ], + [ + 1376, + 408 + ], + [ + 1371, + 424 + ], + [ + 1362, + 431 + ], + [ + 1360, + 428 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1349, + 429 + ], + [ + 1367, + 429 + ], + [ + 1363, + 463 + ], + [ + 1347, + 484 + ], + [ + 1334, + 484 + ], + [ + 1332, + 488 + ], + [ + 1322, + 489 + ], + [ + 1319, + 477 + ], + [ + 1319, + 467 + ], + [ + 1323, + 452 + ], + [ + 1331, + 436 + ], + [ + 1339, + 429 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1373, + 430 + ], + [ + 1373, + 410 + ], + [ + 1363, + 398 + ], + [ + 1358, + 394 + ], + [ + 1354, + 389 + ], + [ + 1351, + 379 + ], + [ + 1363, + 372 + ], + [ + 1364, + 369 + ], + [ + 1364, + 363 + ], + [ + 1347, + 364 + ], + [ + 1336, + 358 + ], + [ + 1329, + 342 + ], + [ + 1316, + 345 + ], + [ + 1283, + 352 + ], + [ + 1275, + 335 + ], + [ + 1280, + 298 + ], + [ + 1269, + 300 + ], + [ + 1259, + 292 + ], + [ + 1257, + 244 + ], + [ + 1262, + 228 + ], + [ + 1276, + 214 + ], + [ + 1280, + 201 + ], + [ + 1269, + 190 + ], + [ + 1250, + 187 + ], + [ + 1227, + 182 + ], + [ + 1214, + 174 + ], + [ + 1208, + 167 + ], + [ + 1204, + 143 + ], + [ + 1205, + 123 + ], + [ + 1225, + 105 + ], + [ + 1213, + 97 + ], + [ + 1199, + 97 + ], + [ + 1189, + 91 + ], + [ + 1183, + 75 + ], + [ + 1195, + 61 + ], + [ + 1218, + 53 + ], + [ + 1247, + 46 + ], + [ + 1249, + 35 + ], + [ + 
1235, + 33 + ], + [ + 1229, + 29 + ], + [ + 1223, + 20 + ], + [ + 1214, + 15 + ], + [ + 1203, + 0 + ], + [ + 1204, + 0 + ], + [ + 1558, + 0 + ], + [ + 1557, + 7 + ], + [ + 1546, + 19 + ], + [ + 1537, + 28 + ], + [ + 1523, + 38 + ], + [ + 1520, + 47 + ], + [ + 1525, + 60 + ], + [ + 1519, + 71 + ], + [ + 1511, + 74 + ], + [ + 1499, + 80 + ], + [ + 1497, + 86 + ], + [ + 1497, + 97 + ], + [ + 1487, + 109 + ], + [ + 1481, + 116 + ], + [ + 1474, + 126 + ], + [ + 1476, + 139 + ], + [ + 1473, + 150 + ], + [ + 1474, + 157 + ], + [ + 1475, + 166 + ], + [ + 1471, + 178 + ], + [ + 1465, + 183 + ], + [ + 1457, + 190 + ], + [ + 1453, + 199 + ], + [ + 1453, + 209 + ], + [ + 1449, + 223 + ], + [ + 1465, + 229 + ], + [ + 1474, + 233 + ], + [ + 1475, + 240 + ], + [ + 1474, + 252 + ], + [ + 1470, + 261 + ], + [ + 1460, + 265 + ], + [ + 1457, + 280 + ], + [ + 1460, + 298 + ], + [ + 1464, + 311 + ], + [ + 1460, + 337 + ], + [ + 1458, + 348 + ], + [ + 1456, + 350 + ], + [ + 1452, + 356 + ], + [ + 1447, + 358 + ], + [ + 1428, + 362 + ], + [ + 1426, + 367 + ], + [ + 1427, + 372 + ], + [ + 1424, + 381 + ], + [ + 1419, + 385 + ], + [ + 1411, + 391 + ], + [ + 1407, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1383, + 422 + ], + [ + 1403, + 423 + ], + [ + 1417, + 430 + ], + [ + 1372, + 493 + ], + [ + 1365, + 495 + ], + [ + 1360, + 504 + ], + [ + 1349, + 500 + ], + [ + 1342, + 491 + ], + [ + 1344, + 465 + ], + [ + 1348, + 450 + ], + [ + 1356, + 434 + ], + [ + 1364, + 425 + ], + [ + 1376, + 422 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1425, + 431 + ], + [ + 1376, + 508 + ], + [ + 1365, + 503 + ], + [ + 1366, + 476 + ], + [ + 1371, + 456 + ], + [ + 1378, + 441 + ], + [ + 1388, + 431 + ], + [ + 1407, + 428 + ], + [ + 1420, + 429 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1425, + 425 + ], + [ + 1442, + 429 + ], + [ + 1396, + 517 + ], + [ + 1385, + 519 + ], + [ + 1372, + 511 + ], + [ + 1375, + 480 + ], + [ + 1385, + 451 + ], + [ + 1399, + 429 + ], + [ + 1411, + 425 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1437, + 422 + ], + [ + 1470, + 424 + ], + [ + 1416, + 528 + ], + [ + 1402, + 524 + ], + [ + 1393, + 514 + ], + [ + 1393, + 497 + ], + [ + 1397, + 478 + ], + [ + 1405, + 460 + ], + [ + 1412, + 444 + ], + [ + 1425, + 425 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1450, + 433 + ], + [ + 1447, + 439 + ], + [ + 1415, + 526 + ], + [ + 1409, + 526 + ], + [ + 1407, + 507 + ], + [ + 1414, + 474 + ], + [ + 1417, + 462 + ], + [ + 1423, + 451 + ], + [ + 1430, + 440 + ], + [ + 1442, + 431 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1473, + 427 + ], + [ + 1489, + 430 + ], + [ + 1433, + 530 + ], + [ + 1420, + 532 + ], + [ + 1413, + 519 + ], + [ + 1416, + 505 + ], + [ + 1417, + 491 + ], + [ + 1417, + 484 + ], + [ + 1421, + 474 + ], + [ + 1418, + 467 + ], + [ + 1416, + 465 + ], + [ + 1422, + 456 + ], + [ + 1427, + 454 + ], + [ + 1438, + 444 + ], + [ + 1447, + 431 + ], + [ + 1462, + 427 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1421, + 131 + ], + [ + 1457, + 131 + ], + [ + 1485, + 138 + ], + [ + 1516, + 162 + ], + [ + 1535, + 187 + ], + [ + 1545, + 217 + ], + [ + 1544, + 407 + ], + [ + 1536, + 408 + ], + [ + 1540, + 219 + ], + [ + 1527, + 184 + ], + [ + 1508, + 160 + ], + [ + 1481, + 144 + ], + [ + 1452, + 135 + ], + [ + 1415, + 133 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1397, + 126 + ], + [ + 1407, + 112 + ], + [ + 1412, + 108 + ], + [ + 1418, + 109 + ], + [ + 1422, + 118 + ], + [ + 1430, + 128 + ], + [ + 1430, + 137 + 
], + [ + 1399, + 138 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1486, + 416 + ], + [ + 1504, + 404 + ], + [ + 1526, + 399 + ], + [ + 1546, + 400 + ], + [ + 1587, + 402 + ], + [ + 1509, + 592 + ], + [ + 1484, + 599 + ], + [ + 1474, + 591 + ], + [ + 1472, + 576 + ], + [ + 1462, + 574 + ], + [ + 1460, + 581 + ], + [ + 1453, + 583 + ], + [ + 1428, + 580 + ], + [ + 1424, + 569 + ], + [ + 1424, + 529 + ], + [ + 1431, + 502 + ], + [ + 1446, + 477 + ], + [ + 1428, + 477 + ], + [ + 1426, + 465 + ], + [ + 1432, + 458 + ], + [ + 1451, + 464 + ], + [ + 1458, + 459 + ], + [ + 1469, + 440 + ], + [ + 1480, + 424 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1739, + 287 + ], + [ + 1734, + 385 + ], + [ + 1727, + 383 + ], + [ + 1728, + 289 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1735, + 245 + ], + [ + 1746, + 249 + ], + [ + 1753, + 258 + ], + [ + 1754, + 270 + ], + [ + 1753, + 284 + ], + [ + 1744, + 295 + ], + [ + 1732, + 297 + ], + [ + 1721, + 294 + ], + [ + 1711, + 286 + ], + [ + 1710, + 270 + ], + [ + 1719, + 253 + ], + [ + 1723, + 249 + ], + [ + 1729, + 246 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1715, + 18 + ], + [ + 1706, + 17 + ], + [ + 1695, + 18 + ], + [ + 1689, + 29 + ], + [ + 1675, + 36 + ], + [ + 1663, + 36 + ], + [ + 1653, + 36 + ], + [ + 1648, + 25 + ], + [ + 1644, + 18 + ], + [ + 1635, + 11 + ], + [ + 1619, + 18 + ], + [ + 1610, + 32 + ], + [ + 1596, + 38 + ], + [ + 1580, + 51 + ], + [ + 1568, + 48 + ], + [ + 1552, + 43 + ], + [ + 1539, + 35 + ], + [ + 1537, + 20 + ], + [ + 1540, + 6 + ], + [ + 1541, + 0 + ], + [ + 1792, + 0 + ], + [ + 1786, + 0 + ], + [ + 1780, + 24 + ], + [ + 1785, + 67 + ], + [ + 1793, + 145 + ], + [ + 1807, + 266 + ], + [ + 1814, + 373 + ], + [ + 1828, + 394 + ], + [ + 1767, + 390 + ], + [ + 1765, + 329 + ], + [ + 1760, + 263 + ], + [ + 1755, + 192 + ], + [ + 1744, + 129 + ], + [ + 1737, + 90 + ], + [ + 1734, + 57 + ], + [ + 1722, + 38 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1870, + 408 + ], + [ + 1874, + 386 + ], + [ + 1891, + 386 + ], + [ + 1903, + 384 + ], + [ + 1897, + 374 + ], + [ + 1887, + 367 + ], + [ + 1889, + 364 + ], + [ + 1900, + 364 + ], + [ + 1911, + 369 + ], + [ + 1914, + 362 + ], + [ + 1910, + 354 + ], + [ + 1914, + 344 + ], + [ + 1904, + 339 + ], + [ + 1894, + 335 + ], + [ + 1901, + 327 + ], + [ + 1907, + 321 + ], + [ + 1903, + 310 + ], + [ + 1901, + 300 + ], + [ + 1908, + 288 + ], + [ + 1924, + 287 + ], + [ + 1944, + 285 + ], + [ + 1975, + 274 + ], + [ + 1992, + 269 + ], + [ + 1998, + 259 + ], + [ + 2002, + 247 + ], + [ + 2013, + 248 + ], + [ + 2026, + 247 + ], + [ + 2025, + 242 + ], + [ + 2019, + 240 + ], + [ + 2023, + 235 + ], + [ + 2038, + 232 + ], + [ + 2044, + 230 + ], + [ + 2039, + 224 + ], + [ + 2038, + 216 + ], + [ + 2041, + 204 + ], + [ + 2039, + 197 + ], + [ + 2048, + 186 + ], + [ + 2048, + 588 + ], + [ + 1925, + 569 + ], + [ + 1894, + 499 + ], + [ + 1880, + 440 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1584, + 387 + ], + [ + 1600, + 375 + ], + [ + 1619, + 365 + ], + [ + 1640, + 365 + ], + [ + 1659, + 371 + ], + [ + 1665, + 376 + ], + [ + 1700, + 377 + ], + [ + 1759, + 378 + ], + [ + 1778, + 377 + ], + [ + 1809, + 371 + ], + [ + 1837, + 377 + ], + [ + 1875, + 394 + ], + [ + 1907, + 436 + ], + [ + 1932, + 492 + ], + [ + 1951, + 552 + ], + [ + 1900, + 642 + ], + [ + 1885, + 698 + ], + [ + 1835, + 696 + ], + [ + 1837, + 658 + ], + [ + 1684, + 654 + ], + [ + 1670, + 654 + ], + [ + 1651, + 654 + ], + [ + 1632, + 650 + ], + [ + 1615, + 654 + ], 
+ [ + 1608, + 651 + ], + [ + 1596, + 650 + ], + [ + 1592, + 672 + ], + [ + 1587, + 685 + ], + [ + 1575, + 693 + ], + [ + 1552, + 693 + ], + [ + 1539, + 685 + ], + [ + 1535, + 671 + ], + [ + 1539, + 633 + ], + [ + 1539, + 623 + ], + [ + 1531, + 618 + ], + [ + 1528, + 616 + ], + [ + 1524, + 625 + ], + [ + 1516, + 631 + ], + [ + 1492, + 627 + ], + [ + 1488, + 609 + ], + [ + 1491, + 536 + ], + [ + 1495, + 503 + ], + [ + 1507, + 488 + ], + [ + 1515, + 475 + ], + [ + 1505, + 468 + ], + [ + 1503, + 454 + ], + [ + 1513, + 447 + ], + [ + 1519, + 447 + ], + [ + 1529, + 455 + ], + [ + 1539, + 438 + ], + [ + 1570, + 398 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1707, + 523 + ], + [ + 1834, + 530 + ], + [ + 1836, + 560 + ], + [ + 1705, + 555 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1993, + 507 + ], + [ + 2025, + 473 + ], + [ + 2048, + 447 + ], + [ + 2048, + 819 + ], + [ + 2005, + 798 + ], + [ + 1983, + 791 + ], + [ + 1977, + 806 + ], + [ + 1952, + 819 + ], + [ + 1926, + 813 + ], + [ + 1902, + 805 + ], + [ + 1885, + 781 + ], + [ + 1875, + 746 + ], + [ + 1871, + 716 + ], + [ + 1872, + 693 + ], + [ + 1882, + 665 + ], + [ + 1889, + 634 + ], + [ + 1900, + 600 + ], + [ + 1908, + 577 + ], + [ + 1924, + 558 + ], + [ + 1946, + 541 + ], + [ + 1957, + 531 + ], + [ + 1965, + 519 + ], + [ + 1974, + 512 + ], + [ + 1988, + 508 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000008_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000008_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..f06dd3a63f4d3bc3f3af8676fa343b7597850940 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000008_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..ef722ded55d8cc18e56140e1125643860fe58d2c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..68a5b4622886083484b73ff1bdd627cf94b3b1cd Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_labelIds.png new file mode 100644 
index 0000000000000000000000000000000000000000..c1d869e5ff0215a19a4237fe0146b69eb00f9eea Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..3d2edeba5cc0b6de564bc41fff59db2401e5593d --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000009_000019_gtFine_polygons.json @@ -0,0 +1,6216 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 453, + 1 + ], + [ + 1057, + 3 + ], + [ + 977, + 192 + ], + [ + 498, + 235 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 658, + 458 + ], + [ + 917, + 456 + ], + [ + 1110, + 457 + ], + [ + 1300, + 484 + ], + [ + 1658, + 567 + ], + [ + 1927, + 654 + ], + [ + 2048, + 707 + ], + [ + 2048, + 1024 + ], + [ + 1, + 1024 + ], + [ + 1, + 531 + ], + [ + 438, + 482 + ], + [ + 596, + 465 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 806, + 450 + ], + [ + 1026, + 450 + ], + [ + 1166, + 462 + ], + [ + 1149, + 472 + ], + [ + 1072, + 479 + ], + [ + 1014, + 484 + ], + [ + 906, + 478 + ], + [ + 775, + 471 + ], + [ + 770, + 466 + ], + [ + 774, + 458 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 640, + 207 + ], + [ + 684, + 186 + ], + [ + 685, + 166 + ], + [ + 691, + 163 + ], + [ + 691, + 155 + ], + [ + 706, + 153 + ], + [ + 713, + 150 + ], + [ + 721, + 145 + ], + [ + 728, + 138 + ], + [ + 741, + 138 + ], + [ + 743, + 131 + ], + [ + 751, + 131 + ], + [ + 778, + 118 + ], + [ + 781, + 106 + ], + [ + 791, + 104 + ], + [ + 792, + 110 + ], + [ + 803, + 110 + ], + [ + 815, + 110 + ], + [ + 822, + 100 + ], + [ + 835, + 98 + ], + [ + 835, + 106 + ], + [ + 835, + 118 + ], + [ + 860, + 118 + ], + [ + 862, + 93 + ], + [ + 885, + 89 + ], + [ + 880, + 85 + ], + [ + 881, + 82 + ], + [ + 905, + 77 + ], + [ + 901, + 71 + ], + [ + 912, + 66 + ], + [ + 915, + 60 + ], + [ + 936, + 45 + ], + [ + 942, + 37 + ], + [ + 962, + 26 + ], + [ + 996, + 10 + ], + [ + 1017, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1 + ], + [ + 2048, + 635 + ], + [ + 1529, + 497 + ], + [ + 1466, + 486 + ], + [ + 1147, + 462 + ], + [ + 1072, + 469 + ], + [ + 1024, + 469 + ], + [ + 830, + 462 + ], + [ + 708, + 467 + ], + [ + 629, + 463 + ], + [ + 489, + 424 + ], + [ + 497, + 274 + ], + [ + 602, + 217 + ], + [ + 623, + 212 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 792, + 327 + ], + [ + 794, + 422 + ], + [ + 790, + 424 + ], + [ + 787, + 328 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 693, + 342 + ], + [ + 749, + 353 + ], + [ + 761, + 361 + ], + [ + 764, + 379 + ], + [ + 768, + 447 + ], + [ + 763, + 445 + ], + [ + 760, + 376 + ], + [ + 758, + 364 + ], + [ + 745, + 355 + ], + [ + 686, + 345 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 755, + 402 + ], + [ + 769, + 403 + ], + [ + 769, + 422 + ], + [ + 755, + 423 + ], + [ + 753, + 421 + ], + [ + 751, + 407 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 784, + 409 + ], + [ + 773, + 423 + ], + [ + 763, + 411 + ], + [ + 775, + 410 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 773, + 396 + ], + [ + 778, + 399 + ], + [ + 780, + 405 + ], + [ + 779, + 410 + ], + [ + 769, + 411 + ], + [ + 765, + 405 + ], + [ + 768, + 399 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 0, + 1 + ], + [ + 
490, + 2 + ], + [ + 498, + 284 + ], + [ + 495, + 456 + ], + [ + 468, + 481 + ], + [ + 402, + 500 + ], + [ + 321, + 512 + ], + [ + 257, + 522 + ], + [ + 201, + 532 + ], + [ + 125, + 534 + ], + [ + 32, + 538 + ], + [ + 0, + 538 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 669, + 301 + ], + [ + 671, + 462 + ], + [ + 667, + 462 + ], + [ + 662, + 302 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 530, + 278 + ], + [ + 569, + 262 + ], + [ + 572, + 279 + ], + [ + 582, + 288 + ], + [ + 597, + 290 + ], + [ + 597, + 284 + ], + [ + 594, + 280 + ], + [ + 593, + 275 + ], + [ + 599, + 268 + ], + [ + 620, + 254 + ], + [ + 651, + 248 + ], + [ + 657, + 260 + ], + [ + 671, + 274 + ], + [ + 673, + 280 + ], + [ + 673, + 291 + ], + [ + 673, + 305 + ], + [ + 678, + 306 + ], + [ + 686, + 302 + ], + [ + 695, + 302 + ], + [ + 699, + 307 + ], + [ + 700, + 324 + ], + [ + 692, + 332 + ], + [ + 693, + 335 + ], + [ + 705, + 336 + ], + [ + 707, + 340 + ], + [ + 706, + 350 + ], + [ + 702, + 357 + ], + [ + 693, + 365 + ], + [ + 689, + 377 + ], + [ + 692, + 383 + ], + [ + 693, + 394 + ], + [ + 693, + 400 + ], + [ + 691, + 415 + ], + [ + 685, + 420 + ], + [ + 664, + 432 + ], + [ + 657, + 427 + ], + [ + 651, + 426 + ], + [ + 645, + 426 + ], + [ + 643, + 433 + ], + [ + 643, + 450 + ], + [ + 637, + 451 + ], + [ + 632, + 438 + ], + [ + 594, + 426 + ], + [ + 544, + 412 + ], + [ + 514, + 401 + ], + [ + 491, + 381 + ], + [ + 489, + 340 + ], + [ + 504, + 296 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 680, + 443 + ], + [ + 704, + 445 + ], + [ + 713, + 453 + ], + [ + 710, + 475 + ], + [ + 706, + 477 + ], + [ + 703, + 475 + ], + [ + 697, + 474 + ], + [ + 697, + 477 + ], + [ + 692, + 477 + ], + [ + 691, + 475 + ], + [ + 681, + 474 + ], + [ + 676, + 478 + ], + [ + 668, + 474 + ], + [ + 665, + 464 + ], + [ + 665, + 456 + ], + [ + 670, + 447 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 715, + 429 + ], + [ + 735, + 426 + ], + [ + 766, + 429 + ], + [ + 767, + 453 + ], + [ + 768, + 468 + ], + [ + 766, + 476 + ], + [ + 761, + 476 + ], + [ + 759, + 473 + ], + [ + 754, + 473 + ], + [ + 750, + 476 + ], + [ + 746, + 476 + ], + [ + 744, + 472 + ], + [ + 730, + 472 + ], + [ + 728, + 476 + ], + [ + 724, + 476 + ], + [ + 723, + 472 + ], + [ + 714, + 473 + ], + [ + 710, + 477 + ], + [ + 706, + 473 + ], + [ + 706, + 460 + ], + [ + 708, + 449 + ], + [ + 706, + 449 + ], + [ + 707, + 440 + ], + [ + 711, + 440 + ], + [ + 712, + 433 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 626, + 451 + ], + [ + 639, + 446 + ], + [ + 663, + 448 + ], + [ + 671, + 461 + ], + [ + 676, + 464 + ], + [ + 676, + 466 + ], + [ + 676, + 471 + ], + [ + 678, + 473 + ], + [ + 676, + 476 + ], + [ + 677, + 477 + ], + [ + 676, + 491 + ], + [ + 672, + 497 + ], + [ + 664, + 493 + ], + [ + 643, + 476 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 770, + 439 + ], + [ + 774, + 432 + ], + [ + 784, + 422 + ], + [ + 788, + 414 + ], + [ + 795, + 414 + ], + [ + 804, + 413 + ], + [ + 815, + 409 + ], + [ + 823, + 411 + ], + [ + 824, + 451 + ], + [ + 831, + 456 + ], + [ + 837, + 460 + ], + [ + 843, + 469 + ], + [ + 825, + 473 + ], + [ + 805, + 477 + ], + [ + 779, + 470 + ], + [ + 771, + 463 + ], + [ + 768, + 456 + ], + [ + 769, + 447 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 617, + 450 + ], + [ + 639, + 448 + ], + [ + 654, + 452 + ], + [ + 664, + 465 + ], + [ + 670, + 476 + ], + [ + 672, + 491 + ], + [ + 672, + 499 + ], + [ + 667, + 502 + ], + [ + 654, + 503 + ], + [ + 647, + 505 + ], + [ + 643, + 505 + ], + 
[ + 637, + 503 + ], + [ + 629, + 503 + ], + [ + 616, + 504 + ], + [ + 607, + 505 + ], + [ + 599, + 505 + ], + [ + 597, + 495 + ], + [ + 600, + 479 + ], + [ + 612, + 461 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 512, + 388 + ], + [ + 537, + 389 + ], + [ + 570, + 387 + ], + [ + 604, + 391 + ], + [ + 618, + 394 + ], + [ + 626, + 404 + ], + [ + 633, + 443 + ], + [ + 634, + 480 + ], + [ + 630, + 492 + ], + [ + 627, + 496 + ], + [ + 622, + 508 + ], + [ + 618, + 510 + ], + [ + 614, + 510 + ], + [ + 608, + 507 + ], + [ + 606, + 499 + ], + [ + 598, + 499 + ], + [ + 587, + 504 + ], + [ + 578, + 506 + ], + [ + 577, + 512 + ], + [ + 574, + 517 + ], + [ + 569, + 517 + ], + [ + 563, + 512 + ], + [ + 561, + 507 + ], + [ + 554, + 507 + ], + [ + 548, + 507 + ], + [ + 541, + 513 + ], + [ + 516, + 508 + ], + [ + 491, + 478 + ], + [ + 474, + 459 + ], + [ + 470, + 442 + ], + [ + 471, + 436 + ], + [ + 480, + 435 + ], + [ + 481, + 438 + ], + [ + 490, + 428 + ], + [ + 495, + 404 + ], + [ + 503, + 394 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 585, + 251 + ], + [ + 583, + 312 + ], + [ + 574, + 313 + ], + [ + 574, + 258 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 483, + 318 + ], + [ + 470, + 298 + ], + [ + 457, + 294 + ], + [ + 443, + 293 + ], + [ + 427, + 296 + ], + [ + 409, + 296 + ], + [ + 390, + 291 + ], + [ + 367, + 293 + ], + [ + 334, + 295 + ], + [ + 324, + 293 + ], + [ + 315, + 286 + ], + [ + 309, + 268 + ], + [ + 306, + 253 + ], + [ + 303, + 232 + ], + [ + 308, + 205 + ], + [ + 329, + 189 + ], + [ + 341, + 179 + ], + [ + 360, + 168 + ], + [ + 368, + 143 + ], + [ + 404, + 7 + ], + [ + 414, + 0 + ], + [ + 523, + 1 + ], + [ + 527, + 8 + ], + [ + 530, + 11 + ], + [ + 537, + 12 + ], + [ + 546, + 13 + ], + [ + 559, + 14 + ], + [ + 569, + 21 + ], + [ + 579, + 22 + ], + [ + 585, + 27 + ], + [ + 591, + 42 + ], + [ + 604, + 49 + ], + [ + 608, + 60 + ], + [ + 612, + 75 + ], + [ + 617, + 75 + ], + [ + 620, + 81 + ], + [ + 624, + 91 + ], + [ + 630, + 95 + ], + [ + 638, + 102 + ], + [ + 641, + 107 + ], + [ + 643, + 118 + ], + [ + 643, + 124 + ], + [ + 629, + 137 + ], + [ + 633, + 142 + ], + [ + 642, + 146 + ], + [ + 651, + 149 + ], + [ + 657, + 153 + ], + [ + 661, + 157 + ], + [ + 669, + 157 + ], + [ + 678, + 156 + ], + [ + 681, + 159 + ], + [ + 683, + 168 + ], + [ + 677, + 179 + ], + [ + 671, + 191 + ], + [ + 674, + 201 + ], + [ + 670, + 207 + ], + [ + 667, + 215 + ], + [ + 676, + 222 + ], + [ + 683, + 228 + ], + [ + 683, + 233 + ], + [ + 677, + 236 + ], + [ + 666, + 237 + ], + [ + 657, + 250 + ], + [ + 657, + 260 + ], + [ + 657, + 271 + ], + [ + 628, + 274 + ], + [ + 619, + 272 + ], + [ + 597, + 274 + ], + [ + 578, + 282 + ], + [ + 561, + 283 + ], + [ + 544, + 288 + ], + [ + 528, + 297 + ], + [ + 513, + 306 + ], + [ + 500, + 324 + ], + [ + 496, + 342 + ], + [ + 504, + 464 + ], + [ + 488, + 464 + ], + [ + 488, + 413 + ], + [ + 481, + 345 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 506, + 503 + ], + [ + 534, + 511 + ], + [ + 556, + 513 + ], + [ + 558, + 518 + ], + [ + 537, + 519 + ], + [ + 508, + 518 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 423, + 459 + ], + [ + 430, + 449 + ], + [ + 449, + 445 + ], + [ + 469, + 443 + ], + [ + 504, + 445 + ], + [ + 517, + 455 + ], + [ + 528, + 480 + ], + [ + 531, + 504 + ], + [ + 528, + 517 + ], + [ + 522, + 521 + ], + [ + 510, + 522 + ], + [ + 507, + 518 + ], + [ + 488, + 519 + ], + [ + 485, + 526 + ], + [ + 470, + 529 + ], + [ + 462, + 524 + ], + [ + 438, + 504 + ] + ] + }, + { + "label": "car", + 
"polygon": [ + [ + 381, + 450 + ], + [ + 409, + 442 + ], + [ + 431, + 445 + ], + [ + 450, + 472 + ], + [ + 457, + 491 + ], + [ + 457, + 505 + ], + [ + 457, + 523 + ], + [ + 456, + 531 + ], + [ + 449, + 533 + ], + [ + 437, + 530 + ], + [ + 422, + 528 + ], + [ + 406, + 528 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 60, + 328 + ], + [ + 76, + 346 + ], + [ + 76, + 363 + ], + [ + 70, + 379 + ], + [ + 62, + 381 + ], + [ + 47, + 380 + ], + [ + 36, + 373 + ], + [ + 35, + 354 + ], + [ + 43, + 339 + ], + [ + 52, + 329 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 58, + 335 + ], + [ + 61, + 457 + ], + [ + 50, + 464 + ], + [ + 53, + 335 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 115, + 295 + ], + [ + 102, + 284 + ], + [ + 78, + 287 + ], + [ + 63, + 298 + ], + [ + 60, + 300 + ], + [ + 46, + 316 + ], + [ + 37, + 320 + ], + [ + 0, + 325 + ], + [ + 0, + 1 + ], + [ + 481, + 1 + ], + [ + 489, + 7 + ], + [ + 500, + 21 + ], + [ + 508, + 34 + ], + [ + 519, + 43 + ], + [ + 529, + 58 + ], + [ + 525, + 70 + ], + [ + 514, + 78 + ], + [ + 500, + 97 + ], + [ + 477, + 110 + ], + [ + 449, + 133 + ], + [ + 422, + 139 + ], + [ + 393, + 143 + ], + [ + 376, + 157 + ], + [ + 348, + 164 + ], + [ + 322, + 169 + ], + [ + 318, + 179 + ], + [ + 335, + 179 + ], + [ + 351, + 184 + ], + [ + 354, + 197 + ], + [ + 349, + 214 + ], + [ + 323, + 241 + ], + [ + 322, + 255 + ], + [ + 325, + 262 + ], + [ + 315, + 270 + ], + [ + 298, + 274 + ], + [ + 279, + 277 + ], + [ + 271, + 288 + ], + [ + 274, + 300 + ], + [ + 276, + 309 + ], + [ + 280, + 317 + ], + [ + 281, + 328 + ], + [ + 277, + 329 + ], + [ + 269, + 326 + ], + [ + 264, + 311 + ], + [ + 263, + 300 + ], + [ + 246, + 298 + ], + [ + 228, + 300 + ], + [ + 214, + 307 + ], + [ + 194, + 309 + ], + [ + 187, + 308 + ], + [ + 169, + 313 + ], + [ + 163, + 322 + ], + [ + 149, + 331 + ], + [ + 139, + 336 + ], + [ + 140, + 412 + ], + [ + 150, + 394 + ], + [ + 167, + 380 + ], + [ + 178, + 371 + ], + [ + 189, + 370 + ], + [ + 196, + 371 + ], + [ + 200, + 377 + ], + [ + 215, + 393 + ], + [ + 218, + 398 + ], + [ + 210, + 406 + ], + [ + 193, + 410 + ], + [ + 195, + 423 + ], + [ + 198, + 438 + ], + [ + 196, + 475 + ], + [ + 187, + 540 + ], + [ + 192, + 564 + ], + [ + 182, + 574 + ], + [ + 152, + 578 + ], + [ + 115, + 580 + ], + [ + 102, + 519 + ], + [ + 94, + 490 + ], + [ + 90, + 457 + ], + [ + 97, + 443 + ], + [ + 112, + 415 + ], + [ + 112, + 367 + ], + [ + 102, + 363 + ], + [ + 94, + 355 + ], + [ + 84, + 348 + ], + [ + 75, + 341 + ], + [ + 71, + 334 + ], + [ + 90, + 333 + ], + [ + 113, + 339 + ], + [ + 113, + 322 + ], + [ + 113, + 305 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 222, + 436 + ], + [ + 240, + 419 + ], + [ + 288, + 408 + ], + [ + 334, + 408 + ], + [ + 386, + 409 + ], + [ + 400, + 417 + ], + [ + 416, + 469 + ], + [ + 418, + 529 + ], + [ + 418, + 546 + ], + [ + 413, + 554 + ], + [ + 403, + 557 + ], + [ + 396, + 556 + ], + [ + 390, + 550 + ], + [ + 387, + 546 + ], + [ + 374, + 545 + ], + [ + 362, + 545 + ], + [ + 354, + 547 + ], + [ + 353, + 554 + ], + [ + 351, + 561 + ], + [ + 348, + 565 + ], + [ + 335, + 566 + ], + [ + 326, + 564 + ], + [ + 321, + 556 + ], + [ + 304, + 557 + ], + [ + 290, + 560 + ], + [ + 278, + 566 + ], + [ + 266, + 565 + ], + [ + 262, + 560 + ], + [ + 240, + 560 + ], + [ + 217, + 563 + ], + [ + 213, + 574 + ], + [ + 201, + 578 + ], + [ + 187, + 575 + ], + [ + 180, + 564 + ], + [ + 177, + 549 + ], + [ + 173, + 529 + ], + [ + 172, + 504 + ], + [ + 185, + 481 + ], + [ + 190, + 473 + ], + [ + 195, + 462 + ], + [ + 
204, + 458 + ], + [ + 213, + 450 + ], + [ + 218, + 442 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 29, + 450 + ], + [ + 62, + 449 + ], + [ + 90, + 455 + ], + [ + 118, + 486 + ], + [ + 130, + 525 + ], + [ + 128, + 565 + ], + [ + 118, + 580 + ], + [ + 87, + 584 + ], + [ + 72, + 576 + ], + [ + 36, + 580 + ], + [ + 18, + 584 + ], + [ + 7, + 596 + ], + [ + 0, + 598 + ], + [ + 0, + 457 + ], + [ + 12, + 451 + ], + [ + 22, + 450 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1120, + 477 + ], + [ + 1112, + 479 + ], + [ + 1102, + 479 + ], + [ + 1084, + 479 + ], + [ + 1072, + 480 + ], + [ + 1043, + 484 + ], + [ + 1129, + 485 + ], + [ + 1137, + 486 + ], + [ + 1153, + 475 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1120, + 477 + ], + [ + 1112, + 479 + ], + [ + 1102, + 479 + ], + [ + 1084, + 479 + ], + [ + 1072, + 480 + ], + [ + 1043, + 484 + ], + [ + 1129, + 485 + ], + [ + 1137, + 486 + ], + [ + 1153, + 475 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 815, + 386 + ], + [ + 805, + 373 + ], + [ + 803, + 361 + ], + [ + 799, + 355 + ], + [ + 794, + 353 + ], + [ + 784, + 344 + ], + [ + 780, + 339 + ], + [ + 772, + 339 + ], + [ + 764, + 339 + ], + [ + 756, + 339 + ], + [ + 747, + 333 + ], + [ + 745, + 320 + ], + [ + 745, + 301 + ], + [ + 742, + 294 + ], + [ + 742, + 283 + ], + [ + 746, + 274 + ], + [ + 745, + 265 + ], + [ + 745, + 256 + ], + [ + 753, + 244 + ], + [ + 762, + 243 + ], + [ + 774, + 239 + ], + [ + 783, + 228 + ], + [ + 787, + 222 + ], + [ + 798, + 214 + ], + [ + 801, + 197 + ], + [ + 801, + 182 + ], + [ + 812, + 170 + ], + [ + 820, + 165 + ], + [ + 822, + 173 + ], + [ + 825, + 191 + ], + [ + 831, + 202 + ], + [ + 841, + 209 + ], + [ + 847, + 209 + ], + [ + 857, + 215 + ], + [ + 862, + 232 + ], + [ + 871, + 238 + ], + [ + 873, + 237 + ], + [ + 879, + 235 + ], + [ + 884, + 235 + ], + [ + 892, + 247 + ], + [ + 893, + 264 + ], + [ + 892, + 275 + ], + [ + 893, + 289 + ], + [ + 898, + 309 + ], + [ + 902, + 336 + ], + [ + 901, + 349 + ], + [ + 902, + 359 + ], + [ + 906, + 369 + ], + [ + 907, + 377 + ], + [ + 891, + 382 + ], + [ + 881, + 382 + ], + [ + 877, + 387 + ], + [ + 859, + 388 + ], + [ + 842, + 380 + ], + [ + 835, + 388 + ], + [ + 826, + 393 + ], + [ + 829, + 447 + ], + [ + 835, + 462 + ], + [ + 836, + 470 + ], + [ + 830, + 475 + ], + [ + 819, + 475 + ], + [ + 816, + 471 + ], + [ + 818, + 455 + ], + [ + 818, + 438 + ], + [ + 817, + 397 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 849, + 407 + ], + [ + 850, + 392 + ], + [ + 855, + 386 + ], + [ + 859, + 386 + ], + [ + 865, + 395 + ], + [ + 867, + 410 + ], + [ + 870, + 427 + ], + [ + 870, + 444 + ], + [ + 865, + 459 + ], + [ + 859, + 472 + ], + [ + 849, + 470 + ], + [ + 844, + 466 + ], + [ + 842, + 455 + ], + [ + 844, + 442 + ], + [ + 846, + 429 + ], + [ + 849, + 411 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 920, + 409 + ], + [ + 922, + 475 + ], + [ + 895, + 476 + ], + [ + 859, + 474 + ], + [ + 856, + 422 + ], + [ + 847, + 417 + ], + [ + 848, + 416 + ], + [ + 875, + 411 + ], + [ + 914, + 410 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 913, + 408 + ], + [ + 909, + 477 + ], + [ + 907, + 475 + ], + [ + 909, + 408 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 920, + 382 + ], + [ + 920, + 410 + ], + [ + 900, + 411 + ], + [ + 900, + 381 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 890, + 434 + ], + [ + 888, + 465 + ], + [ + 888, + 476 + ], + [ + 880, + 476 + ], + [ + 881, + 470 + ], + [ + 878, + 464 + ], + [ + 878, 
+ 455 + ], + [ + 881, + 442 + ], + [ + 882, + 432 + ], + [ + 884, + 429 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 917, + 436 + ], + [ + 918, + 460 + ], + [ + 903, + 461 + ], + [ + 902, + 436 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 976, + 420 + ], + [ + 976, + 405 + ], + [ + 966, + 399 + ], + [ + 959, + 396 + ], + [ + 948, + 396 + ], + [ + 937, + 392 + ], + [ + 934, + 381 + ], + [ + 921, + 375 + ], + [ + 909, + 369 + ], + [ + 904, + 362 + ], + [ + 898, + 350 + ], + [ + 896, + 332 + ], + [ + 892, + 302 + ], + [ + 891, + 296 + ], + [ + 893, + 288 + ], + [ + 891, + 281 + ], + [ + 892, + 276 + ], + [ + 902, + 274 + ], + [ + 910, + 282 + ], + [ + 922, + 284 + ], + [ + 924, + 279 + ], + [ + 913, + 274 + ], + [ + 906, + 268 + ], + [ + 903, + 261 + ], + [ + 902, + 252 + ], + [ + 902, + 243 + ], + [ + 909, + 240 + ], + [ + 919, + 248 + ], + [ + 929, + 249 + ], + [ + 933, + 235 + ], + [ + 933, + 227 + ], + [ + 934, + 222 + ], + [ + 939, + 217 + ], + [ + 949, + 214 + ], + [ + 971, + 208 + ], + [ + 997, + 203 + ], + [ + 1019, + 223 + ], + [ + 1028, + 232 + ], + [ + 1027, + 255 + ], + [ + 1031, + 261 + ], + [ + 1041, + 266 + ], + [ + 1051, + 271 + ], + [ + 1053, + 281 + ], + [ + 1046, + 292 + ], + [ + 1039, + 302 + ], + [ + 1039, + 308 + ], + [ + 1045, + 313 + ], + [ + 1059, + 322 + ], + [ + 1062, + 345 + ], + [ + 1058, + 352 + ], + [ + 1054, + 355 + ], + [ + 1042, + 366 + ], + [ + 1028, + 373 + ], + [ + 1019, + 381 + ], + [ + 994, + 396 + ], + [ + 985, + 406 + ], + [ + 983, + 421 + ], + [ + 992, + 422 + ], + [ + 1017, + 420 + ], + [ + 1019, + 423 + ], + [ + 1020, + 436 + ], + [ + 1012, + 451 + ], + [ + 1007, + 468 + ], + [ + 1024, + 467 + ], + [ + 1028, + 473 + ], + [ + 1011, + 478 + ], + [ + 972, + 478 + ], + [ + 944, + 480 + ], + [ + 933, + 476 + ], + [ + 932, + 466 + ], + [ + 946, + 460 + ], + [ + 950, + 449 + ], + [ + 950, + 437 + ], + [ + 955, + 428 + ], + [ + 964, + 422 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1168, + 498 + ], + [ + 1163, + 499 + ], + [ + 1214, + 533 + ], + [ + 1214, + 521 + ], + [ + 1220, + 521 + ], + [ + 1234, + 475 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1004, + 384 + ], + [ + 1006, + 471 + ], + [ + 1004, + 471 + ], + [ + 1000, + 383 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1168, + 498 + ], + [ + 1163, + 499 + ], + [ + 1214, + 533 + ], + [ + 1214, + 521 + ], + [ + 1220, + 521 + ], + [ + 1234, + 475 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 998, + 388 + ], + [ + 996, + 477 + ], + [ + 1000, + 479 + ], + [ + 1003, + 382 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1030, + 379 + ], + [ + 1029, + 390 + ], + [ + 988, + 390 + ], + [ + 988, + 382 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1029, + 368 + ], + [ + 1029, + 377 + ], + [ + 988, + 381 + ], + [ + 988, + 371 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1102, + 434 + ], + [ + 1117, + 432 + ], + [ + 1141, + 430 + ], + [ + 1151, + 437 + ], + [ + 1148, + 468 + ], + [ + 1137, + 473 + ], + [ + 1130, + 477 + ], + [ + 1124, + 480 + ], + [ + 1117, + 477 + ], + [ + 1115, + 477 + ], + [ + 1111, + 482 + ], + [ + 1101, + 481 + ], + [ + 1098, + 478 + ], + [ + 1088, + 479 + ], + [ + 1078, + 482 + ], + [ + 1072, + 479 + ], + [ + 1068, + 468 + ], + [ + 1070, + 456 + ], + [ + 1077, + 449 + ], + [ + 1094, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1163, + 427 + ], + [ + 1178, + 425 + ], + [ + 1194, + 422 + ], + [ + 1183, + 468 + ], + [ + 1149, + 478 + ], + [ + 1142, + 
477 + ], + [ + 1138, + 471 + ], + [ + 1136, + 459 + ], + [ + 1137, + 451 + ], + [ + 1151, + 435 + ], + [ + 1156, + 429 + ], + [ + 1159, + 428 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1099, + 245 + ], + [ + 1100, + 240 + ], + [ + 1094, + 232 + ], + [ + 1082, + 228 + ], + [ + 1059, + 228 + ], + [ + 1040, + 233 + ], + [ + 1014, + 239 + ], + [ + 993, + 243 + ], + [ + 972, + 244 + ], + [ + 962, + 236 + ], + [ + 962, + 227 + ], + [ + 962, + 213 + ], + [ + 965, + 193 + ], + [ + 970, + 178 + ], + [ + 984, + 171 + ], + [ + 984, + 159 + ], + [ + 984, + 150 + ], + [ + 987, + 139 + ], + [ + 980, + 129 + ], + [ + 970, + 124 + ], + [ + 962, + 116 + ], + [ + 954, + 103 + ], + [ + 949, + 89 + ], + [ + 947, + 77 + ], + [ + 935, + 77 + ], + [ + 906, + 65 + ], + [ + 902, + 46 + ], + [ + 887, + 35 + ], + [ + 878, + 20 + ], + [ + 876, + 10 + ], + [ + 892, + 9 + ], + [ + 906, + 8 + ], + [ + 920, + 6 + ], + [ + 939, + 0 + ], + [ + 1218, + 0 + ], + [ + 1215, + 207 + ], + [ + 1217, + 219 + ], + [ + 1226, + 223 + ], + [ + 1233, + 228 + ], + [ + 1232, + 242 + ], + [ + 1228, + 253 + ], + [ + 1238, + 250 + ], + [ + 1246, + 258 + ], + [ + 1243, + 274 + ], + [ + 1232, + 294 + ], + [ + 1227, + 302 + ], + [ + 1232, + 306 + ], + [ + 1241, + 307 + ], + [ + 1247, + 313 + ], + [ + 1243, + 319 + ], + [ + 1228, + 323 + ], + [ + 1217, + 331 + ], + [ + 1210, + 341 + ], + [ + 1215, + 358 + ], + [ + 1216, + 370 + ], + [ + 1215, + 381 + ], + [ + 1211, + 394 + ], + [ + 1211, + 402 + ], + [ + 1218, + 402 + ], + [ + 1222, + 403 + ], + [ + 1227, + 410 + ], + [ + 1229, + 421 + ], + [ + 1237, + 437 + ], + [ + 1168, + 492 + ], + [ + 1157, + 495 + ], + [ + 1148, + 490 + ], + [ + 1145, + 481 + ], + [ + 1152, + 457 + ], + [ + 1155, + 449 + ], + [ + 1159, + 440 + ], + [ + 1157, + 418 + ], + [ + 1152, + 412 + ], + [ + 1146, + 394 + ], + [ + 1144, + 379 + ], + [ + 1127, + 354 + ], + [ + 1111, + 339 + ], + [ + 1106, + 324 + ], + [ + 1098, + 305 + ], + [ + 1089, + 296 + ], + [ + 1086, + 289 + ], + [ + 1087, + 284 + ], + [ + 1094, + 284 + ], + [ + 1104, + 290 + ], + [ + 1107, + 287 + ], + [ + 1105, + 277 + ], + [ + 1104, + 270 + ], + [ + 1103, + 261 + ], + [ + 1102, + 258 + ], + [ + 1101, + 250 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1177, + 441 + ], + [ + 1184, + 430 + ], + [ + 1209, + 424 + ], + [ + 1237, + 425 + ], + [ + 1250, + 433 + ], + [ + 1224, + 495 + ], + [ + 1203, + 497 + ], + [ + 1189, + 499 + ], + [ + 1189, + 504 + ], + [ + 1185, + 505 + ], + [ + 1174, + 505 + ], + [ + 1167, + 499 + ], + [ + 1169, + 477 + ], + [ + 1169, + 458 + ], + [ + 1163, + 458 + ], + [ + 1160, + 451 + ], + [ + 1160, + 448 + ], + [ + 1172, + 446 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1285, + 380 + ], + [ + 1286, + 426 + ], + [ + 1282, + 422 + ], + [ + 1281, + 381 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1291, + 374 + ], + [ + 1291, + 384 + ], + [ + 1272, + 384 + ], + [ + 1272, + 373 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1292, + 364 + ], + [ + 1291, + 374 + ], + [ + 1271, + 373 + ], + [ + 1271, + 366 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1281, + 342 + ], + [ + 1291, + 346 + ], + [ + 1291, + 356 + ], + [ + 1287, + 364 + ], + [ + 1277, + 364 + ], + [ + 1272, + 362 + ], + [ + 1269, + 355 + ], + [ + 1272, + 347 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1243, + 419 + ], + [ + 1267, + 415 + ], + [ + 1291, + 415 + ], + [ + 1305, + 420 + ], + [ + 1301, + 484 + ], + [ + 1262, + 511 + ], + [ + 1219, + 522 + ], + [ + 1215, + 519 + 
], + [ + 1213, + 496 + ], + [ + 1214, + 477 + ], + [ + 1218, + 458 + ], + [ + 1208, + 457 + ], + [ + 1204, + 454 + ], + [ + 1204, + 450 + ], + [ + 1209, + 448 + ], + [ + 1222, + 452 + ], + [ + 1229, + 431 + ], + [ + 1235, + 422 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1277, + 529 + ], + [ + 1224, + 535 + ], + [ + 1214, + 531 + ], + [ + 1214, + 522 + ], + [ + 1238, + 516 + ], + [ + 1271, + 511 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1252, + 453 + ], + [ + 1258, + 518 + ], + [ + 1248, + 518 + ], + [ + 1244, + 450 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1261, + 531 + ], + [ + 1217, + 534 + ], + [ + 1404, + 651 + ], + [ + 1414, + 627 + ], + [ + 1558, + 596 + ], + [ + 1419, + 526 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1261, + 531 + ], + [ + 1217, + 534 + ], + [ + 1404, + 651 + ], + [ + 1414, + 627 + ], + [ + 1558, + 596 + ], + [ + 1419, + 526 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1689, + 719 + ], + [ + 1525, + 728 + ], + [ + 1989, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 809 + ], + [ + 1744, + 691 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1235, + 222 + ], + [ + 1223, + 213 + ], + [ + 1191, + 212 + ], + [ + 1173, + 212 + ], + [ + 1126, + 226 + ], + [ + 1100, + 215 + ], + [ + 1084, + 196 + ], + [ + 1076, + 174 + ], + [ + 1083, + 153 + ], + [ + 1078, + 129 + ], + [ + 1068, + 107 + ], + [ + 1068, + 83 + ], + [ + 1071, + 65 + ], + [ + 1086, + 48 + ], + [ + 1105, + 40 + ], + [ + 1137, + 31 + ], + [ + 1178, + 20 + ], + [ + 1203, + 11 + ], + [ + 1224, + 0 + ], + [ + 1295, + 0 + ], + [ + 1319, + 1 + ], + [ + 1335, + 11 + ], + [ + 1346, + 24 + ], + [ + 1352, + 32 + ], + [ + 1342, + 43 + ], + [ + 1333, + 55 + ], + [ + 1336, + 69 + ], + [ + 1336, + 77 + ], + [ + 1342, + 81 + ], + [ + 1355, + 87 + ], + [ + 1365, + 87 + ], + [ + 1372, + 88 + ], + [ + 1376, + 80 + ], + [ + 1377, + 73 + ], + [ + 1384, + 68 + ], + [ + 1396, + 66 + ], + [ + 1409, + 83 + ], + [ + 1417, + 86 + ], + [ + 1421, + 91 + ], + [ + 1413, + 103 + ], + [ + 1406, + 110 + ], + [ + 1408, + 116 + ], + [ + 1413, + 122 + ], + [ + 1402, + 139 + ], + [ + 1405, + 146 + ], + [ + 1410, + 150 + ], + [ + 1411, + 156 + ], + [ + 1393, + 171 + ], + [ + 1382, + 173 + ], + [ + 1367, + 180 + ], + [ + 1362, + 178 + ], + [ + 1352, + 181 + ], + [ + 1348, + 187 + ], + [ + 1349, + 202 + ], + [ + 1335, + 202 + ], + [ + 1320, + 199 + ], + [ + 1313, + 197 + ], + [ + 1309, + 205 + ], + [ + 1305, + 213 + ], + [ + 1292, + 221 + ], + [ + 1278, + 221 + ], + [ + 1270, + 226 + ], + [ + 1264, + 230 + ], + [ + 1252, + 231 + ], + [ + 1250, + 242 + ], + [ + 1252, + 265 + ], + [ + 1256, + 275 + ], + [ + 1257, + 303 + ], + [ + 1264, + 370 + ], + [ + 1270, + 415 + ], + [ + 1277, + 452 + ], + [ + 1281, + 489 + ], + [ + 1265, + 517 + ], + [ + 1260, + 436 + ], + [ + 1249, + 375 + ], + [ + 1240, + 295 + ], + [ + 1238, + 243 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1400, + 333 + ], + [ + 1404, + 421 + ], + [ + 1398, + 418 + ], + [ + 1395, + 331 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1394, + 304 + ], + [ + 1406, + 311 + ], + [ + 1409, + 325 + ], + [ + 1407, + 335 + ], + [ + 1402, + 340 + ], + [ + 1390, + 339 + ], + [ + 1382, + 331 + ], + [ + 1382, + 322 + ], + [ + 1382, + 311 + ], + [ + 1387, + 308 + ], + [ + 1390, + 307 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1283, + 440 + ], + [ + 1290, + 428 + ], + [ + 1306, + 414 + ], + [ + 1349, + 409 + ], + [ + 1387, + 408 + ], + [ + 1418, + 408 + ], + [ + 1444, + 
413 + ], + [ + 1463, + 439 + ], + [ + 1475, + 465 + ], + [ + 1488, + 533 + ], + [ + 1490, + 550 + ], + [ + 1486, + 565 + ], + [ + 1479, + 572 + ], + [ + 1464, + 573 + ], + [ + 1457, + 562 + ], + [ + 1456, + 551 + ], + [ + 1446, + 552 + ], + [ + 1440, + 561 + ], + [ + 1431, + 563 + ], + [ + 1418, + 560 + ], + [ + 1415, + 552 + ], + [ + 1389, + 553 + ], + [ + 1371, + 554 + ], + [ + 1349, + 554 + ], + [ + 1335, + 557 + ], + [ + 1322, + 561 + ], + [ + 1320, + 563 + ], + [ + 1318, + 577 + ], + [ + 1313, + 582 + ], + [ + 1301, + 584 + ], + [ + 1293, + 580 + ], + [ + 1288, + 571 + ], + [ + 1272, + 569 + ], + [ + 1266, + 566 + ], + [ + 1260, + 534 + ], + [ + 1265, + 492 + ], + [ + 1266, + 480 + ], + [ + 1262, + 477 + ], + [ + 1256, + 475 + ], + [ + 1252, + 466 + ], + [ + 1257, + 462 + ], + [ + 1262, + 460 + ], + [ + 1271, + 461 + ], + [ + 1276, + 465 + ], + [ + 1279, + 454 + ], + [ + 1280, + 448 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1689, + 719 + ], + [ + 1525, + 728 + ], + [ + 1989, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 809 + ], + [ + 1744, + 691 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1486, + 475 + ], + [ + 1492, + 595 + ], + [ + 1480, + 597 + ], + [ + 1474, + 475 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1462, + 601 + ], + [ + 1471, + 586 + ], + [ + 1479, + 586 + ], + [ + 1491, + 588 + ], + [ + 1509, + 590 + ], + [ + 1766, + 590 + ], + [ + 1785, + 717 + ], + [ + 1684, + 721 + ], + [ + 1518, + 727 + ], + [ + 1492, + 710 + ], + [ + 1427, + 667 + ], + [ + 1403, + 651 + ], + [ + 1403, + 640 + ], + [ + 1413, + 624 + ], + [ + 1427, + 617 + ], + [ + 1440, + 613 + ], + [ + 1461, + 606 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1500, + 560 + ], + [ + 1530, + 564 + ], + [ + 1608, + 564 + ], + [ + 1596, + 625 + ], + [ + 1533, + 635 + ], + [ + 1507, + 622 + ], + [ + 1499, + 592 + ], + [ + 1496, + 577 + ], + [ + 1494, + 568 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1464, + 283 + ], + [ + 1454, + 282 + ], + [ + 1449, + 279 + ], + [ + 1436, + 275 + ], + [ + 1431, + 270 + ], + [ + 1442, + 263 + ], + [ + 1457, + 262 + ], + [ + 1472, + 255 + ], + [ + 1480, + 243 + ], + [ + 1474, + 228 + ], + [ + 1461, + 223 + ], + [ + 1459, + 215 + ], + [ + 1465, + 205 + ], + [ + 1465, + 196 + ], + [ + 1458, + 190 + ], + [ + 1443, + 195 + ], + [ + 1438, + 193 + ], + [ + 1437, + 184 + ], + [ + 1441, + 170 + ], + [ + 1441, + 165 + ], + [ + 1444, + 155 + ], + [ + 1449, + 148 + ], + [ + 1455, + 153 + ], + [ + 1462, + 165 + ], + [ + 1468, + 175 + ], + [ + 1476, + 187 + ], + [ + 1484, + 188 + ], + [ + 1487, + 176 + ], + [ + 1490, + 165 + ], + [ + 1499, + 160 + ], + [ + 1506, + 156 + ], + [ + 1510, + 147 + ], + [ + 1508, + 140 + ], + [ + 1509, + 127 + ], + [ + 1515, + 118 + ], + [ + 1517, + 106 + ], + [ + 1540, + 92 + ], + [ + 1532, + 85 + ], + [ + 1537, + 76 + ], + [ + 1534, + 71 + ], + [ + 1521, + 72 + ], + [ + 1515, + 78 + ], + [ + 1502, + 77 + ], + [ + 1492, + 70 + ], + [ + 1484, + 69 + ], + [ + 1473, + 72 + ], + [ + 1460, + 75 + ], + [ + 1451, + 76 + ], + [ + 1426, + 60 + ], + [ + 1414, + 47 + ], + [ + 1409, + 32 + ], + [ + 1402, + 26 + ], + [ + 1398, + 17 + ], + [ + 1387, + 11 + ], + [ + 1379, + 5 + ], + [ + 1379, + 0 + ], + [ + 1831, + 0 + ], + [ + 1829, + 2 + ], + [ + 1829, + 13 + ], + [ + 1837, + 22 + ], + [ + 1851, + 35 + ], + [ + 1851, + 48 + ], + [ + 1850, + 57 + ], + [ + 1852, + 69 + ], + [ + 1841, + 79 + ], + [ + 1837, + 90 + ], + [ + 1843, + 95 + ], + [ + 1852, + 106 + ], + [ + 1875, + 109 + ], + [ + 1886, + 100 + ], 
+ [ + 1891, + 93 + ], + [ + 1901, + 92 + ], + [ + 1913, + 98 + ], + [ + 1917, + 114 + ], + [ + 1899, + 135 + ], + [ + 1888, + 160 + ], + [ + 1901, + 167 + ], + [ + 1907, + 177 + ], + [ + 1918, + 184 + ], + [ + 1930, + 184 + ], + [ + 1943, + 187 + ], + [ + 1956, + 197 + ], + [ + 1940, + 209 + ], + [ + 1936, + 215 + ], + [ + 1944, + 226 + ], + [ + 1973, + 218 + ], + [ + 1981, + 223 + ], + [ + 1987, + 229 + ], + [ + 1999, + 232 + ], + [ + 2023, + 232 + ], + [ + 2039, + 230 + ], + [ + 2048, + 228 + ], + [ + 2048, + 332 + ], + [ + 2041, + 337 + ], + [ + 2033, + 334 + ], + [ + 2023, + 339 + ], + [ + 2022, + 350 + ], + [ + 2021, + 367 + ], + [ + 1721, + 581 + ], + [ + 1679, + 651 + ], + [ + 1656, + 667 + ], + [ + 1637, + 666 + ], + [ + 1619, + 657 + ], + [ + 1597, + 656 + ], + [ + 1581, + 648 + ], + [ + 1573, + 641 + ], + [ + 1572, + 627 + ], + [ + 1589, + 620 + ], + [ + 1598, + 610 + ], + [ + 1597, + 600 + ], + [ + 1588, + 598 + ], + [ + 1577, + 605 + ], + [ + 1558, + 599 + ], + [ + 1557, + 586 + ], + [ + 1551, + 571 + ], + [ + 1532, + 566 + ], + [ + 1501, + 556 + ], + [ + 1493, + 543 + ], + [ + 1499, + 528 + ], + [ + 1501, + 514 + ], + [ + 1493, + 502 + ], + [ + 1487, + 503 + ], + [ + 1469, + 508 + ], + [ + 1460, + 507 + ], + [ + 1456, + 502 + ], + [ + 1447, + 506 + ], + [ + 1429, + 510 + ], + [ + 1433, + 496 + ], + [ + 1450, + 484 + ], + [ + 1438, + 472 + ], + [ + 1421, + 472 + ], + [ + 1402, + 475 + ], + [ + 1379, + 481 + ], + [ + 1365, + 490 + ], + [ + 1358, + 498 + ], + [ + 1349, + 501 + ], + [ + 1349, + 491 + ], + [ + 1372, + 457 + ], + [ + 1400, + 420 + ], + [ + 1414, + 413 + ], + [ + 1416, + 394 + ], + [ + 1393, + 412 + ], + [ + 1390, + 409 + ], + [ + 1395, + 394 + ], + [ + 1404, + 380 + ], + [ + 1406, + 366 + ], + [ + 1407, + 354 + ], + [ + 1411, + 346 + ], + [ + 1419, + 346 + ], + [ + 1432, + 346 + ], + [ + 1439, + 338 + ], + [ + 1434, + 333 + ], + [ + 1420, + 332 + ], + [ + 1405, + 333 + ], + [ + 1392, + 333 + ], + [ + 1390, + 329 + ], + [ + 1391, + 320 + ], + [ + 1402, + 314 + ], + [ + 1428, + 311 + ], + [ + 1442, + 310 + ], + [ + 1449, + 309 + ], + [ + 1462, + 288 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1520, + 643 + ], + [ + 1535, + 640 + ], + [ + 1557, + 641 + ], + [ + 1573, + 639 + ], + [ + 1580, + 635 + ], + [ + 1584, + 651 + ], + [ + 1563, + 655 + ], + [ + 1548, + 657 + ], + [ + 1532, + 658 + ], + [ + 1526, + 658 + ], + [ + 1520, + 653 + ], + [ + 1516, + 641 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1551, + 490 + ], + [ + 1560, + 519 + ], + [ + 1565, + 668 + ], + [ + 1554, + 667 + ], + [ + 1547, + 582 + ], + [ + 1541, + 641 + ], + [ + 1529, + 642 + ], + [ + 1530, + 546 + ], + [ + 1529, + 505 + ], + [ + 1537, + 487 + ], + [ + 1545, + 486 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1655, + 680 + ], + [ + 1669, + 628 + ], + [ + 1681, + 589 + ], + [ + 1692, + 601 + ], + [ + 1684, + 633 + ], + [ + 1675, + 673 + ], + [ + 1667, + 684 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1756, + 491 + ], + [ + 1793, + 435 + ], + [ + 1820, + 400 + ], + [ + 1861, + 375 + ], + [ + 1923, + 355 + ], + [ + 2001, + 348 + ], + [ + 2048, + 340 + ], + [ + 2048, + 883 + ], + [ + 2038, + 915 + ], + [ + 2022, + 921 + ], + [ + 1994, + 924 + ], + [ + 1959, + 906 + ], + [ + 1945, + 876 + ], + [ + 1931, + 839 + ], + [ + 1924, + 816 + ], + [ + 1921, + 791 + ], + [ + 1903, + 778 + ], + [ + 1873, + 768 + ], + [ + 1844, + 757 + ], + [ + 1834, + 754 + ], + [ + 1817, + 748 + ], + [ + 1806, + 743 + ], + [ + 1792, + 737 + ], + [ + 1772, + 728 + ], + [ + 1768, + 
732 + ], + [ + 1765, + 750 + ], + [ + 1761, + 764 + ], + [ + 1754, + 771 + ], + [ + 1742, + 775 + ], + [ + 1723, + 771 + ], + [ + 1706, + 768 + ], + [ + 1688, + 743 + ], + [ + 1681, + 684 + ], + [ + 1677, + 631 + ], + [ + 1678, + 596 + ], + [ + 1683, + 574 + ], + [ + 1697, + 552 + ], + [ + 1713, + 537 + ], + [ + 1709, + 527 + ], + [ + 1705, + 517 + ], + [ + 1709, + 504 + ], + [ + 1718, + 496 + ], + [ + 1725, + 493 + ], + [ + 1742, + 491 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1364, + 514 + ], + [ + 1427, + 511 + ], + [ + 1428, + 524 + ], + [ + 1364, + 530 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000011_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000011_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..7b8858e99e93510ef77d9bf5c32ea41a2b51f617 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000011_000019_gtFine_polygons.json @@ -0,0 +1,4421 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1265, + 1 + ], + [ + 1256, + 305 + ], + [ + 0, + 394 + ], + [ + 1, + 2 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 0, + 516 + ], + [ + 1529, + 505 + ], + [ + 2048, + 483 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 513 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 7, + 34 + ], + [ + 26, + 35 + ], + [ + 26, + 17 + ], + [ + 32, + 18 + ], + [ + 32, + 35 + ], + [ + 43, + 44 + ], + [ + 177, + 37 + ], + [ + 177, + 24 + ], + [ + 190, + 25 + ], + [ + 197, + 30 + ], + [ + 208, + 31 + ], + [ + 210, + 36 + ], + [ + 333, + 30 + ], + [ + 334, + 27 + ], + [ + 334, + 20 + ], + [ + 342, + 18 + ], + [ + 342, + 8 + ], + [ + 354, + 8 + ], + [ + 358, + 21 + ], + [ + 362, + 22 + ], + [ + 364, + 32 + ], + [ + 377, + 30 + ], + [ + 380, + 12 + ], + [ + 381, + 12 + ], + [ + 382, + 31 + ], + [ + 389, + 30 + ], + [ + 389, + 11 + ], + [ + 379, + 0 + ], + [ + 415, + 0 + ], + [ + 408, + 1 + ], + [ + 401, + 16 + ], + [ + 411, + 21 + ], + [ + 412, + 6 + ], + [ + 417, + 4 + ], + [ + 418, + 9 + ], + [ + 423, + 18 + ], + [ + 431, + 26 + ], + [ + 458, + 0 + ], + [ + 548, + 2 + ], + [ + 564, + 16 + ], + [ + 565, + 2 + ], + [ + 579, + 1 + ], + [ + 583, + 24 + ], + [ + 610, + 31 + ], + [ + 610, + 22 + ], + [ + 617, + 17 + ], + [ + 616, + 29 + ], + [ + 619, + 32 + ], + [ + 621, + 40 + ], + [ + 660, + 76 + ], + [ + 662, + 83 + ], + [ + 667, + 87 + ], + [ + 666, + 107 + ], + [ + 684, + 106 + ], + [ + 690, + 111 + ], + [ + 693, + 129 + ], + [ + 725, + 157 + ], + [ + 726, + 171 + ], + [ + 740, + 180 + ], + [ + 730, + 191 + ], + [ + 729, + 242 + ], + [ + 734, + 325 + ], + [ + 195, + 409 + ], + [ + 0, + 409 + ], + [ + 0, + 47 + ], + [ + 
9, + 46 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 211, + 351 + ], + [ + 211, + 339 + ], + [ + 210, + 333 + ], + [ + 208, + 326 + ], + [ + 211, + 321 + ], + [ + 219, + 317 + ], + [ + 225, + 313 + ], + [ + 228, + 305 + ], + [ + 221, + 300 + ], + [ + 213, + 295 + ], + [ + 206, + 282 + ], + [ + 204, + 271 + ], + [ + 204, + 262 + ], + [ + 210, + 257 + ], + [ + 215, + 251 + ], + [ + 210, + 247 + ], + [ + 205, + 238 + ], + [ + 203, + 219 + ], + [ + 208, + 215 + ], + [ + 216, + 211 + ], + [ + 227, + 202 + ], + [ + 241, + 185 + ], + [ + 249, + 168 + ], + [ + 266, + 159 + ], + [ + 277, + 159 + ], + [ + 288, + 159 + ], + [ + 301, + 165 + ], + [ + 315, + 166 + ], + [ + 329, + 175 + ], + [ + 346, + 166 + ], + [ + 356, + 162 + ], + [ + 371, + 171 + ], + [ + 386, + 178 + ], + [ + 392, + 184 + ], + [ + 381, + 200 + ], + [ + 369, + 204 + ], + [ + 378, + 214 + ], + [ + 394, + 223 + ], + [ + 410, + 231 + ], + [ + 427, + 227 + ], + [ + 428, + 233 + ], + [ + 416, + 247 + ], + [ + 408, + 259 + ], + [ + 408, + 270 + ], + [ + 416, + 277 + ], + [ + 422, + 282 + ], + [ + 426, + 299 + ], + [ + 423, + 304 + ], + [ + 427, + 310 + ], + [ + 436, + 311 + ], + [ + 447, + 309 + ], + [ + 445, + 325 + ], + [ + 444, + 335 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 522, + 321 + ], + [ + 526, + 294 + ], + [ + 534, + 279 + ], + [ + 542, + 271 + ], + [ + 555, + 253 + ], + [ + 559, + 247 + ], + [ + 571, + 241 + ], + [ + 585, + 233 + ], + [ + 593, + 228 + ], + [ + 602, + 232 + ], + [ + 609, + 245 + ], + [ + 621, + 257 + ], + [ + 631, + 265 + ], + [ + 641, + 272 + ], + [ + 651, + 279 + ], + [ + 655, + 280 + ], + [ + 666, + 291 + ], + [ + 673, + 306 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 675, + 311 + ], + [ + 671, + 300 + ], + [ + 670, + 293 + ], + [ + 665, + 283 + ], + [ + 663, + 274 + ], + [ + 667, + 262 + ], + [ + 669, + 256 + ], + [ + 672, + 244 + ], + [ + 678, + 235 + ], + [ + 682, + 233 + ], + [ + 694, + 236 + ], + [ + 708, + 233 + ], + [ + 719, + 234 + ], + [ + 727, + 234 + ], + [ + 728, + 226 + ], + [ + 727, + 216 + ], + [ + 734, + 203 + ], + [ + 744, + 195 + ], + [ + 749, + 190 + ], + [ + 751, + 181 + ], + [ + 764, + 172 + ], + [ + 774, + 163 + ], + [ + 782, + 162 + ], + [ + 789, + 167 + ], + [ + 796, + 166 + ], + [ + 808, + 164 + ], + [ + 820, + 171 + ], + [ + 828, + 174 + ], + [ + 843, + 172 + ], + [ + 856, + 173 + ], + [ + 868, + 176 + ], + [ + 879, + 184 + ], + [ + 888, + 192 + ], + [ + 905, + 187 + ], + [ + 908, + 184 + ], + [ + 923, + 184 + ], + [ + 926, + 191 + ], + [ + 940, + 199 + ], + [ + 946, + 197 + ], + [ + 954, + 188 + ], + [ + 961, + 184 + ], + [ + 966, + 183 + ], + [ + 967, + 185 + ], + [ + 971, + 193 + ], + [ + 971, + 201 + ], + [ + 968, + 210 + ], + [ + 980, + 211 + ], + [ + 991, + 209 + ], + [ + 1000, + 209 + ], + [ + 1004, + 216 + ], + [ + 1003, + 231 + ], + [ + 999, + 246 + ], + [ + 982, + 255 + ], + [ + 976, + 261 + ], + [ + 976, + 262 + ], + [ + 984, + 265 + ], + [ + 997, + 263 + ], + [ + 1016, + 270 + ], + [ + 1019, + 290 + ], + [ + 793, + 316 + ], + [ + 699, + 318 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1479, + 466 + ], + [ + 1433, + 510 + ], + [ + 1383, + 512 + ], + [ + 1313, + 509 + ], + [ + 1309, + 483 + ], + [ + 1328, + 463 + ], + [ + 1346, + 454 + ], + [ + 1460, + 465 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1177, + 294 + ], + [ + 1169, + 191 + ], + [ + 1161, + 185 + ], + [ + 1169, + 165 + ], + [ + 1169, + 154 + ], + [ + 1183, + 118 + ], + [ + 1184, + 93 + ], + [ + 1198, + 80 + ], + [ + 
1197, + 61 + ], + [ + 1202, + 57 + ], + [ + 1202, + 52 + ], + [ + 1212, + 28 + ], + [ + 1209, + 18 + ], + [ + 1219, + 13 + ], + [ + 1219, + 0 + ], + [ + 2048, + 2 + ], + [ + 2048, + 491 + ], + [ + 1446, + 480 + ], + [ + 1391, + 470 + ], + [ + 1278, + 441 + ], + [ + 1215, + 370 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1370, + 549 + ], + [ + 1420, + 548 + ], + [ + 1508, + 577 + ], + [ + 1500, + 643 + ], + [ + 1471, + 656 + ], + [ + 1405, + 657 + ], + [ + 1329, + 656 + ], + [ + 1303, + 645 + ], + [ + 1265, + 613 + ], + [ + 1245, + 594 + ], + [ + 1238, + 582 + ], + [ + 1241, + 571 + ], + [ + 1274, + 560 + ], + [ + 1323, + 555 + ], + [ + 1341, + 553 + ], + [ + 1352, + 553 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1614, + 199 + ], + [ + 1660, + 198 + ], + [ + 1685, + 200 + ], + [ + 1688, + 207 + ], + [ + 1705, + 211 + ], + [ + 1705, + 215 + ], + [ + 1688, + 225 + ], + [ + 1682, + 262 + ], + [ + 1701, + 266 + ], + [ + 1702, + 276 + ], + [ + 1687, + 283 + ], + [ + 1683, + 286 + ], + [ + 1680, + 291 + ], + [ + 1669, + 295 + ], + [ + 1634, + 294 + ], + [ + 1620, + 295 + ], + [ + 1613, + 291 + ], + [ + 1606, + 288 + ], + [ + 1608, + 208 + ], + [ + 1613, + 207 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1599, + 219 + ], + [ + 1611, + 220 + ], + [ + 1618, + 228 + ], + [ + 1623, + 238 + ], + [ + 1625, + 248 + ], + [ + 1625, + 259 + ], + [ + 1622, + 269 + ], + [ + 1616, + 280 + ], + [ + 1612, + 284 + ], + [ + 1603, + 289 + ], + [ + 1603, + 272 + ], + [ + 1599, + 231 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1601, + 26 + ], + [ + 1568, + 24 + ], + [ + 1567, + 0 + ], + [ + 1616, + 0 + ], + [ + 1616, + 10 + ], + [ + 1608, + 15 + ], + [ + 1605, + 21 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1637, + 328 + ], + [ + 1635, + 192 + ], + [ + 1638, + 177 + ], + [ + 1635, + 46 + ], + [ + 1638, + 17 + ], + [ + 1638, + 0 + ], + [ + 2047, + 2 + ], + [ + 2048, + 287 + ], + [ + 1813, + 293 + ], + [ + 1813, + 263 + ], + [ + 1720, + 268 + ], + [ + 1722, + 289 + ], + [ + 1715, + 289 + ], + [ + 1714, + 303 + ], + [ + 1706, + 303 + ], + [ + 1704, + 289 + ], + [ + 1691, + 284 + ], + [ + 1690, + 269 + ], + [ + 1693, + 262 + ], + [ + 1693, + 256 + ], + [ + 1672, + 258 + ], + [ + 1673, + 308 + ], + [ + 1666, + 299 + ], + [ + 1666, + 294 + ], + [ + 1657, + 294 + ], + [ + 1656, + 305 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1422, + 221 + ], + [ + 1432, + 516 + ], + [ + 1420, + 543 + ], + [ + 1412, + 222 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1386, + 315 + ], + [ + 1394, + 322 + ], + [ + 1397, + 330 + ], + [ + 1398, + 343 + ], + [ + 1397, + 347 + ], + [ + 1393, + 353 + ], + [ + 1389, + 355 + ], + [ + 1387, + 320 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1436, + 305 + ], + [ + 1445, + 309 + ], + [ + 1455, + 310 + ], + [ + 1456, + 353 + ], + [ + 1446, + 355 + ], + [ + 1442, + 362 + ], + [ + 1432, + 361 + ], + [ + 1430, + 304 + ], + [ + 1432, + 305 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1399, + 305 + ], + [ + 1417, + 307 + ], + [ + 1418, + 361 + ], + [ + 1401, + 360 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1475, + 253 + ], + [ + 1475, + 268 + ], + [ + 1458, + 270 + ], + [ + 1458, + 278 + ], + [ + 1441, + 277 + ], + [ + 1440, + 270 + ], + [ + 1424, + 269 + ], + [ + 1424, + 256 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1198, + 87 + ], + [ + 1202, + 292 + ], + [ + 1190, + 286 + ], + [ + 1183, + 86 + ] + 
] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1013, + 303 + ], + [ + 987, + 289 + ], + [ + 983, + 275 + ], + [ + 989, + 266 + ], + [ + 990, + 262 + ], + [ + 990, + 252 + ], + [ + 994, + 241 + ], + [ + 998, + 230 + ], + [ + 994, + 226 + ], + [ + 986, + 225 + ], + [ + 982, + 216 + ], + [ + 983, + 207 + ], + [ + 986, + 202 + ], + [ + 987, + 202 + ], + [ + 992, + 198 + ], + [ + 999, + 198 + ], + [ + 1003, + 194 + ], + [ + 1006, + 187 + ], + [ + 1008, + 183 + ], + [ + 1011, + 180 + ], + [ + 1018, + 184 + ], + [ + 1018, + 194 + ], + [ + 1023, + 198 + ], + [ + 1029, + 197 + ], + [ + 1033, + 191 + ], + [ + 1040, + 186 + ], + [ + 1039, + 180 + ], + [ + 1039, + 170 + ], + [ + 1045, + 163 + ], + [ + 1045, + 159 + ], + [ + 1046, + 146 + ], + [ + 1054, + 142 + ], + [ + 1059, + 142 + ], + [ + 1067, + 135 + ], + [ + 1082, + 134 + ], + [ + 1100, + 129 + ], + [ + 1116, + 128 + ], + [ + 1127, + 135 + ], + [ + 1142, + 143 + ], + [ + 1161, + 151 + ], + [ + 1165, + 159 + ], + [ + 1169, + 172 + ], + [ + 1184, + 186 + ], + [ + 1188, + 201 + ], + [ + 1197, + 215 + ], + [ + 1200, + 229 + ], + [ + 1204, + 237 + ], + [ + 1213, + 239 + ], + [ + 1219, + 245 + ], + [ + 1218, + 254 + ], + [ + 1219, + 266 + ], + [ + 1223, + 282 + ], + [ + 1087, + 298 + ], + [ + 1036, + 298 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1140, + 104 + ], + [ + 1150, + 280 + ], + [ + 1142, + 275 + ], + [ + 1132, + 104 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1321, + 52 + ], + [ + 1335, + 468 + ], + [ + 1313, + 440 + ], + [ + 1300, + 55 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1294, + 161 + ], + [ + 1330, + 164 + ], + [ + 1328, + 229 + ], + [ + 1300, + 232 + ], + [ + 1298, + 220 + ], + [ + 1288, + 211 + ], + [ + 1293, + 204 + ], + [ + 1294, + 201 + ], + [ + 1294, + 186 + ], + [ + 1288, + 178 + ], + [ + 1290, + 173 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1333, + 0 + ], + [ + 1347, + 583 + ], + [ + 1379, + 581 + ], + [ + 1375, + 0 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1311, + 37 + ], + [ + 1311, + 26 + ], + [ + 1304, + 19 + ], + [ + 1300, + 11 + ], + [ + 1302, + 3 + ], + [ + 1306, + 0 + ], + [ + 1364, + 0 + ], + [ + 1365, + 79 + ], + [ + 1331, + 81 + ], + [ + 1326, + 74 + ], + [ + 1317, + 73 + ], + [ + 1310, + 68 + ], + [ + 1302, + 57 + ], + [ + 1301, + 47 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1376, + 0 + ], + [ + 1397, + 596 + ], + [ + 1374, + 596 + ], + [ + 1371, + 465 + ], + [ + 1374, + 453 + ], + [ + 1374, + 371 + ], + [ + 1368, + 370 + ], + [ + 1366, + 349 + ], + [ + 1372, + 348 + ], + [ + 1366, + 130 + ], + [ + 1363, + 1 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1418, + 0 + ], + [ + 1420, + 78 + ], + [ + 1390, + 76 + ], + [ + 1382, + 70 + ], + [ + 1382, + 59 + ], + [ + 1375, + 55 + ], + [ + 1373, + 48 + ], + [ + 1373, + 43 + ], + [ + 1379, + 35 + ], + [ + 1379, + 17 + ], + [ + 1372, + 8 + ], + [ + 1374, + 0 + ], + [ + 1392, + 0 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1331, + 146 + ], + [ + 1357, + 142 + ], + [ + 1371, + 143 + ], + [ + 1368, + 243 + ], + [ + 1339, + 245 + ], + [ + 1329, + 239 + ], + [ + 1328, + 225 + ], + [ + 1330, + 223 + ], + [ + 1349, + 218 + ], + [ + 1347, + 212 + ], + [ + 1327, + 203 + ], + [ + 1326, + 187 + ], + [ + 1337, + 185 + ], + [ + 1338, + 171 + ], + [ + 1326, + 165 + ], + [ + 1322, + 152 + ], + [ + 1324, + 145 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1420, + 84 + ], + [ + 1438, + 77 + ], + [ + 1454, + 77 + ], + 
[ + 1469, + 81 + ], + [ + 1481, + 90 + ], + [ + 1487, + 115 + ], + [ + 1487, + 136 + ], + [ + 1476, + 151 + ], + [ + 1459, + 158 + ], + [ + 1429, + 158 + ], + [ + 1412, + 151 + ], + [ + 1403, + 136 + ], + [ + 1403, + 108 + ], + [ + 1411, + 95 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1502, + 154 + ], + [ + 1506, + 161 + ], + [ + 1451, + 259 + ], + [ + 1445, + 261 + ], + [ + 1435, + 254 + ], + [ + 1395, + 174 + ], + [ + 1398, + 159 + ], + [ + 1432, + 159 + ], + [ + 1488, + 156 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1397, + 131 + ], + [ + 1403, + 248 + ], + [ + 1395, + 253 + ], + [ + 1389, + 256 + ], + [ + 1376, + 256 + ], + [ + 1369, + 248 + ], + [ + 1362, + 240 + ], + [ + 1356, + 137 + ], + [ + 1362, + 130 + ], + [ + 1364, + 125 + ], + [ + 1385, + 124 + ], + [ + 1388, + 131 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1393, + 408 + ], + [ + 1418, + 405 + ], + [ + 1432, + 408 + ], + [ + 1435, + 420 + ], + [ + 1435, + 425 + ], + [ + 1446, + 425 + ], + [ + 1446, + 460 + ], + [ + 1445, + 477 + ], + [ + 1429, + 489 + ], + [ + 1408, + 492 + ], + [ + 1402, + 491 + ], + [ + 1397, + 476 + ], + [ + 1395, + 450 + ], + [ + 1392, + 422 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 516, + 6 + ], + [ + 533, + 324 + ], + [ + 509, + 324 + ], + [ + 503, + 209 + ], + [ + 506, + 208 + ], + [ + 499, + 2 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 493, + 290 + ], + [ + 495, + 325 + ], + [ + 477, + 328 + ], + [ + 475, + 286 + ], + [ + 484, + 284 + ], + [ + 487, + 286 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 655, + 191 + ], + [ + 660, + 197 + ], + [ + 662, + 244 + ], + [ + 661, + 246 + ], + [ + 640, + 247 + ], + [ + 636, + 243 + ], + [ + 633, + 199 + ], + [ + 638, + 194 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 487, + 232 + ], + [ + 587, + 208 + ], + [ + 645, + 204 + ], + [ + 645, + 208 + ], + [ + 589, + 212 + ], + [ + 490, + 238 + ], + [ + 477, + 248 + ], + [ + 470, + 266 + ], + [ + 472, + 329 + ], + [ + 467, + 331 + ], + [ + 465, + 263 + ], + [ + 470, + 250 + ], + [ + 478, + 239 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 451, + 159 + ], + [ + 458, + 329 + ], + [ + 442, + 327 + ], + [ + 436, + 159 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 314, + 205 + ], + [ + 322, + 339 + ], + [ + 316, + 336 + ], + [ + 309, + 205 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 291, + 246 + ], + [ + 302, + 251 + ], + [ + 304, + 344 + ], + [ + 301, + 344 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 265, + 302 + ], + [ + 267, + 344 + ], + [ + 263, + 346 + ], + [ + 260, + 301 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 184, + 145 + ], + [ + 201, + 349 + ], + [ + 181, + 354 + ], + [ + 170, + 152 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1402, + 657 + ], + [ + 1362, + 656 + ], + [ + 1320, + 655 + ], + [ + 1393, + 711 + ], + [ + 1829, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 651 + ], + [ + 1577, + 625 + ] + ] + }, + { + "label": "train", + "polygon": [ + [ + 24, + 359 + ], + [ + 139, + 348 + ], + [ + 329, + 334 + ], + [ + 477, + 323 + ], + [ + 679, + 305 + ], + [ + 740, + 302 + ], + [ + 979, + 285 + ], + [ + 1091, + 277 + ], + [ + 1163, + 273 + ], + [ + 1222, + 279 + ], + [ + 1252, + 282 + ], + [ + 1272, + 288 + ], + [ + 1296, + 362 + ], + [ + 1321, + 435 + ], + [ + 1335, + 459 + ], + [ + 1339, + 483 + ], + [ + 1335, + 499 + ], + [ + 1320, + 511 + ], + [ + 1284, + 514 + ], + [ + 1284, + 516 + ], + [ + 1281, + 519 
+ ], + [ + 1271, + 525 + ], + [ + 1248, + 532 + ], + [ + 1225, + 527 + ], + [ + 1211, + 526 + ], + [ + 1159, + 533 + ], + [ + 928, + 531 + ], + [ + 799, + 533 + ], + [ + 759, + 538 + ], + [ + 314, + 544 + ], + [ + 222, + 550 + ], + [ + 140, + 560 + ], + [ + 81, + 560 + ], + [ + 64, + 557 + ], + [ + 33, + 549 + ], + [ + 0, + 535 + ], + [ + 1, + 360 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 22, + 494 + ], + [ + 23, + 480 + ], + [ + 27, + 479 + ], + [ + 28, + 476 + ], + [ + 34, + 474 + ], + [ + 36, + 477 + ], + [ + 43, + 477 + ], + [ + 50, + 544 + ], + [ + 48, + 551 + ], + [ + 44, + 554 + ], + [ + 41, + 554 + ], + [ + 41, + 556 + ], + [ + 38, + 556 + ], + [ + 37, + 554 + ], + [ + 29, + 550 + ], + [ + 25, + 507 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1402, + 657 + ], + [ + 1362, + 656 + ], + [ + 1320, + 655 + ], + [ + 1393, + 711 + ], + [ + 1829, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 651 + ], + [ + 1577, + 625 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 70, + 335 + ], + [ + 86, + 337 + ], + [ + 94, + 347 + ], + [ + 90, + 355 + ], + [ + 96, + 363 + ], + [ + 90, + 373 + ], + [ + 93, + 382 + ], + [ + 97, + 383 + ], + [ + 94, + 389 + ], + [ + 87, + 396 + ], + [ + 75, + 399 + ], + [ + 71, + 394 + ], + [ + 68, + 371 + ], + [ + 69, + 342 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 90, + 316 + ], + [ + 94, + 490 + ], + [ + 99, + 499 + ], + [ + 103, + 564 + ], + [ + 87, + 564 + ], + [ + 83, + 498 + ], + [ + 86, + 490 + ], + [ + 77, + 315 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 210, + 393 + ], + [ + 219, + 553 + ], + [ + 214, + 554 + ], + [ + 206, + 395 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 272, + 559 + ], + [ + 290, + 560 + ], + [ + 290, + 567 + ], + [ + 227, + 573 + ], + [ + 238, + 560 + ], + [ + 258, + 560 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 29, + 586 + ], + [ + 58, + 587 + ], + [ + 66, + 590 + ], + [ + 66, + 597 + ], + [ + 34, + 606 + ], + [ + 0, + 614 + ], + [ + 0, + 585 + ], + [ + 15, + 583 + ], + [ + 18, + 585 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 337, + 481 + ], + [ + 347, + 466 + ], + [ + 359, + 452 + ], + [ + 381, + 436 + ], + [ + 401, + 422 + ], + [ + 430, + 418 + ], + [ + 452, + 418 + ], + [ + 467, + 422 + ], + [ + 485, + 423 + ], + [ + 547, + 420 + ], + [ + 584, + 419 + ], + [ + 601, + 410 + ], + [ + 639, + 409 + ], + [ + 652, + 411 + ], + [ + 671, + 424 + ], + [ + 685, + 435 + ], + [ + 704, + 459 + ], + [ + 718, + 482 + ], + [ + 742, + 481 + ], + [ + 757, + 483 + ], + [ + 758, + 488 + ], + [ + 759, + 499 + ], + [ + 759, + 501 + ], + [ + 747, + 505 + ], + [ + 745, + 507 + ], + [ + 760, + 526 + ], + [ + 767, + 536 + ], + [ + 770, + 561 + ], + [ + 771, + 589 + ], + [ + 770, + 641 + ], + [ + 766, + 648 + ], + [ + 744, + 654 + ], + [ + 734, + 652 + ], + [ + 726, + 635 + ], + [ + 725, + 629 + ], + [ + 713, + 641 + ], + [ + 703, + 647 + ], + [ + 699, + 652 + ], + [ + 696, + 674 + ], + [ + 693, + 686 + ], + [ + 681, + 694 + ], + [ + 661, + 698 + ], + [ + 650, + 692 + ], + [ + 639, + 680 + ], + [ + 564, + 683 + ], + [ + 510, + 680 + ], + [ + 473, + 678 + ], + [ + 460, + 674 + ], + [ + 443, + 677 + ], + [ + 423, + 677 + ], + [ + 412, + 677 + ], + [ + 408, + 678 + ], + [ + 397, + 699 + ], + [ + 375, + 706 + ], + [ + 354, + 707 + ], + [ + 347, + 705 + ], + [ + 338, + 696 + ], + [ + 332, + 674 + ], + [ + 331, + 670 + ], + [ + 313, + 669 + ], + [ + 311, + 654 + ], + [ + 300, + 634 + ], + [ + 293, + 619 + ], + [ + 298, + 604 + ], + [ + 300, 
+ 595 + ], + [ + 302, + 564 + ], + [ + 311, + 525 + ], + [ + 321, + 503 + ], + [ + 328, + 491 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 943, + 459 + ], + [ + 954, + 440 + ], + [ + 967, + 422 + ], + [ + 986, + 416 + ], + [ + 1061, + 411 + ], + [ + 1106, + 413 + ], + [ + 1141, + 418 + ], + [ + 1165, + 445 + ], + [ + 1177, + 469 + ], + [ + 1185, + 473 + ], + [ + 1189, + 480 + ], + [ + 1189, + 487 + ], + [ + 1185, + 492 + ], + [ + 1191, + 506 + ], + [ + 1199, + 525 + ], + [ + 1203, + 538 + ], + [ + 1200, + 602 + ], + [ + 1201, + 622 + ], + [ + 1198, + 640 + ], + [ + 1195, + 647 + ], + [ + 1168, + 649 + ], + [ + 1159, + 637 + ], + [ + 1155, + 624 + ], + [ + 1116, + 623 + ], + [ + 1070, + 625 + ], + [ + 1044, + 627 + ], + [ + 1023, + 627 + ], + [ + 1003, + 626 + ], + [ + 990, + 624 + ], + [ + 980, + 622 + ], + [ + 962, + 621 + ], + [ + 957, + 620 + ], + [ + 955, + 629 + ], + [ + 955, + 643 + ], + [ + 946, + 646 + ], + [ + 929, + 645 + ], + [ + 922, + 645 + ], + [ + 920, + 638 + ], + [ + 920, + 616 + ], + [ + 920, + 582 + ], + [ + 920, + 560 + ], + [ + 918, + 545 + ], + [ + 921, + 523 + ], + [ + 924, + 512 + ], + [ + 933, + 493 + ], + [ + 928, + 490 + ], + [ + 908, + 490 + ], + [ + 903, + 488 + ], + [ + 905, + 477 + ], + [ + 912, + 471 + ], + [ + 928, + 471 + ], + [ + 931, + 471 + ], + [ + 934, + 480 + ], + [ + 935, + 481 + ], + [ + 938, + 472 + ], + [ + 939, + 464 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1477, + 416 + ], + [ + 1495, + 386 + ], + [ + 1520, + 356 + ], + [ + 1542, + 338 + ], + [ + 1595, + 316 + ], + [ + 1648, + 303 + ], + [ + 1702, + 297 + ], + [ + 1720, + 300 + ], + [ + 1815, + 287 + ], + [ + 1915, + 278 + ], + [ + 1989, + 270 + ], + [ + 2029, + 270 + ], + [ + 2048, + 292 + ], + [ + 2048, + 773 + ], + [ + 2034, + 765 + ], + [ + 2023, + 749 + ], + [ + 2013, + 738 + ], + [ + 1990, + 736 + ], + [ + 1936, + 736 + ], + [ + 1855, + 743 + ], + [ + 1806, + 754 + ], + [ + 1753, + 759 + ], + [ + 1713, + 760 + ], + [ + 1698, + 762 + ], + [ + 1682, + 763 + ], + [ + 1667, + 764 + ], + [ + 1663, + 778 + ], + [ + 1660, + 795 + ], + [ + 1654, + 810 + ], + [ + 1648, + 814 + ], + [ + 1616, + 816 + ], + [ + 1590, + 813 + ], + [ + 1574, + 791 + ], + [ + 1565, + 754 + ], + [ + 1561, + 725 + ], + [ + 1559, + 702 + ], + [ + 1547, + 694 + ], + [ + 1528, + 688 + ], + [ + 1511, + 684 + ], + [ + 1509, + 682 + ], + [ + 1507, + 681 + ], + [ + 1505, + 681 + ], + [ + 1504, + 686 + ], + [ + 1504, + 696 + ], + [ + 1499, + 705 + ], + [ + 1490, + 718 + ], + [ + 1477, + 724 + ], + [ + 1461, + 724 + ], + [ + 1435, + 720 + ], + [ + 1425, + 714 + ], + [ + 1410, + 692 + ], + [ + 1400, + 649 + ], + [ + 1401, + 628 + ], + [ + 1402, + 611 + ], + [ + 1406, + 604 + ], + [ + 1405, + 587 + ], + [ + 1406, + 552 + ], + [ + 1416, + 532 + ], + [ + 1426, + 514 + ], + [ + 1430, + 504 + ], + [ + 1441, + 487 + ], + [ + 1430, + 484 + ], + [ + 1418, + 484 + ], + [ + 1407, + 477 + ], + [ + 1405, + 462 + ], + [ + 1410, + 451 + ], + [ + 1418, + 445 + ], + [ + 1429, + 445 + ], + [ + 1446, + 444 + ], + [ + 1454, + 448 + ], + [ + 1456, + 462 + ], + [ + 1460, + 448 + ], + [ + 1472, + 422 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 399, + 555 + ], + [ + 521, + 552 + ], + [ + 519, + 578 + ], + [ + 400, + 585 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1017, + 565 + ], + [ + 1110, + 563 + ], + [ + 1111, + 585 + ], + [ + 1017, + 587 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1796, + 686 + ], + [ + 1975, + 672 + ], + [ + 1985, + 713 + ], + [ + 1803, + 
730 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 899, + 41 + ], + [ + 933, + 41 + ], + [ + 918, + 65 + ], + [ + 913, + 61 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 893, + 166 + ], + [ + 917, + 168 + ], + [ + 917, + 180 + ], + [ + 893, + 179 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1656, + 58 + ], + [ + 1679, + 63 + ], + [ + 1682, + 71 + ], + [ + 1695, + 74 + ], + [ + 1695, + 80 + ], + [ + 1679, + 86 + ], + [ + 1679, + 103 + ], + [ + 1694, + 103 + ], + [ + 1695, + 111 + ], + [ + 1679, + 115 + ], + [ + 1679, + 124 + ], + [ + 1698, + 130 + ], + [ + 1697, + 138 + ], + [ + 1678, + 145 + ], + [ + 1670, + 154 + ], + [ + 1668, + 154 + ], + [ + 1652, + 153 + ], + [ + 1657, + 67 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 180, + 560 + ], + [ + 158, + 564 + ], + [ + 151, + 572 + ], + [ + 192, + 574 + ], + [ + 223, + 565 + ], + [ + 237, + 566 + ], + [ + 253, + 562 + ], + [ + 253, + 546 + ], + [ + 181, + 549 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 188, + 552 + ], + [ + 176, + 553 + ], + [ + 166, + 474 + ], + [ + 173, + 472 + ], + [ + 173, + 466 + ], + [ + 178, + 466 + ], + [ + 178, + 471 + ], + [ + 188, + 471 + ], + [ + 197, + 549 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 22, + 494 + ], + [ + 23, + 480 + ], + [ + 27, + 479 + ], + [ + 28, + 476 + ], + [ + 34, + 474 + ], + [ + 36, + 477 + ], + [ + 43, + 477 + ], + [ + 50, + 544 + ], + [ + 48, + 551 + ], + [ + 44, + 554 + ], + [ + 41, + 554 + ], + [ + 41, + 556 + ], + [ + 38, + 556 + ], + [ + 37, + 554 + ], + [ + 29, + 550 + ], + [ + 25, + 507 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 27, + 556 + ], + [ + 39, + 554 + ], + [ + 62, + 554 + ], + [ + 70, + 560 + ], + [ + 39, + 563 + ], + [ + 26, + 562 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 0, + 499 + ], + [ + 24, + 499 + ], + [ + 28, + 573 + ], + [ + 0, + 577 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 209, + 352 + ], + [ + 199, + 354 + ], + [ + 194, + 365 + ], + [ + 194, + 381 + ], + [ + 200, + 393 + ], + [ + 210, + 398 + ], + [ + 218, + 391 + ], + [ + 222, + 380 + ], + [ + 222, + 368 + ], + [ + 216, + 358 + ], + [ + 212, + 355 + ], + [ + 211, + 354 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000012_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000012_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..3c928f168fc5e9fdb22ac816f850aadad44232fd Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000012_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000012_000019_gtFine_instanceIds.png 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000012_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..f772da0f1fe7fdccdd554ef436ce540d68c5f0a5 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000012_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000013_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000013_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..ff248971a9bbc22cd8a94a32dbc5cb6a1e7a877b --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000013_000019_gtFine_polygons.json @@ -0,0 +1,6674 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 702, + 1 + ], + [ + 1347, + 1 + ], + [ + 1230, + 345 + ], + [ + 881, + 309 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 0, + 601 + ], + [ + 972, + 463 + ], + [ + 1021, + 449 + ], + [ + 1114, + 450 + ], + [ + 1246, + 493 + ], + [ + 1475, + 536 + ], + [ + 1760, + 569 + ], + [ + 2048, + 599 + ], + [ + 2048, + 1024 + ], + [ + 1, + 1024 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 774, + 472 + ], + [ + 818, + 477 + ], + [ + 866, + 489 + ], + [ + 872, + 492 + ], + [ + 812, + 496 + ], + [ + 785, + 495 + ], + [ + 684, + 507 + ], + [ + 664, + 507 + ], + [ + 674, + 516 + ], + [ + 672, + 520 + ], + [ + 471, + 547 + ], + [ + 242, + 583 + ], + [ + 37, + 616 + ], + [ + 0, + 622 + ], + [ + 1, + 530 + ], + [ + 521, + 491 + ], + [ + 718, + 477 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1168, + 500 + ], + [ + 1161, + 502 + ], + [ + 1331, + 597 + ], + [ + 1520, + 563 + ], + [ + 1314, + 495 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1168, + 500 + ], + [ + 1161, + 502 + ], + [ + 1331, + 597 + ], + [ + 1520, + 563 + ], + [ + 1314, + 495 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1432, + 477 + ], + [ + 1535, + 493 + ], + [ + 1898, + 535 + ], + [ + 2048, + 547 + ], + [ + 2048, + 691 + ], + [ + 1766, + 671 + ], + [ + 1521, + 648 + ], + [ + 1355, + 614 + ], + [ + 1337, + 602 + ], + [ + 1334, + 596 + ], + [ + 1340, + 589 + ], + [ + 1448, + 571 + ], + [ + 1497, + 566 + ], + [ + 1436, + 547 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 885, + 242 + ], + [ + 911, + 242 + ], + [ + 918, + 234 + ], + [ + 928, + 233 + ], + [ + 932, + 235 + ], + [ + 939, + 237 + ], + [ + 942, + 241 + ], + [ + 953, + 244 + ], + [ + 955, + 235 + ], + [ + 959, + 235 + ], + [ + 962, + 242 + ], + [ + 963, + 244 + ], + [ + 969, + 235 + ], + [ + 973, + 235 + ], + [ + 977, + 233 + ], + [ + 979, + 232 + ], + [ + 984, + 236 + ], + [ + 994, + 242 + ], + [ + 994, + 232 + ], + [ + 999, + 239 + ], + [ + 1002, + 244 + ], + [ + 1004, + 257 + ], + [ + 1008, + 258 + ], + [ + 1013, + 252 + ], + [ + 1017, + 254 + ], + [ + 1017, + 246 + ], + [ + 1024, + 244 + ], + [ + 1025, + 257 + ], + [ + 1041, + 255 + ], + [ + 1039, + 265 + ], + [ + 1061, + 267 + ], + [ + 1088, + 260 + ], + [ + 1113, + 262 + ], + [ + 1164, + 290 + ], + [ + 1191, + 383 + ], + [ + 1190, + 446 + ], + [ + 1092, + 457 + ], + [ + 1086, + 459 + ], + [ + 1018, + 462 + ], + [ + 911, + 459 + ], + [ + 897, + 463 + ], + [ + 895, + 360 + ], + [ + 882, + 252 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1028, + 455 + ], + [ + 1037, + 448 + ], + [ + 1047, + 447 + ], + [ + 1055, + 449 + ], + [ + 1060, + 448 + ], + [ + 1070, + 447 + ], + [ + 1074, + 447 + ], + [ + 1078, + 452 + ], + [ + 1078, + 
444 + ], + [ + 1084, + 446 + ], + [ + 1087, + 448 + ], + [ + 1094, + 449 + ], + [ + 1100, + 446 + ], + [ + 1108, + 444 + ], + [ + 1113, + 446 + ], + [ + 1114, + 450 + ], + [ + 1102, + 461 + ], + [ + 1080, + 462 + ], + [ + 1054, + 464 + ], + [ + 1045, + 464 + ], + [ + 1024, + 463 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 0, + 2 + ], + [ + 754, + 0 + ], + [ + 755, + 4 + ], + [ + 755, + 11 + ], + [ + 755, + 16 + ], + [ + 768, + 18 + ], + [ + 782, + 18 + ], + [ + 789, + 25 + ], + [ + 781, + 37 + ], + [ + 788, + 40 + ], + [ + 791, + 49 + ], + [ + 792, + 51 + ], + [ + 773, + 57 + ], + [ + 776, + 62 + ], + [ + 780, + 66 + ], + [ + 780, + 86 + ], + [ + 792, + 85 + ], + [ + 803, + 151 + ], + [ + 825, + 153 + ], + [ + 825, + 148 + ], + [ + 838, + 146 + ], + [ + 840, + 176 + ], + [ + 839, + 198 + ], + [ + 851, + 208 + ], + [ + 875, + 223 + ], + [ + 879, + 235 + ], + [ + 895, + 243 + ], + [ + 894, + 348 + ], + [ + 898, + 466 + ], + [ + 819, + 479 + ], + [ + 699, + 487 + ], + [ + 646, + 493 + ], + [ + 519, + 509 + ], + [ + 439, + 516 + ], + [ + 365, + 521 + ], + [ + 297, + 528 + ], + [ + 109, + 541 + ], + [ + 36, + 545 + ], + [ + 13, + 546 + ], + [ + 0, + 547 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 866, + 459 + ], + [ + 862, + 409 + ], + [ + 851, + 397 + ], + [ + 851, + 367 + ], + [ + 857, + 345 + ], + [ + 864, + 336 + ], + [ + 871, + 330 + ], + [ + 888, + 326 + ], + [ + 896, + 326 + ], + [ + 905, + 329 + ], + [ + 915, + 325 + ], + [ + 919, + 315 + ], + [ + 921, + 309 + ], + [ + 927, + 308 + ], + [ + 935, + 308 + ], + [ + 941, + 303 + ], + [ + 948, + 303 + ], + [ + 951, + 317 + ], + [ + 951, + 325 + ], + [ + 957, + 331 + ], + [ + 962, + 329 + ], + [ + 969, + 324 + ], + [ + 973, + 329 + ], + [ + 974, + 339 + ], + [ + 976, + 346 + ], + [ + 980, + 355 + ], + [ + 983, + 370 + ], + [ + 992, + 385 + ], + [ + 1000, + 395 + ], + [ + 1004, + 404 + ], + [ + 1000, + 410 + ], + [ + 980, + 419 + ], + [ + 965, + 424 + ], + [ + 962, + 438 + ], + [ + 957, + 455 + ], + [ + 951, + 453 + ], + [ + 952, + 438 + ], + [ + 948, + 437 + ], + [ + 947, + 454 + ], + [ + 929, + 474 + ], + [ + 924, + 475 + ], + [ + 915, + 462 + ], + [ + 918, + 436 + ], + [ + 912, + 432 + ], + [ + 910, + 432 + ], + [ + 906, + 437 + ], + [ + 898, + 439 + ], + [ + 898, + 464 + ], + [ + 892, + 464 + ], + [ + 894, + 433 + ], + [ + 891, + 428 + ], + [ + 884, + 429 + ], + [ + 873, + 426 + ], + [ + 871, + 463 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 859, + 277 + ], + [ + 866, + 278 + ], + [ + 870, + 472 + ], + [ + 862, + 470 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 824, + 434 + ], + [ + 825, + 475 + ], + [ + 818, + 477 + ], + [ + 819, + 430 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 844, + 406 + ], + [ + 828, + 402 + ], + [ + 818, + 400 + ], + [ + 803, + 393 + ], + [ + 796, + 374 + ], + [ + 795, + 354 + ], + [ + 796, + 347 + ], + [ + 802, + 335 + ], + [ + 809, + 330 + ], + [ + 819, + 325 + ], + [ + 829, + 324 + ], + [ + 837, + 333 + ], + [ + 855, + 339 + ], + [ + 868, + 337 + ], + [ + 882, + 348 + ], + [ + 889, + 360 + ], + [ + 891, + 375 + ], + [ + 893, + 389 + ], + [ + 886, + 397 + ], + [ + 878, + 405 + ], + [ + 864, + 408 + ], + [ + 851, + 408 + ], + [ + 852, + 464 + ], + [ + 846, + 465 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 835, + 413 + ], + [ + 845, + 420 + ], + [ + 844, + 436 + ], + [ + 835, + 441 + ], + [ + 807, + 439 + ], + [ + 807, + 414 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 754, + 450 + ], + [ + 787, + 456 + 
], + [ + 796, + 470 + ], + [ + 791, + 491 + ], + [ + 761, + 478 + ], + [ + 748, + 468 + ], + [ + 744, + 459 + ], + [ + 744, + 455 + ], + [ + 747, + 454 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 742, + 388 + ], + [ + 742, + 381 + ], + [ + 730, + 375 + ], + [ + 731, + 368 + ], + [ + 736, + 366 + ], + [ + 743, + 363 + ], + [ + 754, + 353 + ], + [ + 765, + 339 + ], + [ + 779, + 336 + ], + [ + 796, + 335 + ], + [ + 802, + 336 + ], + [ + 812, + 349 + ], + [ + 819, + 359 + ], + [ + 830, + 367 + ], + [ + 836, + 378 + ], + [ + 835, + 396 + ], + [ + 823, + 404 + ], + [ + 823, + 409 + ], + [ + 823, + 415 + ], + [ + 821, + 428 + ], + [ + 814, + 430 + ], + [ + 809, + 435 + ], + [ + 803, + 441 + ], + [ + 803, + 476 + ], + [ + 799, + 476 + ], + [ + 797, + 437 + ], + [ + 792, + 431 + ], + [ + 791, + 430 + ], + [ + 788, + 473 + ], + [ + 785, + 476 + ], + [ + 782, + 426 + ], + [ + 776, + 427 + ], + [ + 773, + 430 + ], + [ + 766, + 430 + ], + [ + 761, + 422 + ], + [ + 752, + 428 + ], + [ + 742, + 430 + ], + [ + 742, + 422 + ], + [ + 735, + 414 + ], + [ + 729, + 411 + ], + [ + 729, + 407 + ], + [ + 732, + 403 + ], + [ + 736, + 397 + ], + [ + 736, + 394 + ], + [ + 739, + 391 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 842, + 461 + ], + [ + 849, + 458 + ], + [ + 856, + 457 + ], + [ + 868, + 456 + ], + [ + 881, + 453 + ], + [ + 896, + 456 + ], + [ + 906, + 457 + ], + [ + 915, + 456 + ], + [ + 934, + 458 + ], + [ + 934, + 473 + ], + [ + 929, + 478 + ], + [ + 919, + 481 + ], + [ + 902, + 481 + ], + [ + 877, + 481 + ], + [ + 864, + 487 + ], + [ + 855, + 486 + ], + [ + 842, + 480 + ], + [ + 841, + 471 + ], + [ + 839, + 464 + ], + [ + 839, + 468 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 832, + 457 + ], + [ + 850, + 459 + ], + [ + 853, + 464 + ], + [ + 859, + 467 + ], + [ + 860, + 480 + ], + [ + 857, + 488 + ], + [ + 854, + 490 + ], + [ + 849, + 490 + ], + [ + 842, + 489 + ], + [ + 841, + 487 + ], + [ + 826, + 487 + ], + [ + 824, + 489 + ], + [ + 812, + 489 + ], + [ + 810, + 480 + ], + [ + 810, + 474 + ], + [ + 818, + 466 + ], + [ + 819, + 460 + ], + [ + 825, + 458 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 809, + 462 + ], + [ + 808, + 493 + ], + [ + 804, + 493 + ], + [ + 804, + 468 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 700, + 375 + ], + [ + 687, + 363 + ], + [ + 678, + 363 + ], + [ + 670, + 363 + ], + [ + 657, + 363 + ], + [ + 646, + 363 + ], + [ + 636, + 359 + ], + [ + 636, + 346 + ], + [ + 642, + 335 + ], + [ + 635, + 310 + ], + [ + 640, + 281 + ], + [ + 663, + 251 + ], + [ + 694, + 236 + ], + [ + 711, + 228 + ], + [ + 719, + 224 + ], + [ + 728, + 227 + ], + [ + 730, + 232 + ], + [ + 730, + 245 + ], + [ + 741, + 242 + ], + [ + 745, + 250 + ], + [ + 749, + 252 + ], + [ + 754, + 254 + ], + [ + 759, + 260 + ], + [ + 767, + 260 + ], + [ + 774, + 266 + ], + [ + 775, + 278 + ], + [ + 783, + 280 + ], + [ + 785, + 292 + ], + [ + 785, + 300 + ], + [ + 792, + 308 + ], + [ + 780, + 321 + ], + [ + 774, + 329 + ], + [ + 783, + 345 + ], + [ + 786, + 361 + ], + [ + 777, + 363 + ], + [ + 759, + 358 + ], + [ + 743, + 358 + ], + [ + 726, + 367 + ], + [ + 723, + 371 + ], + [ + 715, + 380 + ], + [ + 708, + 387 + ], + [ + 708, + 465 + ], + [ + 704, + 467 + ], + [ + 700, + 426 + ], + [ + 700, + 383 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 488, + 216 + ], + [ + 499, + 217 + ], + [ + 498, + 206 + ], + [ + 498, + 197 + ], + [ + 509, + 184 + ], + [ + 519, + 177 + ], + [ + 522, + 171 + ], + [ + 528, + 159 + ], + [ + 540, + 145 + 
], + [ + 560, + 140 + ], + [ + 565, + 134 + ], + [ + 579, + 123 + ], + [ + 594, + 116 + ], + [ + 614, + 120 + ], + [ + 627, + 128 + ], + [ + 653, + 120 + ], + [ + 675, + 118 + ], + [ + 684, + 124 + ], + [ + 679, + 138 + ], + [ + 664, + 159 + ], + [ + 681, + 164 + ], + [ + 692, + 172 + ], + [ + 702, + 179 + ], + [ + 716, + 180 + ], + [ + 726, + 181 + ], + [ + 723, + 190 + ], + [ + 713, + 201 + ], + [ + 702, + 209 + ], + [ + 709, + 218 + ], + [ + 713, + 235 + ], + [ + 720, + 250 + ], + [ + 717, + 274 + ], + [ + 699, + 285 + ], + [ + 668, + 306 + ], + [ + 655, + 317 + ], + [ + 646, + 327 + ], + [ + 633, + 331 + ], + [ + 625, + 335 + ], + [ + 617, + 335 + ], + [ + 602, + 327 + ], + [ + 593, + 325 + ], + [ + 596, + 396 + ], + [ + 594, + 484 + ], + [ + 584, + 494 + ], + [ + 577, + 474 + ], + [ + 578, + 403 + ], + [ + 574, + 364 + ], + [ + 578, + 344 + ], + [ + 580, + 325 + ], + [ + 574, + 320 + ], + [ + 567, + 325 + ], + [ + 559, + 327 + ], + [ + 550, + 325 + ], + [ + 547, + 322 + ], + [ + 532, + 323 + ], + [ + 528, + 332 + ], + [ + 504, + 347 + ], + [ + 475, + 334 + ], + [ + 474, + 326 + ], + [ + 474, + 316 + ], + [ + 474, + 302 + ], + [ + 467, + 295 + ], + [ + 457, + 295 + ], + [ + 453, + 287 + ], + [ + 453, + 274 + ], + [ + 457, + 268 + ], + [ + 466, + 260 + ], + [ + 460, + 254 + ], + [ + 456, + 244 + ], + [ + 458, + 241 + ], + [ + 456, + 235 + ], + [ + 458, + 231 + ], + [ + 467, + 227 + ], + [ + 476, + 220 + ], + [ + 479, + 217 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 904, + 456 + ], + [ + 909, + 464 + ], + [ + 913, + 473 + ], + [ + 915, + 485 + ], + [ + 910, + 487 + ], + [ + 902, + 487 + ], + [ + 899, + 485 + ], + [ + 882, + 485 + ], + [ + 881, + 488 + ], + [ + 877, + 488 + ], + [ + 869, + 488 + ], + [ + 867, + 485 + ], + [ + 867, + 480 + ], + [ + 866, + 473 + ], + [ + 873, + 466 + ], + [ + 876, + 456 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 689, + 396 + ], + [ + 691, + 427 + ], + [ + 676, + 429 + ], + [ + 674, + 401 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 684, + 393 + ], + [ + 687, + 464 + ], + [ + 683, + 468 + ], + [ + 680, + 392 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 690, + 425 + ], + [ + 692, + 435 + ], + [ + 676, + 436 + ], + [ + 676, + 426 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 746, + 467 + ], + [ + 754, + 459 + ], + [ + 771, + 458 + ], + [ + 782, + 459 + ], + [ + 792, + 473 + ], + [ + 794, + 486 + ], + [ + 793, + 498 + ], + [ + 786, + 499 + ], + [ + 771, + 499 + ], + [ + 763, + 487 + ], + [ + 755, + 481 + ], + [ + 749, + 474 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 729, + 465 + ], + [ + 738, + 459 + ], + [ + 754, + 459 + ], + [ + 769, + 459 + ], + [ + 777, + 472 + ], + [ + 778, + 481 + ], + [ + 781, + 492 + ], + [ + 781, + 498 + ], + [ + 779, + 500 + ], + [ + 775, + 499 + ], + [ + 772, + 502 + ], + [ + 768, + 504 + ], + [ + 763, + 500 + ], + [ + 742, + 499 + ], + [ + 736, + 488 + ], + [ + 734, + 478 + ], + [ + 730, + 472 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 699, + 465 + ], + [ + 702, + 455 + ], + [ + 714, + 452 + ], + [ + 733, + 452 + ], + [ + 742, + 462 + ], + [ + 747, + 480 + ], + [ + 749, + 493 + ], + [ + 747, + 501 + ], + [ + 745, + 508 + ], + [ + 742, + 507 + ], + [ + 739, + 504 + ], + [ + 735, + 503 + ], + [ + 728, + 508 + ], + [ + 718, + 506 + ], + [ + 710, + 493 + ], + [ + 703, + 472 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 651, + 470 + ], + [ + 659, + 462 + ], + [ + 671, + 456 + ], + [ + 700, + 455 + ], + [ + 713, + 457 + ], + [ + 726, + 476 
+ ], + [ + 728, + 496 + ], + [ + 725, + 511 + ], + [ + 714, + 512 + ], + [ + 706, + 513 + ], + [ + 687, + 511 + ], + [ + 665, + 512 + ], + [ + 662, + 511 + ], + [ + 656, + 508 + ], + [ + 651, + 512 + ], + [ + 642, + 512 + ], + [ + 640, + 506 + ], + [ + 640, + 494 + ], + [ + 640, + 488 + ], + [ + 645, + 479 + ], + [ + 645, + 475 + ], + [ + 645, + 472 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 620, + 407 + ], + [ + 624, + 408 + ], + [ + 630, + 416 + ], + [ + 629, + 422 + ], + [ + 621, + 424 + ], + [ + 616, + 422 + ], + [ + 608, + 419 + ], + [ + 614, + 408 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 624, + 367 + ], + [ + 627, + 513 + ], + [ + 623, + 513 + ], + [ + 616, + 366 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 607, + 432 + ], + [ + 633, + 432 + ], + [ + 631, + 465 + ], + [ + 609, + 464 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 436, + 315 + ], + [ + 489, + 329 + ], + [ + 487, + 349 + ], + [ + 434, + 338 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 516, + 317 + ], + [ + 519, + 351 + ], + [ + 485, + 352 + ], + [ + 481, + 320 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 564, + 469 + ], + [ + 574, + 469 + ], + [ + 579, + 469 + ], + [ + 589, + 468 + ], + [ + 583, + 479 + ], + [ + 587, + 486 + ], + [ + 600, + 492 + ], + [ + 606, + 501 + ], + [ + 605, + 518 + ], + [ + 584, + 519 + ], + [ + 565, + 521 + ], + [ + 539, + 525 + ], + [ + 526, + 529 + ], + [ + 515, + 529 + ], + [ + 505, + 524 + ], + [ + 496, + 517 + ], + [ + 465, + 523 + ], + [ + 456, + 528 + ], + [ + 442, + 528 + ], + [ + 435, + 520 + ], + [ + 434, + 513 + ], + [ + 437, + 498 + ], + [ + 440, + 495 + ], + [ + 450, + 493 + ], + [ + 459, + 492 + ], + [ + 461, + 483 + ], + [ + 459, + 477 + ], + [ + 467, + 475 + ], + [ + 479, + 478 + ], + [ + 484, + 480 + ], + [ + 485, + 474 + ], + [ + 484, + 467 + ], + [ + 487, + 464 + ], + [ + 501, + 464 + ], + [ + 512, + 464 + ], + [ + 517, + 469 + ], + [ + 531, + 464 + ], + [ + 535, + 461 + ], + [ + 546, + 459 + ], + [ + 554, + 463 + ], + [ + 560, + 470 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 451, + 514 + ], + [ + 452, + 496 + ], + [ + 547, + 498 + ], + [ + 617, + 494 + ], + [ + 617, + 516 + ], + [ + 549, + 525 + ], + [ + 451, + 528 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 371, + 374 + ], + [ + 370, + 460 + ], + [ + 375, + 466 + ], + [ + 378, + 375 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 353, + 325 + ], + [ + 401, + 323 + ], + [ + 403, + 388 + ], + [ + 351, + 390 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 180, + 442 + ], + [ + 194, + 451 + ], + [ + 195, + 477 + ], + [ + 194, + 549 + ], + [ + 154, + 550 + ], + [ + 131, + 511 + ], + [ + 122, + 493 + ], + [ + 124, + 473 + ], + [ + 126, + 460 + ], + [ + 128, + 448 + ], + [ + 137, + 445 + ], + [ + 151, + 442 + ], + [ + 162, + 441 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 297, + 478 + ], + [ + 302, + 491 + ], + [ + 307, + 499 + ], + [ + 323, + 513 + ], + [ + 326, + 521 + ], + [ + 328, + 533 + ], + [ + 328, + 537 + ], + [ + 330, + 542 + ], + [ + 329, + 550 + ], + [ + 307, + 548 + ], + [ + 300, + 546 + ], + [ + 288, + 533 + ], + [ + 280, + 500 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 179, + 373 + ], + [ + 320, + 372 + ], + [ + 374, + 379 + ], + [ + 409, + 382 + ], + [ + 373, + 391 + ], + [ + 300, + 394 + ], + [ + 300, + 528 + ], + [ + 284, + 533 + ], + [ + 281, + 554 + ], + [ + 216, + 551 + ], + [ + 214, + 530 + ], + [ + 204, + 532 + ], + [ + 202, + 
555 + ], + [ + 190, + 556 + ], + [ + 184, + 491 + ], + [ + 181, + 412 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 401, + 486 + ], + [ + 401, + 504 + ], + [ + 399, + 520 + ], + [ + 394, + 524 + ], + [ + 379, + 527 + ], + [ + 362, + 526 + ], + [ + 353, + 520 + ], + [ + 353, + 489 + ], + [ + 369, + 486 + ], + [ + 393, + 487 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 362, + 439 + ], + [ + 370, + 436 + ], + [ + 377, + 434 + ], + [ + 381, + 445 + ], + [ + 388, + 461 + ], + [ + 393, + 468 + ], + [ + 393, + 480 + ], + [ + 392, + 496 + ], + [ + 391, + 501 + ], + [ + 384, + 515 + ], + [ + 383, + 523 + ], + [ + 383, + 533 + ], + [ + 387, + 541 + ], + [ + 387, + 547 + ], + [ + 381, + 547 + ], + [ + 368, + 547 + ], + [ + 365, + 546 + ], + [ + 363, + 536 + ], + [ + 362, + 524 + ], + [ + 356, + 508 + ], + [ + 355, + 501 + ], + [ + 355, + 490 + ], + [ + 358, + 471 + ], + [ + 357, + 461 + ], + [ + 358, + 450 + ], + [ + 359, + 450 + ], + [ + 360, + 445 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 125, + 295 + ], + [ + 131, + 295 + ], + [ + 133, + 291 + ], + [ + 138, + 284 + ], + [ + 143, + 282 + ], + [ + 153, + 281 + ], + [ + 162, + 284 + ], + [ + 168, + 289 + ], + [ + 173, + 296 + ], + [ + 181, + 297 + ], + [ + 182, + 372 + ], + [ + 123, + 374 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 47, + 474 + ], + [ + 19, + 481 + ], + [ + 46, + 551 + ], + [ + 33, + 552 + ], + [ + 13, + 479 + ], + [ + 2, + 501 + ], + [ + 0, + 471 + ], + [ + 3, + 468 + ], + [ + 14, + 469 + ], + [ + 21, + 470 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 65, + 482 + ], + [ + 108, + 483 + ], + [ + 109, + 567 + ], + [ + 93, + 569 + ], + [ + 76, + 568 + ], + [ + 64, + 559 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 161, + 494 + ], + [ + 178, + 496 + ], + [ + 180, + 522 + ], + [ + 187, + 523 + ], + [ + 188, + 574 + ], + [ + 169, + 577 + ], + [ + 145, + 581 + ], + [ + 104, + 574 + ], + [ + 107, + 527 + ], + [ + 115, + 526 + ], + [ + 118, + 496 + ], + [ + 132, + 493 + ], + [ + 147, + 492 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 247, + 431 + ], + [ + 253, + 424 + ], + [ + 264, + 422 + ], + [ + 269, + 424 + ], + [ + 270, + 431 + ], + [ + 271, + 439 + ], + [ + 271, + 451 + ], + [ + 277, + 452 + ], + [ + 286, + 452 + ], + [ + 290, + 459 + ], + [ + 285, + 463 + ], + [ + 277, + 474 + ], + [ + 280, + 483 + ], + [ + 283, + 494 + ], + [ + 284, + 507 + ], + [ + 284, + 516 + ], + [ + 277, + 522 + ], + [ + 274, + 537 + ], + [ + 273, + 547 + ], + [ + 273, + 553 + ], + [ + 282, + 564 + ], + [ + 273, + 566 + ], + [ + 263, + 566 + ], + [ + 254, + 564 + ], + [ + 250, + 544 + ], + [ + 251, + 530 + ], + [ + 251, + 511 + ], + [ + 244, + 521 + ], + [ + 233, + 510 + ], + [ + 241, + 499 + ], + [ + 237, + 483 + ], + [ + 239, + 460 + ], + [ + 242, + 448 + ], + [ + 242, + 438 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1115, + 454 + ], + [ + 1105, + 452 + ], + [ + 1100, + 428 + ], + [ + 1095, + 420 + ], + [ + 1085, + 417 + ], + [ + 1076, + 416 + ], + [ + 1072, + 412 + ], + [ + 1067, + 401 + ], + [ + 1056, + 393 + ], + [ + 1046, + 383 + ], + [ + 1042, + 373 + ], + [ + 1039, + 362 + ], + [ + 1046, + 349 + ], + [ + 1051, + 344 + ], + [ + 1051, + 328 + ], + [ + 1051, + 312 + ], + [ + 1059, + 296 + ], + [ + 1071, + 285 + ], + [ + 1086, + 279 + ], + [ + 1118, + 273 + ], + [ + 1164, + 273 + ], + [ + 1197, + 287 + ], + [ + 1196, + 391 + ], + [ + 1187, + 418 + ], + [ + 1184, + 430 + ], + [ + 1165, + 459 + ] + ] + }, + { + "label": "cargroup", + 
"polygon": [ + [ + 1109, + 462 + ], + [ + 1114, + 451 + ], + [ + 1123, + 450 + ], + [ + 1138, + 445 + ], + [ + 1151, + 444 + ], + [ + 1165, + 446 + ], + [ + 1159, + 488 + ], + [ + 1142, + 492 + ], + [ + 1134, + 492 + ], + [ + 1121, + 484 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1191, + 449 + ], + [ + 1189, + 242 + ], + [ + 1228, + 137 + ], + [ + 1238, + 63 + ], + [ + 1252, + 41 + ], + [ + 1256, + 43 + ], + [ + 1265, + 40 + ], + [ + 1264, + 37 + ], + [ + 1257, + 28 + ], + [ + 1271, + 0 + ], + [ + 2048, + 1 + ], + [ + 2048, + 556 + ], + [ + 1834, + 540 + ], + [ + 1698, + 524 + ], + [ + 1517, + 511 + ], + [ + 1459, + 505 + ], + [ + 1288, + 472 + ], + [ + 1208, + 456 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1160, + 430 + ], + [ + 1183, + 428 + ], + [ + 1227, + 429 + ], + [ + 1235, + 437 + ], + [ + 1173, + 485 + ], + [ + 1153, + 475 + ], + [ + 1155, + 447 + ], + [ + 1157, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1247, + 451 + ], + [ + 1177, + 473 + ], + [ + 1177, + 459 + ], + [ + 1178, + 445 + ], + [ + 1185, + 436 + ], + [ + 1221, + 432 + ], + [ + 1237, + 435 + ], + [ + 1241, + 438 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1178, + 499 + ], + [ + 1162, + 499 + ], + [ + 1155, + 503 + ], + [ + 1147, + 499 + ], + [ + 1144, + 494 + ], + [ + 1146, + 487 + ], + [ + 1155, + 475 + ], + [ + 1158, + 472 + ], + [ + 1164, + 464 + ], + [ + 1173, + 460 + ], + [ + 1183, + 457 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1199, + 415 + ], + [ + 1191, + 409 + ], + [ + 1181, + 410 + ], + [ + 1165, + 410 + ], + [ + 1149, + 406 + ], + [ + 1134, + 396 + ], + [ + 1124, + 382 + ], + [ + 1120, + 366 + ], + [ + 1130, + 346 + ], + [ + 1142, + 332 + ], + [ + 1128, + 324 + ], + [ + 1104, + 315 + ], + [ + 1101, + 303 + ], + [ + 1087, + 288 + ], + [ + 1079, + 276 + ], + [ + 1080, + 270 + ], + [ + 1065, + 263 + ], + [ + 1061, + 253 + ], + [ + 1058, + 245 + ], + [ + 1056, + 239 + ], + [ + 1059, + 228 + ], + [ + 1066, + 223 + ], + [ + 1070, + 218 + ], + [ + 1067, + 208 + ], + [ + 1070, + 197 + ], + [ + 1080, + 189 + ], + [ + 1085, + 192 + ], + [ + 1086, + 194 + ], + [ + 1088, + 191 + ], + [ + 1088, + 185 + ], + [ + 1093, + 181 + ], + [ + 1108, + 176 + ], + [ + 1113, + 178 + ], + [ + 1119, + 175 + ], + [ + 1128, + 173 + ], + [ + 1135, + 164 + ], + [ + 1135, + 162 + ], + [ + 1135, + 159 + ], + [ + 1141, + 153 + ], + [ + 1144, + 147 + ], + [ + 1145, + 141 + ], + [ + 1152, + 130 + ], + [ + 1165, + 134 + ], + [ + 1169, + 144 + ], + [ + 1174, + 145 + ], + [ + 1183, + 159 + ], + [ + 1211, + 174 + ], + [ + 1244, + 187 + ], + [ + 1272, + 203 + ], + [ + 1292, + 236 + ], + [ + 1296, + 256 + ], + [ + 1294, + 274 + ], + [ + 1280, + 284 + ], + [ + 1267, + 294 + ], + [ + 1253, + 300 + ], + [ + 1258, + 307 + ], + [ + 1259, + 313 + ], + [ + 1252, + 324 + ], + [ + 1249, + 330 + ], + [ + 1238, + 334 + ], + [ + 1236, + 339 + ], + [ + 1222, + 343 + ], + [ + 1214, + 350 + ], + [ + 1217, + 351 + ], + [ + 1225, + 351 + ], + [ + 1231, + 354 + ], + [ + 1222, + 369 + ], + [ + 1233, + 368 + ], + [ + 1240, + 375 + ], + [ + 1241, + 382 + ], + [ + 1226, + 388 + ], + [ + 1226, + 394 + ], + [ + 1223, + 406 + ], + [ + 1212, + 417 + ], + [ + 1209, + 430 + ], + [ + 1208, + 451 + ], + [ + 1201, + 451 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1168, + 508 + ], + [ + 1167, + 489 + ], + [ + 1171, + 471 + ], + [ + 1166, + 465 + ], + [ + 1167, + 461 + ], + [ + 1173, + 460 + ], + [ + 1177, + 460 + ], + [ + 1182, + 453 + ], + [ + 1190, + 446 + ], + [ + 1206, + 444 + ], + [ 
+ 1241, + 443 + ], + [ + 1248, + 470 + ], + [ + 1209, + 512 + ], + [ + 1190, + 512 + ], + [ + 1188, + 518 + ], + [ + 1184, + 523 + ], + [ + 1176, + 519 + ], + [ + 1169, + 515 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1266, + 449 + ], + [ + 1225, + 532 + ], + [ + 1214, + 530 + ], + [ + 1204, + 522 + ], + [ + 1205, + 490 + ], + [ + 1206, + 475 + ], + [ + 1196, + 473 + ], + [ + 1197, + 466 + ], + [ + 1206, + 465 + ], + [ + 1209, + 468 + ], + [ + 1215, + 458 + ], + [ + 1223, + 449 + ], + [ + 1239, + 444 + ], + [ + 1260, + 441 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1314, + 425 + ], + [ + 1324, + 418 + ], + [ + 1354, + 409 + ], + [ + 1368, + 410 + ], + [ + 1380, + 411 + ], + [ + 1391, + 407 + ], + [ + 1397, + 405 + ], + [ + 1415, + 408 + ], + [ + 1425, + 416 + ], + [ + 1434, + 422 + ], + [ + 1438, + 432 + ], + [ + 1426, + 467 + ], + [ + 1398, + 462 + ], + [ + 1311, + 444 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1278, + 371 + ], + [ + 1280, + 447 + ], + [ + 1277, + 446 + ], + [ + 1273, + 363 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1285, + 308 + ], + [ + 1285, + 336 + ], + [ + 1290, + 341 + ], + [ + 1294, + 381 + ], + [ + 1271, + 380 + ], + [ + 1266, + 346 + ], + [ + 1261, + 344 + ], + [ + 1266, + 321 + ], + [ + 1258, + 320 + ], + [ + 1258, + 312 + ], + [ + 1258, + 309 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1300, + 346 + ], + [ + 1304, + 353 + ], + [ + 1302, + 360 + ], + [ + 1301, + 367 + ], + [ + 1305, + 372 + ], + [ + 1308, + 392 + ], + [ + 1299, + 397 + ], + [ + 1294, + 392 + ], + [ + 1291, + 378 + ], + [ + 1292, + 359 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1281, + 291 + ], + [ + 1284, + 245 + ], + [ + 1281, + 223 + ], + [ + 1262, + 211 + ], + [ + 1243, + 214 + ], + [ + 1220, + 215 + ], + [ + 1201, + 212 + ], + [ + 1185, + 198 + ], + [ + 1183, + 175 + ], + [ + 1183, + 154 + ], + [ + 1181, + 141 + ], + [ + 1175, + 132 + ], + [ + 1171, + 131 + ], + [ + 1169, + 127 + ], + [ + 1168, + 122 + ], + [ + 1177, + 121 + ], + [ + 1182, + 120 + ], + [ + 1184, + 115 + ], + [ + 1194, + 110 + ], + [ + 1207, + 104 + ], + [ + 1215, + 97 + ], + [ + 1208, + 94 + ], + [ + 1197, + 93 + ], + [ + 1195, + 91 + ], + [ + 1192, + 85 + ], + [ + 1197, + 78 + ], + [ + 1203, + 76 + ], + [ + 1218, + 76 + ], + [ + 1224, + 73 + ], + [ + 1225, + 64 + ], + [ + 1214, + 60 + ], + [ + 1211, + 59 + ], + [ + 1214, + 48 + ], + [ + 1212, + 42 + ], + [ + 1220, + 32 + ], + [ + 1228, + 25 + ], + [ + 1230, + 19 + ], + [ + 1243, + 12 + ], + [ + 1243, + 9 + ], + [ + 1243, + 0 + ], + [ + 1399, + 0 + ], + [ + 1394, + 12 + ], + [ + 1381, + 25 + ], + [ + 1371, + 45 + ], + [ + 1371, + 48 + ], + [ + 1375, + 56 + ], + [ + 1379, + 69 + ], + [ + 1379, + 76 + ], + [ + 1382, + 87 + ], + [ + 1378, + 100 + ], + [ + 1371, + 101 + ], + [ + 1374, + 108 + ], + [ + 1386, + 116 + ], + [ + 1399, + 117 + ], + [ + 1407, + 121 + ], + [ + 1407, + 138 + ], + [ + 1401, + 160 + ], + [ + 1407, + 170 + ], + [ + 1413, + 174 + ], + [ + 1429, + 178 + ], + [ + 1430, + 191 + ], + [ + 1412, + 197 + ], + [ + 1408, + 206 + ], + [ + 1401, + 211 + ], + [ + 1397, + 212 + ], + [ + 1397, + 215 + ], + [ + 1395, + 222 + ], + [ + 1386, + 227 + ], + [ + 1375, + 226 + ], + [ + 1351, + 234 + ], + [ + 1349, + 242 + ], + [ + 1343, + 243 + ], + [ + 1334, + 244 + ], + [ + 1322, + 243 + ], + [ + 1313, + 238 + ], + [ + 1295, + 232 + ], + [ + 1294, + 233 + ], + [ + 1295, + 290 + ], + [ + 1296, + 310 + ], + [ + 1300, + 338 + ], + [ + 1301, + 380 + ], + [ + 1301, + 398 + ], + [ + 
1301, + 414 + ], + [ + 1303, + 430 + ], + [ + 1305, + 454 + ], + [ + 1295, + 452 + ], + [ + 1290, + 408 + ], + [ + 1286, + 329 + ], + [ + 1286, + 310 + ], + [ + 1283, + 297 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1290, + 439 + ], + [ + 1304, + 447 + ], + [ + 1280, + 541 + ], + [ + 1257, + 545 + ], + [ + 1250, + 552 + ], + [ + 1228, + 551 + ], + [ + 1220, + 540 + ], + [ + 1217, + 504 + ], + [ + 1220, + 484 + ], + [ + 1215, + 477 + ], + [ + 1214, + 474 + ], + [ + 1220, + 467 + ], + [ + 1230, + 464 + ], + [ + 1235, + 457 + ], + [ + 1240, + 446 + ], + [ + 1252, + 440 + ], + [ + 1278, + 438 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1281, + 461 + ], + [ + 1286, + 450 + ], + [ + 1294, + 442 + ], + [ + 1308, + 438 + ], + [ + 1344, + 433 + ], + [ + 1382, + 431 + ], + [ + 1407, + 434 + ], + [ + 1426, + 451 + ], + [ + 1445, + 483 + ], + [ + 1451, + 503 + ], + [ + 1455, + 548 + ], + [ + 1452, + 566 + ], + [ + 1433, + 569 + ], + [ + 1424, + 567 + ], + [ + 1414, + 562 + ], + [ + 1396, + 559 + ], + [ + 1391, + 552 + ], + [ + 1371, + 555 + ], + [ + 1355, + 555 + ], + [ + 1347, + 555 + ], + [ + 1343, + 555 + ], + [ + 1335, + 555 + ], + [ + 1328, + 555 + ], + [ + 1313, + 557 + ], + [ + 1308, + 559 + ], + [ + 1308, + 567 + ], + [ + 1302, + 580 + ], + [ + 1294, + 580 + ], + [ + 1285, + 576 + ], + [ + 1280, + 568 + ], + [ + 1265, + 566 + ], + [ + 1261, + 552 + ], + [ + 1260, + 533 + ], + [ + 1259, + 512 + ], + [ + 1262, + 497 + ], + [ + 1265, + 492 + ], + [ + 1265, + 491 + ], + [ + 1253, + 487 + ], + [ + 1251, + 478 + ], + [ + 1254, + 477 + ], + [ + 1266, + 476 + ], + [ + 1269, + 484 + ], + [ + 1272, + 475 + ], + [ + 1275, + 468 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1599, + 308 + ], + [ + 1601, + 451 + ], + [ + 1592, + 454 + ], + [ + 1589, + 312 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1613, + 285 + ], + [ + 1613, + 318 + ], + [ + 1571, + 318 + ], + [ + 1572, + 287 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1612, + 270 + ], + [ + 1613, + 285 + ], + [ + 1570, + 288 + ], + [ + 1571, + 270 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1612, + 254 + ], + [ + 1612, + 270 + ], + [ + 1569, + 270 + ], + [ + 1570, + 254 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1611, + 206 + ], + [ + 1613, + 255 + ], + [ + 1566, + 256 + ], + [ + 1566, + 208 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1430, + 106 + ], + [ + 1453, + 96 + ], + [ + 1455, + 143 + ], + [ + 1440, + 146 + ], + [ + 1440, + 139 + ], + [ + 1433, + 139 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1459, + 93 + ], + [ + 1470, + 595 + ], + [ + 1448, + 596 + ], + [ + 1449, + 93 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1589, + 114 + ], + [ + 1589, + 147 + ], + [ + 1543, + 148 + ], + [ + 1543, + 161 + ], + [ + 1506, + 162 + ], + [ + 1504, + 146 + ], + [ + 1461, + 148 + ], + [ + 1459, + 116 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1423, + 201 + ], + [ + 1442, + 199 + ], + [ + 1442, + 194 + ], + [ + 1474, + 192 + ], + [ + 1474, + 304 + ], + [ + 1447, + 301 + ], + [ + 1447, + 293 + ], + [ + 1422, + 284 + ], + [ + 1422, + 268 + ], + [ + 1449, + 266 + ], + [ + 1449, + 258 + ], + [ + 1422, + 256 + ], + [ + 1422, + 234 + ], + [ + 1442, + 234 + ], + [ + 1443, + 224 + ], + [ + 1421, + 214 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1539, + 284 + ], + [ + 1540, + 311 + ], + [ + 1488, + 314 + ], + [ + 1488, + 288 + ] + ] + }, + { + "label": 
"traffic sign", + "polygon": [ + [ + 1538, + 211 + ], + [ + 1537, + 240 + ], + [ + 1487, + 240 + ], + [ + 1487, + 215 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1530, + 243 + ], + [ + 1537, + 249 + ], + [ + 1539, + 258 + ], + [ + 1540, + 268 + ], + [ + 1535, + 277 + ], + [ + 1529, + 285 + ], + [ + 1517, + 287 + ], + [ + 1499, + 287 + ], + [ + 1488, + 276 + ], + [ + 1485, + 262 + ], + [ + 1488, + 251 + ], + [ + 1493, + 244 + ], + [ + 1501, + 241 + ], + [ + 1518, + 238 + ], + [ + 1527, + 239 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1501, + 163 + ], + [ + 1510, + 162 + ], + [ + 1523, + 166 + ], + [ + 1533, + 175 + ], + [ + 1538, + 190 + ], + [ + 1533, + 200 + ], + [ + 1527, + 209 + ], + [ + 1514, + 213 + ], + [ + 1501, + 213 + ], + [ + 1492, + 210 + ], + [ + 1487, + 201 + ], + [ + 1484, + 187 + ], + [ + 1485, + 178 + ], + [ + 1493, + 167 + ], + [ + 1495, + 164 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1526, + 482 + ], + [ + 1529, + 464 + ], + [ + 1514, + 469 + ], + [ + 1512, + 461 + ], + [ + 1526, + 449 + ], + [ + 1541, + 437 + ], + [ + 1559, + 425 + ], + [ + 1588, + 425 + ], + [ + 1588, + 436 + ], + [ + 1563, + 437 + ], + [ + 1558, + 445 + ], + [ + 1567, + 455 + ], + [ + 1583, + 460 + ], + [ + 1607, + 464 + ], + [ + 1623, + 464 + ], + [ + 1644, + 494 + ], + [ + 1646, + 530 + ], + [ + 1603, + 539 + ], + [ + 1561, + 554 + ], + [ + 1539, + 555 + ], + [ + 1522, + 546 + ], + [ + 1514, + 528 + ], + [ + 1512, + 516 + ], + [ + 1517, + 500 + ], + [ + 1522, + 491 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1862, + 507 + ], + [ + 2048, + 519 + ], + [ + 2048, + 657 + ], + [ + 1981, + 652 + ], + [ + 1903, + 642 + ], + [ + 1862, + 627 + ], + [ + 1852, + 592 + ], + [ + 1851, + 552 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1875, + 479 + ], + [ + 1895, + 468 + ], + [ + 1907, + 463 + ], + [ + 1927, + 463 + ], + [ + 1944, + 472 + ], + [ + 1964, + 479 + ], + [ + 1981, + 477 + ], + [ + 2013, + 468 + ], + [ + 2040, + 464 + ], + [ + 2048, + 460 + ], + [ + 2048, + 537 + ], + [ + 2036, + 556 + ], + [ + 2006, + 562 + ], + [ + 1955, + 560 + ], + [ + 1923, + 554 + ], + [ + 1893, + 553 + ], + [ + 1875, + 571 + ], + [ + 1874, + 584 + ], + [ + 1870, + 600 + ], + [ + 1875, + 621 + ], + [ + 1861, + 638 + ], + [ + 1836, + 643 + ], + [ + 1806, + 645 + ], + [ + 1775, + 638 + ], + [ + 1781, + 618 + ], + [ + 1799, + 599 + ], + [ + 1820, + 575 + ], + [ + 1830, + 557 + ], + [ + 1830, + 533 + ], + [ + 1826, + 519 + ], + [ + 1824, + 499 + ], + [ + 1843, + 484 + ], + [ + 1871, + 482 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1695, + 419 + ], + [ + 1714, + 424 + ], + [ + 1730, + 426 + ], + [ + 1744, + 430 + ], + [ + 1759, + 438 + ], + [ + 1764, + 459 + ], + [ + 1758, + 478 + ], + [ + 1762, + 486 + ], + [ + 1772, + 501 + ], + [ + 1772, + 519 + ], + [ + 1751, + 545 + ], + [ + 1748, + 561 + ], + [ + 1752, + 585 + ], + [ + 1753, + 602 + ], + [ + 1740, + 611 + ], + [ + 1715, + 619 + ], + [ + 1689, + 617 + ], + [ + 1640, + 616 + ], + [ + 1616, + 616 + ], + [ + 1599, + 610 + ], + [ + 1588, + 593 + ], + [ + 1595, + 563 + ], + [ + 1615, + 543 + ], + [ + 1626, + 518 + ], + [ + 1626, + 506 + ], + [ + 1615, + 495 + ], + [ + 1604, + 483 + ], + [ + 1591, + 473 + ], + [ + 1587, + 457 + ], + [ + 1596, + 442 + ], + [ + 1600, + 434 + ], + [ + 1608, + 414 + ], + [ + 1620, + 410 + ], + [ + 1629, + 413 + ], + [ + 1645, + 409 + ], + [ + 1663, + 411 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1569, + 459 + ], + [ + 1581, + 459 
+ ], + [ + 1609, + 463 + ], + [ + 1602, + 483 + ], + [ + 1601, + 498 + ], + [ + 1604, + 519 + ], + [ + 1617, + 529 + ], + [ + 1622, + 547 + ], + [ + 1619, + 569 + ], + [ + 1614, + 588 + ], + [ + 1610, + 604 + ], + [ + 1603, + 616 + ], + [ + 1591, + 626 + ], + [ + 1571, + 616 + ], + [ + 1569, + 599 + ], + [ + 1559, + 595 + ], + [ + 1561, + 616 + ], + [ + 1552, + 615 + ], + [ + 1546, + 593 + ], + [ + 1539, + 611 + ], + [ + 1524, + 618 + ], + [ + 1514, + 619 + ], + [ + 1505, + 614 + ], + [ + 1493, + 599 + ], + [ + 1488, + 570 + ], + [ + 1491, + 550 + ], + [ + 1502, + 525 + ], + [ + 1518, + 512 + ], + [ + 1527, + 511 + ], + [ + 1528, + 496 + ], + [ + 1523, + 487 + ], + [ + 1531, + 480 + ], + [ + 1535, + 475 + ], + [ + 1531, + 464 + ], + [ + 1521, + 466 + ], + [ + 1515, + 459 + ], + [ + 1523, + 454 + ], + [ + 1534, + 453 + ], + [ + 1545, + 457 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1075, + 474 + ], + [ + 1076, + 466 + ], + [ + 1072, + 465 + ], + [ + 1073, + 461 + ], + [ + 1081, + 459 + ], + [ + 1084, + 452 + ], + [ + 1090, + 451 + ], + [ + 1114, + 450 + ], + [ + 1123, + 452 + ], + [ + 1128, + 462 + ], + [ + 1131, + 462 + ], + [ + 1133, + 464 + ], + [ + 1130, + 467 + ], + [ + 1131, + 484 + ], + [ + 1132, + 492 + ], + [ + 1130, + 497 + ], + [ + 1124, + 498 + ], + [ + 1122, + 492 + ], + [ + 1087, + 491 + ], + [ + 1083, + 497 + ], + [ + 1074, + 496 + ], + [ + 1072, + 493 + ], + [ + 1073, + 481 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 978, + 410 + ], + [ + 978, + 453 + ], + [ + 982, + 458 + ], + [ + 982, + 415 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1003, + 447 + ], + [ + 1024, + 449 + ], + [ + 1031, + 456 + ], + [ + 1037, + 466 + ], + [ + 1038, + 482 + ], + [ + 1038, + 492 + ], + [ + 1038, + 496 + ], + [ + 1032, + 497 + ], + [ + 1025, + 497 + ], + [ + 1025, + 491 + ], + [ + 1012, + 491 + ], + [ + 991, + 482 + ], + [ + 989, + 465 + ], + [ + 992, + 451 + ], + [ + 998, + 449 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 918, + 510 + ], + [ + 917, + 498 + ], + [ + 922, + 477 + ], + [ + 927, + 462 + ], + [ + 934, + 450 + ], + [ + 939, + 447 + ], + [ + 958, + 445 + ], + [ + 984, + 445 + ], + [ + 998, + 446 + ], + [ + 1007, + 458 + ], + [ + 1013, + 466 + ], + [ + 1022, + 466 + ], + [ + 1026, + 468 + ], + [ + 1025, + 474 + ], + [ + 1018, + 476 + ], + [ + 1022, + 491 + ], + [ + 1022, + 515 + ], + [ + 1022, + 524 + ], + [ + 1015, + 527 + ], + [ + 1004, + 525 + ], + [ + 1003, + 517 + ], + [ + 972, + 519 + ], + [ + 940, + 519 + ], + [ + 940, + 525 + ], + [ + 937, + 528 + ], + [ + 927, + 528 + ], + [ + 920, + 524 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1340, + 496 + ], + [ + 1392, + 493 + ], + [ + 1395, + 506 + ], + [ + 1342, + 510 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 940, + 169 + ], + [ + 964, + 168 + ], + [ + 962, + 176 + ], + [ + 939, + 178 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 940, + 169 + ], + [ + 964, + 168 + ], + [ + 962, + 176 + ], + [ + 939, + 178 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1875, + 479 + ], + [ + 1895, + 468 + ], + [ + 1907, + 463 + ], + [ + 1927, + 463 + ], + [ + 1944, + 472 + ], + [ + 1964, + 479 + ], + [ + 1981, + 477 + ], + [ + 2013, + 468 + ], + [ + 2040, + 464 + ], + [ + 2048, + 460 + ], + [ + 2048, + 537 + ], + [ + 2036, + 556 + ], + [ + 2006, + 562 + ], + [ + 1955, + 560 + ], + [ + 1923, + 554 + ], + [ + 1893, + 553 + ], + [ + 1875, + 571 + ], + [ + 1874, + 584 + ], + [ + 1870, + 600 + ], + [ + 1875, + 621 + ], + [ + 1861, + 638 + ], 
+ [ + 1836, + 643 + ], + [ + 1806, + 645 + ], + [ + 1775, + 638 + ], + [ + 1781, + 618 + ], + [ + 1799, + 599 + ], + [ + 1820, + 575 + ], + [ + 1830, + 557 + ], + [ + 1830, + 533 + ], + [ + 1826, + 519 + ], + [ + 1824, + 499 + ], + [ + 1843, + 484 + ], + [ + 1871, + 482 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000014_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000014_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..5bfe5afb5ce945d49f7a370d1a5161c5e9caaf02 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000014_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000014_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000014_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..541ec70e2286ced07d7adecbaacc511d747a42bf Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000014_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000015_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000015_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d3c73b619cccc87e0d480e7ca101efd6cbd29184 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000015_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000015_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000015_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..34540dae4c3c608f7da47e6ab8e1dda43d112e15 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000015_000019_gtFine_polygons.json @@ -0,0 +1,5668 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 705, + 1 + ], + [ + 1211, + 1 + ], + [ + 1223, + 332 + ], + [ + 769, + 329 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 914, + 451 + ], + [ + 1086, + 455 + ], + [ + 1255, + 492 + ], + [ + 1513, + 550 + ], + [ + 1754, + 589 + ], + [ + 2048, + 683 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 1, + 759 + ], + [ + 768, + 486 + ], + [ + 895, + 461 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1133, + 297 + ], + [ + 1139, + 471 + ], + [ + 1109, + 477 + ], + [ + 1067, + 472 + ], + [ + 991, + 468 + ], + [ + 962, + 468 + ], + [ + 886, + 474 + ], + [ + 885, + 324 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 922, + 379 + ], + [ + 916, + 370 + ], + [ + 906, + 339 + ], + [ + 908, + 324 + ], + [ + 958, + 276 + ], + [ + 
962, + 277 + ], + [ + 981, + 278 + ], + [ + 993, + 280 + ], + [ + 1008, + 284 + ], + [ + 1026, + 276 + ], + [ + 1057, + 277 + ], + [ + 1072, + 297 + ], + [ + 1077, + 326 + ], + [ + 1080, + 367 + ], + [ + 1069, + 385 + ], + [ + 1067, + 388 + ], + [ + 1070, + 394 + ], + [ + 1064, + 400 + ], + [ + 1053, + 401 + ], + [ + 1051, + 405 + ], + [ + 1054, + 416 + ], + [ + 1053, + 432 + ], + [ + 1046, + 450 + ], + [ + 997, + 462 + ], + [ + 988, + 455 + ], + [ + 976, + 451 + ], + [ + 963, + 444 + ], + [ + 944, + 433 + ], + [ + 926, + 410 + ], + [ + 924, + 398 + ], + [ + 922, + 384 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1122, + 446 + ], + [ + 1113, + 445 + ], + [ + 1106, + 444 + ], + [ + 1102, + 439 + ], + [ + 1099, + 436 + ], + [ + 1097, + 430 + ], + [ + 1099, + 420 + ], + [ + 1100, + 415 + ], + [ + 1096, + 407 + ], + [ + 1091, + 398 + ], + [ + 1084, + 398 + ], + [ + 1075, + 390 + ], + [ + 1071, + 378 + ], + [ + 1049, + 360 + ], + [ + 1046, + 321 + ], + [ + 1062, + 284 + ], + [ + 1135, + 236 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1017, + 434 + ], + [ + 1041, + 434 + ], + [ + 1048, + 437 + ], + [ + 1059, + 452 + ], + [ + 1001, + 472 + ], + [ + 989, + 471 + ], + [ + 989, + 466 + ], + [ + 993, + 459 + ], + [ + 1000, + 457 + ], + [ + 1008, + 445 + ], + [ + 1010, + 436 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 0, + 487 + ], + [ + 712, + 473 + ], + [ + 895, + 526 + ], + [ + 861, + 563 + ], + [ + 751, + 628 + ], + [ + 664, + 688 + ], + [ + 554, + 764 + ], + [ + 491, + 808 + ], + [ + 382, + 839 + ], + [ + 299, + 834 + ], + [ + 145, + 828 + ], + [ + 76, + 886 + ], + [ + 0, + 934 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 890, + 241 + ], + [ + 889, + 459 + ], + [ + 544, + 514 + ], + [ + 366, + 533 + ], + [ + 321, + 533 + ], + [ + 320, + 536 + ], + [ + 273, + 544 + ], + [ + 222, + 545 + ], + [ + 196, + 544 + ], + [ + 192, + 550 + ], + [ + 174, + 554 + ], + [ + 128, + 554 + ], + [ + 74, + 546 + ], + [ + 0, + 545 + ], + [ + 2, + 1 + ], + [ + 730, + 0 + ], + [ + 754, + 31 + ], + [ + 741, + 41 + ], + [ + 741, + 64 + ], + [ + 802, + 117 + ], + [ + 840, + 145 + ], + [ + 887, + 220 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1013, + 475 + ], + [ + 1009, + 465 + ], + [ + 1016, + 449 + ], + [ + 1026, + 447 + ], + [ + 1036, + 446 + ], + [ + 1051, + 445 + ], + [ + 1057, + 451 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1022, + 396 + ], + [ + 1014, + 387 + ], + [ + 1004, + 388 + ], + [ + 991, + 384 + ], + [ + 980, + 371 + ], + [ + 968, + 365 + ], + [ + 954, + 353 + ], + [ + 951, + 333 + ], + [ + 956, + 321 + ], + [ + 964, + 307 + ], + [ + 971, + 294 + ], + [ + 976, + 285 + ], + [ + 995, + 271 + ], + [ + 1031, + 255 + ], + [ + 1062, + 256 + ], + [ + 1075, + 273 + ], + [ + 1084, + 296 + ], + [ + 1087, + 324 + ], + [ + 1087, + 349 + ], + [ + 1080, + 371 + ], + [ + 1073, + 386 + ], + [ + 1072, + 393 + ], + [ + 1061, + 398 + ], + [ + 1039, + 391 + ], + [ + 1032, + 391 + ], + [ + 1030, + 401 + ], + [ + 1031, + 451 + ], + [ + 1025, + 454 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1033, + 422 + ], + [ + 1030, + 454 + ], + [ + 1031, + 454 + ], + [ + 1032, + 416 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1037, + 424 + ], + [ + 1037, + 430 + ], + [ + 1027, + 429 + ], + [ + 1027, + 424 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1033, + 408 + ], + [ + 1038, + 410 + ], + [ + 1039, + 417 + ], + [ + 1037, + 422 + ], + [ + 1028, + 424 + ], + [ + 1024, + 422 + ], + [ + 1024, 
+ 411 + ], + [ + 1028, + 408 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1058, + 426 + ], + [ + 1087, + 426 + ], + [ + 1087, + 462 + ], + [ + 1058, + 461 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1000, + 479 + ], + [ + 999, + 472 + ], + [ + 1000, + 461 + ], + [ + 1006, + 456 + ], + [ + 1016, + 450 + ], + [ + 1035, + 445 + ], + [ + 1049, + 445 + ], + [ + 1061, + 448 + ], + [ + 1067, + 450 + ], + [ + 1074, + 460 + ], + [ + 1075, + 473 + ], + [ + 1069, + 478 + ], + [ + 1060, + 478 + ], + [ + 1051, + 479 + ], + [ + 1037, + 480 + ], + [ + 1034, + 476 + ], + [ + 1015, + 476 + ], + [ + 1009, + 479 + ], + [ + 1003, + 480 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1120, + 225 + ], + [ + 1199, + 198 + ], + [ + 1203, + 482 + ], + [ + 1116, + 483 + ], + [ + 1116, + 422 + ], + [ + 1112, + 319 + ], + [ + 1110, + 261 + ], + [ + 1108, + 229 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1060, + 60 + ], + [ + 1060, + 49 + ], + [ + 1060, + 39 + ], + [ + 1070, + 31 + ], + [ + 1070, + 24 + ], + [ + 1073, + 12 + ], + [ + 1078, + 5 + ], + [ + 1098, + 0 + ], + [ + 1210, + 0 + ], + [ + 1202, + 2 + ], + [ + 1191, + 436 + ], + [ + 1173, + 431 + ], + [ + 1169, + 408 + ], + [ + 1168, + 399 + ], + [ + 1169, + 391 + ], + [ + 1177, + 381 + ], + [ + 1183, + 362 + ], + [ + 1183, + 344 + ], + [ + 1176, + 344 + ], + [ + 1149, + 350 + ], + [ + 1136, + 347 + ], + [ + 1119, + 333 + ], + [ + 1102, + 319 + ], + [ + 1088, + 318 + ], + [ + 1060, + 319 + ], + [ + 1035, + 299 + ], + [ + 1031, + 281 + ], + [ + 1023, + 269 + ], + [ + 1002, + 277 + ], + [ + 994, + 283 + ], + [ + 980, + 279 + ], + [ + 977, + 272 + ], + [ + 975, + 265 + ], + [ + 965, + 252 + ], + [ + 968, + 241 + ], + [ + 968, + 234 + ], + [ + 969, + 228 + ], + [ + 962, + 226 + ], + [ + 956, + 223 + ], + [ + 956, + 218 + ], + [ + 966, + 216 + ], + [ + 970, + 214 + ], + [ + 975, + 213 + ], + [ + 976, + 209 + ], + [ + 986, + 201 + ], + [ + 988, + 197 + ], + [ + 984, + 191 + ], + [ + 983, + 186 + ], + [ + 982, + 173 + ], + [ + 981, + 158 + ], + [ + 982, + 145 + ], + [ + 984, + 138 + ], + [ + 984, + 131 + ], + [ + 985, + 126 + ], + [ + 981, + 118 + ], + [ + 979, + 114 + ], + [ + 987, + 108 + ], + [ + 988, + 95 + ], + [ + 985, + 89 + ], + [ + 985, + 83 + ], + [ + 987, + 81 + ], + [ + 996, + 78 + ], + [ + 1004, + 76 + ], + [ + 1008, + 77 + ], + [ + 1012, + 79 + ], + [ + 1019, + 77 + ], + [ + 1021, + 72 + ], + [ + 1021, + 62 + ], + [ + 1027, + 57 + ], + [ + 1032, + 56 + ], + [ + 1036, + 58 + ], + [ + 1036, + 64 + ], + [ + 1035, + 72 + ], + [ + 1036, + 74 + ], + [ + 1038, + 79 + ], + [ + 1044, + 74 + ], + [ + 1045, + 69 + ], + [ + 1049, + 66 + ], + [ + 1057, + 62 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1089, + 441 + ], + [ + 1105, + 442 + ], + [ + 1113, + 454 + ], + [ + 1116, + 465 + ], + [ + 1116, + 473 + ], + [ + 1114, + 479 + ], + [ + 1111, + 482 + ], + [ + 1104, + 483 + ], + [ + 1101, + 478 + ], + [ + 1086, + 478 + ], + [ + 1077, + 478 + ], + [ + 1076, + 483 + ], + [ + 1072, + 483 + ], + [ + 1067, + 482 + ], + [ + 1064, + 478 + ], + [ + 1064, + 473 + ], + [ + 1064, + 464 + ], + [ + 1065, + 460 + ], + [ + 1067, + 451 + ], + [ + 1072, + 446 + ], + [ + 1075, + 442 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1271, + 470 + ], + [ + 1614, + 487 + ], + [ + 1845, + 519 + ], + [ + 2048, + 547 + ], + [ + 2048, + 834 + ], + [ + 1640, + 693 + ], + [ + 1391, + 593 + ], + [ + 1263, + 534 + ], + [ + 1197, + 520 + ], + [ + 1157, + 507 + ], + [ + 1126, + 490 + ], + [ + 1114, + 483 + ], + [ + 1118, 
+ 479 + ], + [ + 1165, + 477 + ], + [ + 1186, + 471 + ], + [ + 1221, + 470 + ], + [ + 1255, + 472 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1167, + 56 + ], + [ + 1182, + 15 + ], + [ + 1182, + 0 + ], + [ + 2047, + 1 + ], + [ + 2048, + 567 + ], + [ + 1624, + 526 + ], + [ + 1474, + 513 + ], + [ + 1385, + 511 + ], + [ + 1373, + 511 + ], + [ + 1302, + 505 + ], + [ + 1209, + 482 + ], + [ + 1184, + 474 + ], + [ + 1181, + 391 + ], + [ + 1178, + 218 + ], + [ + 1176, + 71 + ], + [ + 1165, + 63 + ], + [ + 1165, + 60 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1213, + 510 + ], + [ + 1203, + 505 + ], + [ + 1196, + 502 + ], + [ + 1190, + 493 + ], + [ + 1188, + 477 + ], + [ + 1188, + 474 + ], + [ + 1185, + 469 + ], + [ + 1184, + 464 + ], + [ + 1188, + 455 + ], + [ + 1189, + 447 + ], + [ + 1188, + 444 + ], + [ + 1180, + 431 + ], + [ + 1180, + 427 + ], + [ + 1195, + 425 + ], + [ + 1201, + 415 + ], + [ + 1201, + 405 + ], + [ + 1203, + 398 + ], + [ + 1199, + 395 + ], + [ + 1189, + 395 + ], + [ + 1186, + 393 + ], + [ + 1187, + 384 + ], + [ + 1197, + 380 + ], + [ + 1201, + 374 + ], + [ + 1201, + 369 + ], + [ + 1196, + 363 + ], + [ + 1194, + 351 + ], + [ + 1198, + 341 + ], + [ + 1221, + 349 + ], + [ + 1231, + 356 + ], + [ + 1244, + 360 + ], + [ + 1255, + 355 + ], + [ + 1261, + 353 + ], + [ + 1268, + 353 + ], + [ + 1279, + 353 + ], + [ + 1279, + 322 + ], + [ + 1276, + 302 + ], + [ + 1270, + 295 + ], + [ + 1266, + 282 + ], + [ + 1266, + 273 + ], + [ + 1260, + 270 + ], + [ + 1269, + 262 + ], + [ + 1276, + 256 + ], + [ + 1273, + 246 + ], + [ + 1252, + 244 + ], + [ + 1257, + 233 + ], + [ + 1247, + 230 + ], + [ + 1228, + 234 + ], + [ + 1213, + 240 + ], + [ + 1211, + 247 + ], + [ + 1200, + 258 + ], + [ + 1186, + 248 + ], + [ + 1185, + 228 + ], + [ + 1180, + 219 + ], + [ + 1169, + 231 + ], + [ + 1159, + 236 + ], + [ + 1150, + 235 + ], + [ + 1149, + 224 + ], + [ + 1153, + 214 + ], + [ + 1144, + 207 + ], + [ + 1140, + 198 + ], + [ + 1140, + 189 + ], + [ + 1140, + 180 + ], + [ + 1138, + 160 + ], + [ + 1142, + 155 + ], + [ + 1160, + 153 + ], + [ + 1177, + 148 + ], + [ + 1176, + 124 + ], + [ + 1170, + 115 + ], + [ + 1169, + 102 + ], + [ + 1176, + 85 + ], + [ + 1184, + 79 + ], + [ + 1199, + 76 + ], + [ + 1202, + 65 + ], + [ + 1219, + 55 + ], + [ + 1236, + 51 + ], + [ + 1251, + 52 + ], + [ + 1265, + 50 + ], + [ + 1291, + 48 + ], + [ + 1308, + 56 + ], + [ + 1323, + 71 + ], + [ + 1335, + 73 + ], + [ + 1344, + 73 + ], + [ + 1356, + 75 + ], + [ + 1371, + 83 + ], + [ + 1384, + 94 + ], + [ + 1387, + 107 + ], + [ + 1383, + 117 + ], + [ + 1374, + 119 + ], + [ + 1370, + 120 + ], + [ + 1369, + 125 + ], + [ + 1378, + 129 + ], + [ + 1389, + 131 + ], + [ + 1396, + 140 + ], + [ + 1404, + 137 + ], + [ + 1415, + 141 + ], + [ + 1418, + 146 + ], + [ + 1418, + 157 + ], + [ + 1418, + 165 + ], + [ + 1417, + 175 + ], + [ + 1408, + 180 + ], + [ + 1401, + 184 + ], + [ + 1405, + 193 + ], + [ + 1402, + 197 + ], + [ + 1395, + 202 + ], + [ + 1394, + 208 + ], + [ + 1399, + 214 + ], + [ + 1393, + 227 + ], + [ + 1390, + 228 + ], + [ + 1379, + 228 + ], + [ + 1365, + 228 + ], + [ + 1356, + 228 + ], + [ + 1351, + 223 + ], + [ + 1335, + 228 + ], + [ + 1336, + 231 + ], + [ + 1345, + 241 + ], + [ + 1343, + 246 + ], + [ + 1335, + 242 + ], + [ + 1330, + 251 + ], + [ + 1323, + 256 + ], + [ + 1319, + 254 + ], + [ + 1313, + 250 + ], + [ + 1304, + 249 + ], + [ + 1291, + 251 + ], + [ + 1287, + 256 + ], + [ + 1289, + 272 + ], + [ + 1302, + 273 + ], + [ + 1309, + 276 + ], + [ + 1309, + 290 + ], + [ + 1308, + 302 + ], + [ + 1305, + 
311 + ], + [ + 1294, + 317 + ], + [ + 1293, + 326 + ], + [ + 1293, + 370 + ], + [ + 1294, + 372 + ], + [ + 1296, + 381 + ], + [ + 1298, + 386 + ], + [ + 1301, + 391 + ], + [ + 1308, + 397 + ], + [ + 1321, + 399 + ], + [ + 1327, + 398 + ], + [ + 1335, + 398 + ], + [ + 1345, + 408 + ], + [ + 1355, + 400 + ], + [ + 1363, + 395 + ], + [ + 1372, + 394 + ], + [ + 1374, + 399 + ], + [ + 1360, + 420 + ], + [ + 1356, + 425 + ], + [ + 1360, + 433 + ], + [ + 1361, + 441 + ], + [ + 1363, + 450 + ], + [ + 1369, + 457 + ], + [ + 1370, + 466 + ], + [ + 1349, + 476 + ], + [ + 1349, + 484 + ], + [ + 1362, + 477 + ], + [ + 1373, + 477 + ], + [ + 1374, + 483 + ], + [ + 1370, + 490 + ], + [ + 1358, + 506 + ], + [ + 1351, + 512 + ], + [ + 1313, + 505 + ], + [ + 1304, + 506 + ], + [ + 1279, + 506 + ], + [ + 1254, + 510 + ], + [ + 1222, + 512 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1197, + 533 + ], + [ + 1201, + 515 + ], + [ + 1208, + 505 + ], + [ + 1229, + 505 + ], + [ + 1273, + 505 + ], + [ + 1268, + 534 + ], + [ + 1213, + 534 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1140, + 405 + ], + [ + 1140, + 460 + ], + [ + 1137, + 457 + ], + [ + 1137, + 410 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1129, + 399 + ], + [ + 1130, + 469 + ], + [ + 1128, + 465 + ], + [ + 1128, + 410 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1147, + 407 + ], + [ + 1149, + 418 + ], + [ + 1142, + 421 + ], + [ + 1142, + 432 + ], + [ + 1137, + 432 + ], + [ + 1131, + 427 + ], + [ + 1130, + 416 + ], + [ + 1130, + 404 + ], + [ + 1134, + 397 + ], + [ + 1158, + 398 + ], + [ + 1158, + 408 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1124, + 406 + ], + [ + 1130, + 403 + ], + [ + 1133, + 403 + ], + [ + 1136, + 408 + ], + [ + 1139, + 413 + ], + [ + 1132, + 417 + ], + [ + 1127, + 416 + ], + [ + 1124, + 411 + ], + [ + 1123, + 410 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1132, + 384 + ], + [ + 1138, + 389 + ], + [ + 1141, + 394 + ], + [ + 1141, + 397 + ], + [ + 1133, + 406 + ], + [ + 1127, + 405 + ], + [ + 1123, + 402 + ], + [ + 1121, + 396 + ], + [ + 1121, + 392 + ], + [ + 1126, + 387 + ], + [ + 1128, + 386 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1149, + 438 + ], + [ + 1142, + 447 + ], + [ + 1145, + 457 + ], + [ + 1144, + 476 + ], + [ + 1142, + 483 + ], + [ + 1132, + 486 + ], + [ + 1126, + 485 + ], + [ + 1118, + 477 + ], + [ + 1119, + 465 + ], + [ + 1119, + 452 + ], + [ + 1130, + 449 + ], + [ + 1127, + 441 + ], + [ + 1137, + 441 + ] + ] + }, + { + "label": "polegroup", + "polygon": [ + [ + 1153, + 450 + ], + [ + 1177, + 460 + ], + [ + 1175, + 497 + ], + [ + 1154, + 488 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 846, + 439 + ], + [ + 852, + 410 + ], + [ + 855, + 380 + ], + [ + 832, + 369 + ], + [ + 834, + 187 + ], + [ + 895, + 183 + ], + [ + 922, + 187 + ], + [ + 940, + 196 + ], + [ + 948, + 204 + ], + [ + 948, + 217 + ], + [ + 956, + 225 + ], + [ + 966, + 229 + ], + [ + 973, + 242 + ], + [ + 978, + 258 + ], + [ + 975, + 287 + ], + [ + 962, + 383 + ], + [ + 952, + 403 + ], + [ + 945, + 415 + ], + [ + 938, + 429 + ], + [ + 947, + 435 + ], + [ + 951, + 437 + ], + [ + 954, + 450 + ], + [ + 949, + 464 + ], + [ + 913, + 473 + ], + [ + 865, + 459 + ], + [ + 848, + 443 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 948, + 450 + ], + [ + 962, + 453 + ], + [ + 970, + 459 + ], + [ + 981, + 462 + ], + [ + 986, + 464 + ], + [ + 987, + 467 + ], + [ + 982, + 470 + ], + [ + 952, + 471 + ], + [ + 925, + 472 + 
], + [ + 922, + 455 + ], + [ + 930, + 450 + ], + [ + 945, + 449 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 983, + 449 + ], + [ + 982, + 473 + ], + [ + 981, + 471 + ], + [ + 981, + 453 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 972, + 445 + ], + [ + 972, + 474 + ], + [ + 971, + 472 + ], + [ + 969, + 450 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 922, + 464 + ], + [ + 945, + 466 + ], + [ + 952, + 472 + ], + [ + 954, + 477 + ], + [ + 955, + 483 + ], + [ + 947, + 487 + ], + [ + 933, + 485 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 876, + 447 + ], + [ + 905, + 446 + ], + [ + 919, + 450 + ], + [ + 938, + 461 + ], + [ + 944, + 482 + ], + [ + 941, + 489 + ], + [ + 931, + 497 + ], + [ + 922, + 501 + ], + [ + 891, + 503 + ], + [ + 879, + 488 + ], + [ + 872, + 464 + ], + [ + 874, + 452 + ], + [ + 875, + 451 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 817, + 446 + ], + [ + 813, + 435 + ], + [ + 807, + 423 + ], + [ + 795, + 428 + ], + [ + 785, + 432 + ], + [ + 773, + 428 + ], + [ + 758, + 423 + ], + [ + 745, + 426 + ], + [ + 735, + 418 + ], + [ + 726, + 408 + ], + [ + 719, + 389 + ], + [ + 719, + 363 + ], + [ + 727, + 351 + ], + [ + 735, + 343 + ], + [ + 740, + 337 + ], + [ + 738, + 329 + ], + [ + 725, + 321 + ], + [ + 711, + 318 + ], + [ + 703, + 305 + ], + [ + 704, + 275 + ], + [ + 701, + 264 + ], + [ + 693, + 259 + ], + [ + 694, + 249 + ], + [ + 703, + 239 + ], + [ + 697, + 232 + ], + [ + 688, + 237 + ], + [ + 682, + 232 + ], + [ + 681, + 228 + ], + [ + 683, + 225 + ], + [ + 685, + 219 + ], + [ + 685, + 209 + ], + [ + 669, + 213 + ], + [ + 665, + 204 + ], + [ + 660, + 196 + ], + [ + 657, + 189 + ], + [ + 659, + 178 + ], + [ + 670, + 173 + ], + [ + 694, + 175 + ], + [ + 708, + 170 + ], + [ + 715, + 163 + ], + [ + 717, + 156 + ], + [ + 711, + 149 + ], + [ + 703, + 149 + ], + [ + 692, + 141 + ], + [ + 689, + 134 + ], + [ + 688, + 129 + ], + [ + 692, + 118 + ], + [ + 700, + 112 + ], + [ + 704, + 103 + ], + [ + 711, + 101 + ], + [ + 718, + 102 + ], + [ + 725, + 103 + ], + [ + 729, + 101 + ], + [ + 730, + 93 + ], + [ + 726, + 86 + ], + [ + 725, + 83 + ], + [ + 731, + 76 + ], + [ + 736, + 68 + ], + [ + 740, + 61 + ], + [ + 750, + 53 + ], + [ + 762, + 47 + ], + [ + 773, + 47 + ], + [ + 781, + 46 + ], + [ + 790, + 42 + ], + [ + 797, + 38 + ], + [ + 804, + 38 + ], + [ + 810, + 36 + ], + [ + 814, + 30 + ], + [ + 821, + 26 + ], + [ + 829, + 22 + ], + [ + 841, + 22 + ], + [ + 845, + 23 + ], + [ + 847, + 31 + ], + [ + 840, + 39 + ], + [ + 841, + 39 + ], + [ + 846, + 40 + ], + [ + 843, + 53 + ], + [ + 847, + 56 + ], + [ + 858, + 52 + ], + [ + 866, + 57 + ], + [ + 872, + 52 + ], + [ + 875, + 48 + ], + [ + 895, + 46 + ], + [ + 902, + 46 + ], + [ + 907, + 57 + ], + [ + 905, + 63 + ], + [ + 901, + 72 + ], + [ + 893, + 82 + ], + [ + 905, + 89 + ], + [ + 913, + 92 + ], + [ + 926, + 94 + ], + [ + 937, + 101 + ], + [ + 937, + 112 + ], + [ + 935, + 117 + ], + [ + 934, + 117 + ], + [ + 927, + 117 + ], + [ + 920, + 112 + ], + [ + 919, + 119 + ], + [ + 919, + 123 + ], + [ + 924, + 130 + ], + [ + 929, + 134 + ], + [ + 934, + 135 + ], + [ + 942, + 133 + ], + [ + 952, + 133 + ], + [ + 957, + 138 + ], + [ + 967, + 144 + ], + [ + 976, + 144 + ], + [ + 989, + 141 + ], + [ + 1001, + 143 + ], + [ + 1008, + 151 + ], + [ + 1002, + 164 + ], + [ + 994, + 166 + ], + [ + 983, + 166 + ], + [ + 973, + 170 + ], + [ + 962, + 173 + ], + [ + 957, + 175 + ], + [ + 949, + 180 + ], + [ + 942, + 185 + ], + [ + 942, + 194 + ], + [ + 946, + 200 + ], + [ + 947, + 211 + ], + [ + 
947, + 223 + ], + [ + 953, + 230 + ], + [ + 956, + 241 + ], + [ + 954, + 252 + ], + [ + 944, + 264 + ], + [ + 925, + 275 + ], + [ + 894, + 287 + ], + [ + 880, + 287 + ], + [ + 860, + 281 + ], + [ + 848, + 272 + ], + [ + 837, + 265 + ], + [ + 836, + 297 + ], + [ + 837, + 377 + ], + [ + 839, + 405 + ], + [ + 836, + 438 + ], + [ + 835, + 445 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 704, + 354 + ], + [ + 711, + 358 + ], + [ + 714, + 399 + ], + [ + 707, + 398 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 706, + 351 + ], + [ + 717, + 446 + ], + [ + 711, + 446 + ], + [ + 702, + 350 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 743, + 443 + ], + [ + 759, + 435 + ], + [ + 812, + 435 + ], + [ + 836, + 437 + ], + [ + 859, + 453 + ], + [ + 861, + 472 + ], + [ + 765, + 461 + ], + [ + 747, + 450 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 652, + 454 + ], + [ + 683, + 440 + ], + [ + 726, + 439 + ], + [ + 779, + 445 + ], + [ + 816, + 447 + ], + [ + 839, + 460 + ], + [ + 858, + 482 + ], + [ + 743, + 498 + ], + [ + 651, + 476 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 670, + 471 + ], + [ + 681, + 465 + ], + [ + 701, + 453 + ], + [ + 716, + 446 + ], + [ + 755, + 443 + ], + [ + 794, + 444 + ], + [ + 806, + 448 + ], + [ + 815, + 455 + ], + [ + 833, + 465 + ], + [ + 847, + 471 + ], + [ + 857, + 481 + ], + [ + 851, + 498 + ], + [ + 777, + 516 + ], + [ + 726, + 516 + ], + [ + 671, + 504 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 778, + 343 + ], + [ + 783, + 345 + ], + [ + 792, + 351 + ], + [ + 793, + 359 + ], + [ + 792, + 369 + ], + [ + 788, + 373 + ], + [ + 780, + 375 + ], + [ + 775, + 374 + ], + [ + 767, + 369 + ], + [ + 766, + 359 + ], + [ + 767, + 352 + ], + [ + 773, + 344 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 780, + 340 + ], + [ + 782, + 497 + ], + [ + 777, + 495 + ], + [ + 773, + 340 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 851, + 517 + ], + [ + 927, + 519 + ], + [ + 928, + 547 + ], + [ + 876, + 550 + ], + [ + 873, + 545 + ], + [ + 850, + 543 + ], + [ + 848, + 525 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 670, + 508 + ], + [ + 679, + 505 + ], + [ + 681, + 500 + ], + [ + 689, + 488 + ], + [ + 695, + 483 + ], + [ + 702, + 489 + ], + [ + 704, + 494 + ], + [ + 707, + 495 + ], + [ + 722, + 491 + ], + [ + 729, + 488 + ], + [ + 723, + 481 + ], + [ + 722, + 475 + ], + [ + 724, + 468 + ], + [ + 735, + 470 + ], + [ + 741, + 476 + ], + [ + 748, + 478 + ], + [ + 762, + 473 + ], + [ + 770, + 473 + ], + [ + 778, + 468 + ], + [ + 784, + 465 + ], + [ + 790, + 469 + ], + [ + 795, + 474 + ], + [ + 798, + 474 + ], + [ + 803, + 474 + ], + [ + 810, + 468 + ], + [ + 818, + 465 + ], + [ + 825, + 465 + ], + [ + 817, + 432 + ], + [ + 820, + 400 + ], + [ + 821, + 376 + ], + [ + 821, + 371 + ], + [ + 821, + 361 + ], + [ + 813, + 351 + ], + [ + 807, + 343 + ], + [ + 799, + 348 + ], + [ + 787, + 351 + ], + [ + 773, + 350 + ], + [ + 751, + 326 + ], + [ + 743, + 320 + ], + [ + 731, + 309 + ], + [ + 727, + 288 + ], + [ + 726, + 261 + ], + [ + 727, + 230 + ], + [ + 739, + 208 + ], + [ + 751, + 196 + ], + [ + 767, + 175 + ], + [ + 773, + 174 + ], + [ + 788, + 167 + ], + [ + 804, + 154 + ], + [ + 817, + 149 + ], + [ + 839, + 160 + ], + [ + 861, + 163 + ], + [ + 876, + 167 + ], + [ + 895, + 189 + ], + [ + 909, + 205 + ], + [ + 920, + 224 + ], + [ + 931, + 237 + ], + [ + 935, + 258 + ], + [ + 912, + 314 + ], + [ + 895, + 320 + ], + [ + 879, + 337 + ], + [ + 867, + 340 + ], + [ + 848, + 339 + ], + [ + 839, + 
339 + ], + [ + 836, + 369 + ], + [ + 839, + 401 + ], + [ + 839, + 434 + ], + [ + 843, + 436 + ], + [ + 844, + 425 + ], + [ + 849, + 409 + ], + [ + 854, + 397 + ], + [ + 858, + 376 + ], + [ + 876, + 376 + ], + [ + 887, + 379 + ], + [ + 900, + 379 + ], + [ + 914, + 384 + ], + [ + 924, + 385 + ], + [ + 931, + 392 + ], + [ + 937, + 406 + ], + [ + 942, + 417 + ], + [ + 916, + 446 + ], + [ + 915, + 453 + ], + [ + 916, + 461 + ], + [ + 919, + 470 + ], + [ + 926, + 475 + ], + [ + 930, + 482 + ], + [ + 930, + 490 + ], + [ + 923, + 501 + ], + [ + 916, + 501 + ], + [ + 905, + 498 + ], + [ + 902, + 501 + ], + [ + 919, + 503 + ], + [ + 933, + 514 + ], + [ + 935, + 520 + ], + [ + 937, + 530 + ], + [ + 928, + 543 + ], + [ + 917, + 542 + ], + [ + 909, + 532 + ], + [ + 892, + 527 + ], + [ + 887, + 524 + ], + [ + 897, + 533 + ], + [ + 903, + 540 + ], + [ + 903, + 546 + ], + [ + 887, + 554 + ], + [ + 876, + 556 + ], + [ + 872, + 547 + ], + [ + 852, + 545 + ], + [ + 846, + 539 + ], + [ + 837, + 538 + ], + [ + 832, + 534 + ], + [ + 821, + 531 + ], + [ + 784, + 537 + ], + [ + 731, + 539 + ], + [ + 707, + 539 + ], + [ + 673, + 529 + ], + [ + 667, + 516 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 594, + 266 + ], + [ + 597, + 410 + ], + [ + 590, + 405 + ], + [ + 589, + 263 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 575, + 239 + ], + [ + 584, + 228 + ], + [ + 584, + 224 + ], + [ + 589, + 219 + ], + [ + 594, + 221 + ], + [ + 597, + 223 + ], + [ + 608, + 242 + ], + [ + 608, + 244 + ], + [ + 607, + 247 + ], + [ + 605, + 259 + ], + [ + 599, + 270 + ], + [ + 591, + 271 + ], + [ + 585, + 268 + ], + [ + 578, + 260 + ], + [ + 576, + 247 + ], + [ + 575, + 246 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 564, + 393 + ], + [ + 639, + 388 + ], + [ + 668, + 413 + ], + [ + 676, + 422 + ], + [ + 689, + 545 + ], + [ + 681, + 555 + ], + [ + 681, + 560 + ], + [ + 553, + 568 + ], + [ + 547, + 563 + ], + [ + 538, + 422 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 298, + 287 + ], + [ + 359, + 287 + ], + [ + 365, + 291 + ], + [ + 366, + 332 + ], + [ + 364, + 336 + ], + [ + 305, + 339 + ], + [ + 299, + 334 + ], + [ + 288, + 334 + ], + [ + 290, + 328 + ], + [ + 296, + 326 + ], + [ + 297, + 296 + ], + [ + 288, + 295 + ], + [ + 289, + 289 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 39, + 462 + ], + [ + 39, + 571 + ], + [ + 28, + 570 + ], + [ + 28, + 458 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 21, + 545 + ], + [ + 28, + 558 + ], + [ + 31, + 659 + ], + [ + 9, + 663 + ], + [ + 0, + 662 + ], + [ + 0, + 545 + ], + [ + 9, + 544 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 78, + 343 + ], + [ + 67, + 307 + ], + [ + 63, + 297 + ], + [ + 53, + 290 + ], + [ + 43, + 281 + ], + [ + 39, + 274 + ], + [ + 29, + 274 + ], + [ + 20, + 265 + ], + [ + 14, + 262 + ], + [ + 0, + 258 + ], + [ + 1, + 0 + ], + [ + 693, + 1 + ], + [ + 694, + 3 + ], + [ + 699, + 11 + ], + [ + 700, + 20 + ], + [ + 698, + 26 + ], + [ + 699, + 35 + ], + [ + 699, + 42 + ], + [ + 700, + 51 + ], + [ + 703, + 58 + ], + [ + 703, + 61 + ], + [ + 703, + 68 + ], + [ + 696, + 73 + ], + [ + 690, + 77 + ], + [ + 689, + 83 + ], + [ + 689, + 94 + ], + [ + 685, + 103 + ], + [ + 680, + 116 + ], + [ + 671, + 124 + ], + [ + 662, + 132 + ], + [ + 659, + 143 + ], + [ + 648, + 145 + ], + [ + 637, + 156 + ], + [ + 626, + 156 + ], + [ + 617, + 159 + ], + [ + 601, + 170 + ], + [ + 597, + 177 + ], + [ + 590, + 192 + ], + [ + 579, + 201 + ], + [ + 576, + 211 + ], + [ + 576, + 215 + ], + [ + 569, + 218 + ], 
+ [ + 559, + 222 + ], + [ + 549, + 229 + ], + [ + 542, + 231 + ], + [ + 527, + 227 + ], + [ + 516, + 226 + ], + [ + 505, + 223 + ], + [ + 499, + 217 + ], + [ + 488, + 216 + ], + [ + 476, + 224 + ], + [ + 467, + 226 + ], + [ + 454, + 223 + ], + [ + 442, + 226 + ], + [ + 434, + 223 + ], + [ + 424, + 219 + ], + [ + 418, + 223 + ], + [ + 413, + 230 + ], + [ + 402, + 237 + ], + [ + 395, + 238 + ], + [ + 387, + 233 + ], + [ + 380, + 225 + ], + [ + 371, + 224 + ], + [ + 365, + 223 + ], + [ + 359, + 220 + ], + [ + 354, + 216 + ], + [ + 349, + 216 + ], + [ + 345, + 213 + ], + [ + 344, + 207 + ], + [ + 344, + 203 + ], + [ + 339, + 200 + ], + [ + 336, + 198 + ], + [ + 331, + 193 + ], + [ + 327, + 194 + ], + [ + 314, + 193 + ], + [ + 307, + 193 + ], + [ + 299, + 200 + ], + [ + 287, + 203 + ], + [ + 281, + 203 + ], + [ + 286, + 213 + ], + [ + 284, + 220 + ], + [ + 280, + 229 + ], + [ + 282, + 237 + ], + [ + 281, + 241 + ], + [ + 282, + 248 + ], + [ + 281, + 252 + ], + [ + 266, + 255 + ], + [ + 249, + 258 + ], + [ + 230, + 255 + ], + [ + 219, + 247 + ], + [ + 206, + 250 + ], + [ + 204, + 260 + ], + [ + 193, + 259 + ], + [ + 185, + 259 + ], + [ + 185, + 269 + ], + [ + 193, + 281 + ], + [ + 191, + 285 + ], + [ + 182, + 287 + ], + [ + 178, + 295 + ], + [ + 175, + 306 + ], + [ + 163, + 313 + ], + [ + 151, + 317 + ], + [ + 143, + 321 + ], + [ + 138, + 396 + ], + [ + 139, + 494 + ], + [ + 141, + 549 + ], + [ + 144, + 567 + ], + [ + 151, + 587 + ], + [ + 159, + 601 + ], + [ + 125, + 603 + ], + [ + 93, + 603 + ], + [ + 75, + 603 + ], + [ + 59, + 600 + ], + [ + 51, + 597 + ], + [ + 61, + 578 + ], + [ + 77, + 542 + ], + [ + 86, + 451 + ], + [ + 82, + 364 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1947, + 81 + ], + [ + 1951, + 81 + ], + [ + 1952, + 90 + ], + [ + 1981, + 78 + ], + [ + 2016, + 59 + ], + [ + 2048, + 45 + ], + [ + 2048, + 43 + ], + [ + 2047, + 105 + ], + [ + 2037, + 118 + ], + [ + 2032, + 112 + ], + [ + 2030, + 100 + ], + [ + 1917, + 102 + ], + [ + 1919, + 94 + ], + [ + 1929, + 86 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1730, + 0 + ], + [ + 1741, + 365 + ], + [ + 1714, + 388 + ], + [ + 1712, + 1 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1707, + 94 + ], + [ + 1726, + 93 + ], + [ + 1741, + 97 + ], + [ + 1750, + 107 + ], + [ + 1755, + 131 + ], + [ + 1749, + 155 + ], + [ + 1737, + 168 + ], + [ + 1710, + 174 + ], + [ + 1697, + 170 + ], + [ + 1685, + 159 + ], + [ + 1681, + 129 + ], + [ + 1688, + 110 + ], + [ + 1693, + 104 + ], + [ + 1699, + 100 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1568, + 366 + ], + [ + 1572, + 368 + ], + [ + 1576, + 376 + ], + [ + 1578, + 380 + ], + [ + 1583, + 388 + ], + [ + 1594, + 395 + ], + [ + 1598, + 408 + ], + [ + 1596, + 428 + ], + [ + 1596, + 435 + ], + [ + 1597, + 448 + ], + [ + 1594, + 460 + ], + [ + 1594, + 477 + ], + [ + 1592, + 487 + ], + [ + 1600, + 510 + ], + [ + 1599, + 520 + ], + [ + 1598, + 529 + ], + [ + 1596, + 536 + ], + [ + 1585, + 532 + ], + [ + 1584, + 509 + ], + [ + 1581, + 499 + ], + [ + 1571, + 468 + ], + [ + 1569, + 488 + ], + [ + 1568, + 529 + ], + [ + 1566, + 533 + ], + [ + 1561, + 533 + ], + [ + 1555, + 531 + ], + [ + 1552, + 526 + ], + [ + 1552, + 510 + ], + [ + 1551, + 490 + ], + [ + 1552, + 464 + ], + [ + 1552, + 459 + ], + [ + 1551, + 445 + ], + [ + 1553, + 430 + ], + [ + 1545, + 424 + ], + [ + 1543, + 413 + ], + [ + 1545, + 405 + ], + [ + 1554, + 393 + ], + [ + 1557, + 389 + ], + [ + 1553, + 382 + ], + [ + 1555, + 374 + ], + [ + 1563, + 368 + ] + ] + }, + { + "label": 
"person", + "polygon": [ + [ + 1441, + 402 + ], + [ + 1449, + 381 + ], + [ + 1458, + 372 + ], + [ + 1462, + 364 + ], + [ + 1461, + 352 + ], + [ + 1466, + 345 + ], + [ + 1476, + 344 + ], + [ + 1483, + 349 + ], + [ + 1487, + 359 + ], + [ + 1487, + 368 + ], + [ + 1492, + 374 + ], + [ + 1502, + 377 + ], + [ + 1509, + 389 + ], + [ + 1510, + 419 + ], + [ + 1508, + 451 + ], + [ + 1504, + 461 + ], + [ + 1497, + 468 + ], + [ + 1493, + 495 + ], + [ + 1490, + 527 + ], + [ + 1488, + 536 + ], + [ + 1479, + 541 + ], + [ + 1473, + 537 + ], + [ + 1473, + 532 + ], + [ + 1474, + 518 + ], + [ + 1472, + 506 + ], + [ + 1470, + 498 + ], + [ + 1470, + 488 + ], + [ + 1466, + 466 + ], + [ + 1460, + 446 + ], + [ + 1457, + 429 + ], + [ + 1454, + 436 + ], + [ + 1453, + 447 + ], + [ + 1452, + 461 + ], + [ + 1446, + 460 + ], + [ + 1441, + 450 + ], + [ + 1440, + 424 + ], + [ + 1440, + 407 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1694, + 411 + ], + [ + 1723, + 370 + ], + [ + 1771, + 336 + ], + [ + 1862, + 308 + ], + [ + 1964, + 282 + ], + [ + 2021, + 270 + ], + [ + 2048, + 266 + ], + [ + 2048, + 909 + ], + [ + 2036, + 903 + ], + [ + 2030, + 933 + ], + [ + 2016, + 961 + ], + [ + 2003, + 970 + ], + [ + 1976, + 969 + ], + [ + 1941, + 955 + ], + [ + 1912, + 917 + ], + [ + 1896, + 866 + ], + [ + 1892, + 839 + ], + [ + 1797, + 797 + ], + [ + 1737, + 766 + ], + [ + 1734, + 782 + ], + [ + 1721, + 797 + ], + [ + 1698, + 797 + ], + [ + 1666, + 787 + ], + [ + 1650, + 773 + ], + [ + 1620, + 660 + ], + [ + 1616, + 601 + ], + [ + 1619, + 570 + ], + [ + 1624, + 554 + ], + [ + 1632, + 539 + ], + [ + 1620, + 533 + ], + [ + 1611, + 527 + ], + [ + 1610, + 514 + ], + [ + 1624, + 500 + ], + [ + 1653, + 484 + ], + [ + 1665, + 464 + ], + [ + 1683, + 425 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1565, + 470 + ], + [ + 1570, + 477 + ], + [ + 1571, + 488 + ], + [ + 1573, + 496 + ], + [ + 1577, + 501 + ], + [ + 1575, + 514 + ], + [ + 1574, + 525 + ], + [ + 1579, + 535 + ], + [ + 1574, + 546 + ], + [ + 1571, + 543 + ], + [ + 1569, + 530 + ], + [ + 1559, + 519 + ], + [ + 1552, + 533 + ], + [ + 1549, + 530 + ], + [ + 1552, + 520 + ], + [ + 1552, + 510 + ], + [ + 1547, + 506 + ], + [ + 1526, + 491 + ], + [ + 1506, + 475 + ], + [ + 1502, + 463 + ], + [ + 1498, + 459 + ], + [ + 1499, + 452 + ], + [ + 1502, + 450 + ], + [ + 1504, + 454 + ], + [ + 1512, + 474 + ], + [ + 1522, + 483 + ], + [ + 1548, + 497 + ], + [ + 1548, + 491 + ], + [ + 1550, + 491 + ], + [ + 1554, + 487 + ], + [ + 1558, + 478 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 176, + 648 + ], + [ + 205, + 635 + ], + [ + 228, + 629 + ], + [ + 272, + 625 + ], + [ + 313, + 633 + ], + [ + 340, + 649 + ], + [ + 349, + 839 + ], + [ + 284, + 859 + ], + [ + 219, + 861 + ], + [ + 173, + 851 + ], + [ + 143, + 829 + ], + [ + 144, + 771 + ], + [ + 145, + 740 + ], + [ + 147, + 706 + ], + [ + 152, + 682 + ], + [ + 161, + 658 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], 
+ [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000016_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000016_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..2054c665cec7030009252a531121e6c7a5cddb5f Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000016_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000016_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000016_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d81bbb7c2325ac03c2ed85890bf7cab17b0ae1ac Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000016_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000017_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000017_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..1931fcb06c3560b3758f7a9a634ba25a87b1aae2 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000017_000019_gtFine_polygons.json @@ -0,0 +1,6128 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 713, + 4 + ], + [ + 1283, + 3 + ], + [ + 1176, + 380 + ], + [ + 930, + 361 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 887, + 479 + ], + [ + 1179, + 450 + ], + [ + 1272, + 469 + ], + [ + 1405, + 473 + ], + [ + 1635, + 586 + ], + [ + 2048, + 803 + ], + [ + 2048, + 1024 + ], + [ + 2, + 1024 + ], + [ + 1, + 593 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1168, + 444 + ], + [ + 1272, + 432 + ], + [ + 1401, + 445 + ], + [ + 1393, + 489 + ], + [ + 1349, + 491 + ], + [ + 1280, + 494 + ], + [ + 1248, + 494 + ], + [ + 1229, + 490 + ], + [ + 1193, + 477 + ], + [ + 1173, + 467 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1645, + 545 + ], + [ + 2015, + 623 + ], + [ + 1859, + 796 + ], + [ + 1696, + 783 + ], + [ + 1664, + 770 + ], + [ + 1549, + 685 + ], + [ + 1472, + 629 + ], + [ + 1464, + 618 + ], + [ + 1470, + 604 + ], + [ + 1543, + 595 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 812, + 472 + ], + [ + 993, + 464 + ], + [ + 961, + 519 + ], + [ + 857, + 523 + ], + [ + 837, + 518 + ], + [ + 827, + 503 + ], + [ + 818, + 491 + ], + [ + 815, + 483 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 171, + 527 + ], + [ + 488, + 506 + ], + [ + 533, + 579 + ], + [ + 447, + 602 + ], + [ + 407, + 611 + ], + [ + 203, + 623 + ], + [ + 35, + 633 + ], + [ + 21, + 623 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 976, + 312 + ], + [ + 973, + 435 + ], + [ + 431, + 505 + ], + [ + 360, + 2 + ], + [ + 750, + 3 + ], + [ + 754, + 8 + ], + [ + 755, + 7 + ], + [ + 765, + 6 + ], + [ + 772, + 11 + ], + [ + 774, + 14 + ], + [ + 768, + 17 + ], + [ + 769, + 28 + ], + [ + 791, + 61 + ], + [ + 794, + 69 + ], + [ + 793, + 74 + ], + [ + 828, + 123 + ], + [ + 889, + 198 + ], + [ + 903, + 204 + ], + [ + 909, + 214 + ], + [ + 919, + 215 + ], + [ + 920, + 262 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 0, + 2 + ], + [ + 423, + 2 + ], + [ + 432, + 187 + ], + [ + 435, + 190 + ], + [ + 434, + 199 + ], + [ + 430, + 199 + ], + [ + 432, + 246 + ], + [ + 442, + 248 + ], + [ + 464, + 256 + ], + [ + 464, + 266 + ], + [ + 435, 
+ 269 + ], + [ + 436, + 319 + ], + [ + 441, + 322 + ], + [ + 481, + 352 + ], + [ + 436, + 352 + ], + [ + 440, + 482 + ], + [ + 386, + 532 + ], + [ + 200, + 546 + ], + [ + 0, + 544 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1122, + 434 + ], + [ + 1117, + 228 + ], + [ + 1192, + 148 + ], + [ + 1229, + 90 + ], + [ + 1251, + 53 + ], + [ + 1250, + 37 + ], + [ + 1254, + 19 + ], + [ + 1271, + 16 + ], + [ + 1280, + 5 + ], + [ + 1274, + 0 + ], + [ + 1609, + 1 + ], + [ + 1607, + 491 + ], + [ + 1349, + 464 + ], + [ + 1284, + 461 + ], + [ + 1230, + 452 + ], + [ + 1141, + 448 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1575, + 415 + ], + [ + 1574, + 1 + ], + [ + 2047, + 2 + ], + [ + 2047, + 669 + ], + [ + 1610, + 544 + ], + [ + 1588, + 517 + ], + [ + 1584, + 487 + ], + [ + 1580, + 459 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 920, + 427 + ], + [ + 920, + 396 + ], + [ + 919, + 385 + ], + [ + 911, + 379 + ], + [ + 899, + 381 + ], + [ + 887, + 388 + ], + [ + 879, + 377 + ], + [ + 875, + 347 + ], + [ + 881, + 299 + ], + [ + 897, + 281 + ], + [ + 913, + 265 + ], + [ + 927, + 259 + ], + [ + 933, + 257 + ], + [ + 941, + 253 + ], + [ + 950, + 247 + ], + [ + 958, + 250 + ], + [ + 965, + 251 + ], + [ + 975, + 252 + ], + [ + 985, + 261 + ], + [ + 996, + 270 + ], + [ + 1003, + 276 + ], + [ + 1016, + 278 + ], + [ + 1026, + 289 + ], + [ + 1032, + 296 + ], + [ + 1038, + 309 + ], + [ + 1031, + 318 + ], + [ + 1041, + 322 + ], + [ + 1054, + 319 + ], + [ + 1068, + 319 + ], + [ + 1068, + 326 + ], + [ + 1056, + 331 + ], + [ + 1049, + 341 + ], + [ + 1055, + 348 + ], + [ + 1067, + 350 + ], + [ + 1075, + 349 + ], + [ + 1088, + 349 + ], + [ + 1102, + 354 + ], + [ + 1112, + 372 + ], + [ + 1127, + 424 + ], + [ + 1098, + 443 + ], + [ + 991, + 449 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1121, + 436 + ], + [ + 1119, + 417 + ], + [ + 1109, + 400 + ], + [ + 1099, + 372 + ], + [ + 1099, + 358 + ], + [ + 1094, + 344 + ], + [ + 1083, + 337 + ], + [ + 1071, + 335 + ], + [ + 1059, + 331 + ], + [ + 1050, + 325 + ], + [ + 1052, + 319 + ], + [ + 1061, + 311 + ], + [ + 1063, + 299 + ], + [ + 1057, + 297 + ], + [ + 1049, + 296 + ], + [ + 1038, + 294 + ], + [ + 1031, + 288 + ], + [ + 1031, + 281 + ], + [ + 1031, + 270 + ], + [ + 1025, + 260 + ], + [ + 1027, + 252 + ], + [ + 1035, + 244 + ], + [ + 1036, + 233 + ], + [ + 1028, + 227 + ], + [ + 1012, + 228 + ], + [ + 999, + 231 + ], + [ + 991, + 229 + ], + [ + 991, + 225 + ], + [ + 999, + 212 + ], + [ + 998, + 201 + ], + [ + 996, + 189 + ], + [ + 997, + 172 + ], + [ + 993, + 164 + ], + [ + 985, + 164 + ], + [ + 982, + 164 + ], + [ + 981, + 157 + ], + [ + 990, + 148 + ], + [ + 979, + 140 + ], + [ + 976, + 126 + ], + [ + 982, + 114 + ], + [ + 989, + 118 + ], + [ + 997, + 122 + ], + [ + 1006, + 119 + ], + [ + 1006, + 103 + ], + [ + 1056, + 99 + ], + [ + 1070, + 99 + ], + [ + 1112, + 132 + ], + [ + 1154, + 245 + ], + [ + 1155, + 281 + ], + [ + 1156, + 295 + ], + [ + 1159, + 330 + ], + [ + 1162, + 351 + ], + [ + 1166, + 378 + ], + [ + 1167, + 419 + ], + [ + 1168, + 443 + ], + [ + 1139, + 457 + ], + [ + 1119, + 448 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1180, + 426 + ], + [ + 1176, + 300 + ], + [ + 1172, + 284 + ], + [ + 1166, + 274 + ], + [ + 1142, + 259 + ], + [ + 1133, + 252 + ], + [ + 1118, + 237 + ], + [ + 1100, + 225 + ], + [ + 1076, + 214 + ], + [ + 1058, + 203 + ], + [ + 1045, + 190 + ], + [ + 1027, + 173 + ], + [ + 1019, + 168 + ], + [ + 1019, + 146 + ], + [ + 1027, + 127 + ], + [ + 1026, + 109 
+ ], + [ + 1016, + 101 + ], + [ + 1006, + 99 + ], + [ + 1004, + 92 + ], + [ + 1009, + 90 + ], + [ + 1018, + 81 + ], + [ + 1022, + 69 + ], + [ + 1042, + 54 + ], + [ + 1072, + 50 + ], + [ + 1113, + 57 + ], + [ + 1146, + 62 + ], + [ + 1175, + 95 + ], + [ + 1197, + 143 + ], + [ + 1193, + 221 + ], + [ + 1197, + 241 + ], + [ + 1196, + 258 + ], + [ + 1188, + 286 + ], + [ + 1184, + 344 + ], + [ + 1189, + 442 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1209, + 414 + ], + [ + 1220, + 417 + ], + [ + 1230, + 430 + ], + [ + 1235, + 449 + ], + [ + 1233, + 460 + ], + [ + 1212, + 466 + ], + [ + 1191, + 466 + ], + [ + 1176, + 466 + ], + [ + 1171, + 451 + ], + [ + 1169, + 438 + ], + [ + 1170, + 430 + ], + [ + 1176, + 418 + ], + [ + 1198, + 415 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1127, + 447 + ], + [ + 1132, + 435 + ], + [ + 1135, + 431 + ], + [ + 1155, + 430 + ], + [ + 1165, + 433 + ], + [ + 1175, + 442 + ], + [ + 1180, + 456 + ], + [ + 1180, + 466 + ], + [ + 1177, + 469 + ], + [ + 1172, + 476 + ], + [ + 1164, + 476 + ], + [ + 1144, + 472 + ], + [ + 1133, + 459 + ], + [ + 1128, + 451 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1366, + 541 + ], + [ + 1467, + 622 + ], + [ + 1468, + 605 + ], + [ + 1525, + 598 + ], + [ + 1550, + 557 + ], + [ + 1460, + 488 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1196, + 367 + ], + [ + 1197, + 314 + ], + [ + 1196, + 302 + ], + [ + 1192, + 284 + ], + [ + 1187, + 263 + ], + [ + 1180, + 242 + ], + [ + 1172, + 226 + ], + [ + 1132, + 210 + ], + [ + 1106, + 200 + ], + [ + 1085, + 185 + ], + [ + 1048, + 138 + ], + [ + 1040, + 102 + ], + [ + 1050, + 83 + ], + [ + 1051, + 69 + ], + [ + 1048, + 54 + ], + [ + 1053, + 47 + ], + [ + 1064, + 32 + ], + [ + 1068, + 21 + ], + [ + 1072, + 1 + ], + [ + 1081, + 0 + ], + [ + 1370, + 0 + ], + [ + 1367, + 5 + ], + [ + 1377, + 21 + ], + [ + 1380, + 45 + ], + [ + 1376, + 62 + ], + [ + 1336, + 89 + ], + [ + 1344, + 104 + ], + [ + 1349, + 138 + ], + [ + 1336, + 157 + ], + [ + 1293, + 178 + ], + [ + 1280, + 187 + ], + [ + 1259, + 201 + ], + [ + 1252, + 226 + ], + [ + 1255, + 236 + ], + [ + 1255, + 245 + ], + [ + 1249, + 262 + ], + [ + 1252, + 314 + ], + [ + 1257, + 387 + ], + [ + 1261, + 470 + ], + [ + 1255, + 474 + ], + [ + 1241, + 475 + ], + [ + 1241, + 437 + ], + [ + 1238, + 360 + ], + [ + 1237, + 327 + ], + [ + 1228, + 279 + ], + [ + 1227, + 259 + ], + [ + 1223, + 244 + ], + [ + 1221, + 253 + ], + [ + 1208, + 265 + ], + [ + 1211, + 299 + ], + [ + 1211, + 348 + ], + [ + 1213, + 406 + ], + [ + 1218, + 460 + ], + [ + 1218, + 467 + ], + [ + 1204, + 469 + ], + [ + 1199, + 419 + ], + [ + 1197, + 410 + ], + [ + 1195, + 377 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1200, + 439 + ], + [ + 1203, + 439 + ], + [ + 1208, + 446 + ], + [ + 1215, + 442 + ], + [ + 1225, + 439 + ], + [ + 1230, + 449 + ], + [ + 1231, + 456 + ], + [ + 1226, + 470 + ], + [ + 1218, + 471 + ], + [ + 1213, + 468 + ], + [ + 1210, + 459 + ], + [ + 1201, + 462 + ], + [ + 1199, + 471 + ], + [ + 1193, + 471 + ], + [ + 1183, + 467 + ], + [ + 1184, + 454 + ], + [ + 1193, + 441 + ], + [ + 1196, + 438 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1266, + 357 + ], + [ + 1266, + 481 + ], + [ + 1265, + 481 + ], + [ + 1264, + 353 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1268, + 333 + ], + [ + 1268, + 371 + ], + [ + 1258, + 372 + ], + [ + 1258, + 332 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1269, + 369 + ], + [ + 1275, + 376 + ], + [ + 1276, + 385 + ], + [ + 1276, + 
390 + ], + [ + 1271, + 394 + ], + [ + 1266, + 395 + ], + [ + 1260, + 395 + ], + [ + 1253, + 391 + ], + [ + 1253, + 380 + ], + [ + 1260, + 372 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1234, + 391 + ], + [ + 1233, + 383 + ], + [ + 1239, + 375 + ], + [ + 1248, + 374 + ], + [ + 1252, + 381 + ], + [ + 1252, + 391 + ], + [ + 1250, + 394 + ], + [ + 1242, + 394 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1280, + 448 + ], + [ + 1280, + 485 + ], + [ + 1277, + 487 + ], + [ + 1277, + 451 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1366, + 541 + ], + [ + 1467, + 622 + ], + [ + 1468, + 605 + ], + [ + 1525, + 598 + ], + [ + 1550, + 557 + ], + [ + 1460, + 488 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1327, + 449 + ], + [ + 1330, + 490 + ], + [ + 1327, + 490 + ], + [ + 1322, + 449 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1475, + 360 + ], + [ + 1476, + 419 + ], + [ + 1472, + 415 + ], + [ + 1470, + 359 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1443, + 291 + ], + [ + 1442, + 344 + ], + [ + 1425, + 342 + ], + [ + 1422, + 289 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1490, + 336 + ], + [ + 1489, + 350 + ], + [ + 1482, + 353 + ], + [ + 1481, + 363 + ], + [ + 1475, + 388 + ], + [ + 1468, + 366 + ], + [ + 1464, + 366 + ], + [ + 1462, + 360 + ], + [ + 1452, + 360 + ], + [ + 1450, + 353 + ], + [ + 1443, + 353 + ], + [ + 1440, + 339 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1509, + 280 + ], + [ + 1512, + 417 + ], + [ + 1511, + 415 + ], + [ + 1505, + 278 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1515, + 233 + ], + [ + 1515, + 281 + ], + [ + 1501, + 280 + ], + [ + 1499, + 229 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1367, + 423 + ], + [ + 1392, + 422 + ], + [ + 1412, + 422 + ], + [ + 1379, + 496 + ], + [ + 1365, + 521 + ], + [ + 1354, + 521 + ], + [ + 1350, + 515 + ], + [ + 1343, + 515 + ], + [ + 1338, + 506 + ], + [ + 1338, + 487 + ], + [ + 1340, + 478 + ], + [ + 1344, + 466 + ], + [ + 1345, + 460 + ], + [ + 1340, + 456 + ], + [ + 1339, + 453 + ], + [ + 1340, + 449 + ], + [ + 1345, + 448 + ], + [ + 1352, + 448 + ], + [ + 1360, + 430 + ], + [ + 1361, + 426 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1418, + 420 + ], + [ + 1385, + 526 + ], + [ + 1366, + 519 + ], + [ + 1361, + 505 + ], + [ + 1361, + 478 + ], + [ + 1366, + 465 + ], + [ + 1353, + 461 + ], + [ + 1353, + 456 + ], + [ + 1361, + 452 + ], + [ + 1366, + 452 + ], + [ + 1372, + 454 + ], + [ + 1377, + 437 + ], + [ + 1392, + 425 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1405, + 528 + ], + [ + 1389, + 540 + ], + [ + 1361, + 541 + ], + [ + 1360, + 537 + ], + [ + 1358, + 531 + ], + [ + 1364, + 525 + ], + [ + 1396, + 523 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1410, + 491 + ], + [ + 1381, + 522 + ], + [ + 1369, + 526 + ], + [ + 1366, + 533 + ], + [ + 1349, + 536 + ], + [ + 1361, + 521 + ], + [ + 1363, + 509 + ], + [ + 1369, + 497 + ], + [ + 1377, + 489 + ], + [ + 1389, + 485 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1425, + 445 + ], + [ + 1408, + 431 + ], + [ + 1397, + 362 + ], + [ + 1394, + 311 + ], + [ + 1391, + 270 + ], + [ + 1391, + 229 + ], + [ + 1390, + 214 + ], + [ + 1377, + 188 + ], + [ + 1353, + 147 + ], + [ + 1336, + 111 + ], + [ + 1336, + 90 + ], + [ + 1326, + 63 + ], + [ + 1318, + 21 + ], + [ + 1313, + 0 + ], + [ + 1332, + 1 + ], + [ + 1335, + 32 + ], + [ + 1350, + 88 + ], + [ + 1358, + 110 + ], + [ + 1369, + 137 + ], + 
[ + 1391, + 181 + ], + [ + 1395, + 177 + ], + [ + 1393, + 103 + ], + [ + 1383, + 51 + ], + [ + 1379, + 20 + ], + [ + 1367, + 0 + ], + [ + 1410, + 0 + ], + [ + 1416, + 22 + ], + [ + 1428, + 98 + ], + [ + 1432, + 175 + ], + [ + 1426, + 257 + ], + [ + 1439, + 370 + ], + [ + 1446, + 424 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1454, + 409 + ], + [ + 1485, + 408 + ], + [ + 1518, + 408 + ], + [ + 1560, + 411 + ], + [ + 1556, + 521 + ], + [ + 1525, + 541 + ], + [ + 1484, + 544 + ], + [ + 1464, + 548 + ], + [ + 1445, + 553 + ], + [ + 1433, + 560 + ], + [ + 1426, + 564 + ], + [ + 1406, + 562 + ], + [ + 1405, + 552 + ], + [ + 1400, + 551 + ], + [ + 1390, + 551 + ], + [ + 1383, + 546 + ], + [ + 1377, + 533 + ], + [ + 1378, + 495 + ], + [ + 1381, + 478 + ], + [ + 1391, + 470 + ], + [ + 1378, + 470 + ], + [ + 1373, + 463 + ], + [ + 1376, + 457 + ], + [ + 1388, + 453 + ], + [ + 1397, + 456 + ], + [ + 1399, + 445 + ], + [ + 1408, + 426 + ], + [ + 1423, + 414 + ], + [ + 1448, + 409 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1855, + 795 + ], + [ + 1697, + 783 + ], + [ + 1987, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 865 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1537, + 517 + ], + [ + 1543, + 374 + ], + [ + 1543, + 311 + ], + [ + 1541, + 249 + ], + [ + 1542, + 106 + ], + [ + 1546, + 74 + ], + [ + 1549, + 31 + ], + [ + 1549, + 12 + ], + [ + 1548, + 0 + ], + [ + 1605, + 0 + ], + [ + 1600, + 18 + ], + [ + 1602, + 80 + ], + [ + 1596, + 139 + ], + [ + 1597, + 214 + ], + [ + 1601, + 376 + ], + [ + 1601, + 431 + ], + [ + 1603, + 497 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1686, + 385 + ], + [ + 1679, + 370 + ], + [ + 1674, + 346 + ], + [ + 1677, + 322 + ], + [ + 1683, + 298 + ], + [ + 1699, + 281 + ], + [ + 1708, + 270 + ], + [ + 1712, + 269 + ], + [ + 1723, + 275 + ], + [ + 1740, + 289 + ], + [ + 1756, + 305 + ], + [ + 1764, + 323 + ], + [ + 1772, + 343 + ], + [ + 1775, + 351 + ], + [ + 1777, + 366 + ], + [ + 1764, + 381 + ], + [ + 1754, + 380 + ], + [ + 1748, + 424 + ], + [ + 1738, + 478 + ], + [ + 1730, + 519 + ], + [ + 1709, + 467 + ], + [ + 1692, + 407 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1667, + 468 + ], + [ + 1688, + 461 + ], + [ + 1715, + 464 + ], + [ + 1724, + 464 + ], + [ + 1730, + 464 + ], + [ + 1734, + 525 + ], + [ + 1697, + 521 + ], + [ + 1673, + 498 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1855, + 795 + ], + [ + 1697, + 783 + ], + [ + 1987, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 865 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1870, + 227 + ], + [ + 1873, + 477 + ], + [ + 1862, + 477 + ], + [ + 1857, + 225 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1697, + 161 + ], + [ + 1712, + 145 + ], + [ + 1798, + 97 + ], + [ + 2048, + 77 + ], + [ + 2048, + 225 + ], + [ + 1858, + 229 + ], + [ + 1661, + 243 + ], + [ + 1544, + 255 + ], + [ + 1562, + 239 + ], + [ + 1576, + 215 + ], + [ + 1628, + 194 + ], + [ + 1683, + 163 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1772, + 497 + ], + [ + 1769, + 476 + ], + [ + 1807, + 466 + ], + [ + 1824, + 464 + ], + [ + 1858, + 471 + ], + [ + 1880, + 461 + ], + [ + 1906, + 454 + ], + [ + 1927, + 454 + ], + [ + 1957, + 461 + ], + [ + 1965, + 469 + ], + [ + 1962, + 503 + ], + [ + 1960, + 545 + ], + [ + 1903, + 589 + ], + [ + 1792, + 547 + ], + [ + 1768, + 511 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1902, + 477 + ], + [ + 1903, + 567 + ], + [ + 1875, + 570 + ], + [ + 1875, + 479 + ], + [ + 
1882, + 473 + ], + [ + 1888, + 473 + ], + [ + 1894, + 475 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1808, + 478 + ], + [ + 1806, + 526 + ], + [ + 1793, + 521 + ], + [ + 1790, + 518 + ], + [ + 1789, + 493 + ], + [ + 1789, + 479 + ], + [ + 1794, + 473 + ], + [ + 1799, + 473 + ], + [ + 1807, + 475 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1891, + 608 + ], + [ + 1847, + 734 + ], + [ + 1787, + 740 + ], + [ + 1727, + 729 + ], + [ + 1673, + 717 + ], + [ + 1656, + 717 + ], + [ + 1645, + 706 + ], + [ + 1638, + 692 + ], + [ + 1619, + 681 + ], + [ + 1579, + 660 + ], + [ + 1555, + 642 + ], + [ + 1540, + 642 + ], + [ + 1528, + 635 + ], + [ + 1526, + 577 + ], + [ + 1568, + 570 + ], + [ + 1720, + 578 + ], + [ + 1836, + 590 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1661, + 408 + ], + [ + 1668, + 415 + ], + [ + 1681, + 417 + ], + [ + 1686, + 421 + ], + [ + 1688, + 431 + ], + [ + 1691, + 442 + ], + [ + 1695, + 452 + ], + [ + 1695, + 464 + ], + [ + 1703, + 481 + ], + [ + 1717, + 492 + ], + [ + 1728, + 496 + ], + [ + 1738, + 499 + ], + [ + 1748, + 491 + ], + [ + 1759, + 484 + ], + [ + 1770, + 489 + ], + [ + 1792, + 489 + ], + [ + 1818, + 489 + ], + [ + 1838, + 499 + ], + [ + 1848, + 505 + ], + [ + 1859, + 524 + ], + [ + 1872, + 545 + ], + [ + 1895, + 547 + ], + [ + 1902, + 536 + ], + [ + 1918, + 519 + ], + [ + 1951, + 501 + ], + [ + 1983, + 520 + ], + [ + 1990, + 625 + ], + [ + 1882, + 634 + ], + [ + 1860, + 628 + ], + [ + 1847, + 633 + ], + [ + 1834, + 641 + ], + [ + 1827, + 644 + ], + [ + 1798, + 649 + ], + [ + 1783, + 629 + ], + [ + 1765, + 638 + ], + [ + 1750, + 638 + ], + [ + 1713, + 638 + ], + [ + 1702, + 638 + ], + [ + 1685, + 626 + ], + [ + 1670, + 626 + ], + [ + 1651, + 631 + ], + [ + 1631, + 631 + ], + [ + 1615, + 623 + ], + [ + 1606, + 609 + ], + [ + 1595, + 606 + ], + [ + 1584, + 614 + ], + [ + 1578, + 628 + ], + [ + 1571, + 648 + ], + [ + 1555, + 649 + ], + [ + 1552, + 643 + ], + [ + 1552, + 627 + ], + [ + 1561, + 610 + ], + [ + 1567, + 597 + ], + [ + 1558, + 587 + ], + [ + 1532, + 587 + ], + [ + 1519, + 573 + ], + [ + 1503, + 574 + ], + [ + 1494, + 570 + ], + [ + 1489, + 565 + ], + [ + 1494, + 556 + ], + [ + 1496, + 547 + ], + [ + 1490, + 543 + ], + [ + 1490, + 530 + ], + [ + 1488, + 513 + ], + [ + 1494, + 490 + ], + [ + 1501, + 481 + ], + [ + 1517, + 470 + ], + [ + 1524, + 456 + ], + [ + 1530, + 445 + ], + [ + 1535, + 426 + ], + [ + 1541, + 412 + ], + [ + 1561, + 400 + ], + [ + 1574, + 421 + ], + [ + 1583, + 438 + ], + [ + 1604, + 451 + ], + [ + 1613, + 442 + ], + [ + 1625, + 433 + ], + [ + 1625, + 421 + ], + [ + 1628, + 413 + ], + [ + 1639, + 410 + ], + [ + 1649, + 410 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1989, + 464 + ], + [ + 2021, + 456 + ], + [ + 2048, + 450 + ], + [ + 2048, + 486 + ], + [ + 1992, + 532 + ], + [ + 1988, + 508 + ], + [ + 1985, + 482 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1661, + 408 + ], + [ + 1668, + 415 + ], + [ + 1681, + 417 + ], + [ + 1686, + 421 + ], + [ + 1688, + 431 + ], + [ + 1691, + 442 + ], + [ + 1695, + 452 + ], + [ + 1695, + 464 + ], + [ + 1703, + 481 + ], + [ + 1717, + 492 + ], + [ + 1728, + 496 + ], + [ + 1738, + 499 + ], + [ + 1748, + 491 + ], + [ + 1759, + 484 + ], + [ + 1770, + 489 + ], + [ + 1792, + 489 + ], + [ + 1818, + 489 + ], + [ + 1838, + 499 + ], + [ + 1848, + 505 + ], + [ + 1859, + 524 + ], + [ + 1872, + 545 + ], + [ + 1895, + 547 + ], + [ + 1902, + 536 + ], + [ + 1918, + 519 + ], + [ + 1951, + 501 + ], + [ + 1983, + 520 + ], + [ + 1990, + 625 + ], + [ 
+ 1882, + 634 + ], + [ + 1860, + 628 + ], + [ + 1847, + 633 + ], + [ + 1834, + 641 + ], + [ + 1827, + 644 + ], + [ + 1798, + 649 + ], + [ + 1783, + 629 + ], + [ + 1765, + 638 + ], + [ + 1750, + 638 + ], + [ + 1713, + 638 + ], + [ + 1702, + 638 + ], + [ + 1685, + 626 + ], + [ + 1670, + 626 + ], + [ + 1651, + 631 + ], + [ + 1631, + 631 + ], + [ + 1615, + 623 + ], + [ + 1606, + 609 + ], + [ + 1595, + 606 + ], + [ + 1584, + 614 + ], + [ + 1578, + 628 + ], + [ + 1571, + 648 + ], + [ + 1555, + 649 + ], + [ + 1552, + 643 + ], + [ + 1552, + 627 + ], + [ + 1561, + 610 + ], + [ + 1567, + 597 + ], + [ + 1558, + 587 + ], + [ + 1532, + 587 + ], + [ + 1519, + 573 + ], + [ + 1503, + 574 + ], + [ + 1494, + 570 + ], + [ + 1489, + 565 + ], + [ + 1494, + 556 + ], + [ + 1496, + 547 + ], + [ + 1490, + 543 + ], + [ + 1490, + 530 + ], + [ + 1488, + 513 + ], + [ + 1494, + 490 + ], + [ + 1501, + 481 + ], + [ + 1517, + 470 + ], + [ + 1524, + 456 + ], + [ + 1530, + 445 + ], + [ + 1535, + 426 + ], + [ + 1541, + 412 + ], + [ + 1561, + 400 + ], + [ + 1574, + 421 + ], + [ + 1583, + 438 + ], + [ + 1604, + 451 + ], + [ + 1613, + 442 + ], + [ + 1625, + 433 + ], + [ + 1625, + 421 + ], + [ + 1628, + 413 + ], + [ + 1639, + 410 + ], + [ + 1649, + 410 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1890, + 599 + ], + [ + 1911, + 579 + ], + [ + 1933, + 562 + ], + [ + 1933, + 548 + ], + [ + 1941, + 522 + ], + [ + 1951, + 499 + ], + [ + 1969, + 484 + ], + [ + 1975, + 479 + ], + [ + 1986, + 482 + ], + [ + 1994, + 505 + ], + [ + 2023, + 482 + ], + [ + 2048, + 452 + ], + [ + 2048, + 933 + ], + [ + 2011, + 922 + ], + [ + 1987, + 915 + ], + [ + 1981, + 929 + ], + [ + 1959, + 940 + ], + [ + 1932, + 941 + ], + [ + 1901, + 925 + ], + [ + 1875, + 893 + ], + [ + 1861, + 823 + ], + [ + 1851, + 784 + ], + [ + 1847, + 747 + ], + [ + 1843, + 703 + ], + [ + 1857, + 635 + ], + [ + 1867, + 621 + ], + [ + 1882, + 606 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1010, + 369 + ], + [ + 1011, + 424 + ], + [ + 1007, + 422 + ], + [ + 1009, + 371 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1016, + 399 + ], + [ + 1016, + 408 + ], + [ + 1014, + 412 + ], + [ + 1009, + 413 + ], + [ + 1003, + 412 + ], + [ + 1002, + 405 + ], + [ + 1006, + 399 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 988, + 393 + ], + [ + 1013, + 395 + ], + [ + 1013, + 399 + ], + [ + 1000, + 400 + ], + [ + 1000, + 404 + ], + [ + 994, + 403 + ], + [ + 994, + 402 + ], + [ + 988, + 401 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1072, + 386 + ], + [ + 1073, + 398 + ], + [ + 1065, + 397 + ], + [ + 1067, + 384 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1045, + 389 + ], + [ + 1070, + 386 + ], + [ + 1070, + 390 + ], + [ + 1051, + 392 + ], + [ + 1049, + 419 + ], + [ + 1046, + 419 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1041, + 381 + ], + [ + 1048, + 381 + ], + [ + 1048, + 393 + ], + [ + 1041, + 393 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1089, + 381 + ], + [ + 1089, + 394 + ], + [ + 1085, + 394 + ], + [ + 1086, + 379 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 713, + 236 + ], + [ + 713, + 339 + ], + [ + 686, + 338 + ], + [ + 685, + 231 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 699, + 120 + ], + [ + 714, + 111 + ], + [ + 718, + 104 + ], + [ + 726, + 88 + ], + [ + 727, + 72 + ], + [ + 736, + 50 + ], + [ + 746, + 45 + ], + [ + 759, + 37 + ], + [ + 768, + 41 + ], + [ + 781, + 45 + ], + [ + 793, + 42 + ], + [ + 807, + 43 
+ ], + [ + 814, + 45 + ], + [ + 827, + 42 + ], + [ + 834, + 41 + ], + [ + 837, + 51 + ], + [ + 844, + 55 + ], + [ + 847, + 62 + ], + [ + 851, + 73 + ], + [ + 856, + 76 + ], + [ + 865, + 77 + ], + [ + 870, + 89 + ], + [ + 872, + 96 + ], + [ + 878, + 104 + ], + [ + 881, + 106 + ], + [ + 877, + 120 + ], + [ + 883, + 135 + ], + [ + 892, + 140 + ], + [ + 896, + 142 + ], + [ + 902, + 147 + ], + [ + 908, + 153 + ], + [ + 908, + 165 + ], + [ + 916, + 169 + ], + [ + 914, + 184 + ], + [ + 907, + 194 + ], + [ + 912, + 200 + ], + [ + 913, + 208 + ], + [ + 916, + 235 + ], + [ + 920, + 242 + ], + [ + 925, + 256 + ], + [ + 929, + 297 + ], + [ + 931, + 316 + ], + [ + 926, + 345 + ], + [ + 917, + 362 + ], + [ + 906, + 379 + ], + [ + 904, + 384 + ], + [ + 899, + 398 + ], + [ + 891, + 397 + ], + [ + 883, + 398 + ], + [ + 879, + 402 + ], + [ + 876, + 420 + ], + [ + 865, + 420 + ], + [ + 865, + 407 + ], + [ + 862, + 394 + ], + [ + 854, + 394 + ], + [ + 837, + 387 + ], + [ + 832, + 381 + ], + [ + 827, + 378 + ], + [ + 820, + 381 + ], + [ + 818, + 391 + ], + [ + 820, + 421 + ], + [ + 811, + 422 + ], + [ + 810, + 404 + ], + [ + 807, + 389 + ], + [ + 781, + 364 + ], + [ + 729, + 339 + ], + [ + 714, + 339 + ], + [ + 709, + 333 + ], + [ + 705, + 325 + ], + [ + 702, + 321 + ], + [ + 699, + 313 + ], + [ + 700, + 301 + ], + [ + 700, + 286 + ], + [ + 703, + 271 + ], + [ + 700, + 269 + ], + [ + 688, + 269 + ], + [ + 680, + 270 + ], + [ + 676, + 265 + ], + [ + 672, + 259 + ], + [ + 671, + 248 + ], + [ + 670, + 237 + ], + [ + 676, + 225 + ], + [ + 671, + 214 + ], + [ + 668, + 210 + ], + [ + 671, + 201 + ], + [ + 675, + 199 + ], + [ + 680, + 187 + ], + [ + 689, + 182 + ], + [ + 687, + 175 + ], + [ + 685, + 165 + ], + [ + 691, + 148 + ], + [ + 692, + 137 + ], + [ + 694, + 128 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 843, + 401 + ], + [ + 843, + 421 + ], + [ + 844, + 424 + ], + [ + 844, + 394 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 850, + 382 + ], + [ + 849, + 413 + ], + [ + 837, + 412 + ], + [ + 838, + 382 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 802, + 328 + ], + [ + 805, + 395 + ], + [ + 785, + 412 + ], + [ + 776, + 394 + ], + [ + 774, + 329 + ], + [ + 788, + 329 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 701, + 353 + ], + [ + 701, + 444 + ], + [ + 697, + 433 + ], + [ + 696, + 347 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 695, + 325 + ], + [ + 708, + 325 + ], + [ + 707, + 356 + ], + [ + 695, + 356 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 711, + 385 + ], + [ + 711, + 401 + ], + [ + 694, + 401 + ], + [ + 692, + 386 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 685, + 370 + ], + [ + 722, + 375 + ], + [ + 721, + 388 + ], + [ + 685, + 388 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 773, + 405 + ], + [ + 772, + 439 + ], + [ + 772, + 443 + ], + [ + 774, + 397 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 786, + 390 + ], + [ + 784, + 444 + ], + [ + 785, + 448 + ], + [ + 788, + 385 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 769, + 394 + ], + [ + 771, + 408 + ], + [ + 781, + 407 + ], + [ + 780, + 394 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 782, + 379 + ], + [ + 786, + 380 + ], + [ + 790, + 382 + ], + [ + 791, + 417 + ], + [ + 780, + 417 + ], + [ + 781, + 396 + ], + [ + 781, + 388 + ], + [ + 781, + 382 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 740, + 441 + ], + [ + 744, + 417 + ], + [ + 742, + 411 + ], + [ + 737, + 405 + ], + [ + 
732, + 402 + ], + [ + 715, + 397 + ], + [ + 714, + 387 + ], + [ + 726, + 308 + ], + [ + 734, + 255 + ], + [ + 736, + 234 + ], + [ + 740, + 231 + ], + [ + 743, + 231 + ], + [ + 749, + 235 + ], + [ + 772, + 332 + ], + [ + 782, + 381 + ], + [ + 782, + 392 + ], + [ + 770, + 397 + ], + [ + 769, + 408 + ], + [ + 765, + 413 + ], + [ + 761, + 415 + ], + [ + 767, + 445 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 765, + 456 + ], + [ + 765, + 431 + ], + [ + 765, + 422 + ], + [ + 777, + 415 + ], + [ + 798, + 415 + ], + [ + 815, + 415 + ], + [ + 830, + 414 + ], + [ + 853, + 417 + ], + [ + 873, + 417 + ], + [ + 879, + 411 + ], + [ + 887, + 406 + ], + [ + 893, + 404 + ], + [ + 899, + 399 + ], + [ + 907, + 399 + ], + [ + 913, + 405 + ], + [ + 933, + 412 + ], + [ + 941, + 403 + ], + [ + 950, + 400 + ], + [ + 962, + 397 + ], + [ + 973, + 400 + ], + [ + 988, + 408 + ], + [ + 1011, + 432 + ], + [ + 968, + 493 + ], + [ + 836, + 507 + ], + [ + 768, + 461 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 948, + 448 + ], + [ + 951, + 491 + ], + [ + 947, + 491 + ], + [ + 943, + 451 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 861, + 447 + ], + [ + 861, + 491 + ], + [ + 859, + 491 + ], + [ + 856, + 447 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 840, + 484 + ], + [ + 861, + 483 + ], + [ + 873, + 481 + ], + [ + 893, + 483 + ], + [ + 927, + 483 + ], + [ + 956, + 485 + ], + [ + 956, + 512 + ], + [ + 879, + 514 + ], + [ + 857, + 516 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 623, + 331 + ], + [ + 625, + 414 + ], + [ + 621, + 414 + ], + [ + 620, + 326 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 591, + 331 + ], + [ + 619, + 330 + ], + [ + 619, + 341 + ], + [ + 615, + 341 + ], + [ + 612, + 347 + ], + [ + 600, + 347 + ], + [ + 599, + 340 + ], + [ + 591, + 340 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 629, + 365 + ], + [ + 630, + 387 + ], + [ + 618, + 386 + ], + [ + 619, + 367 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 630, + 344 + ], + [ + 630, + 370 + ], + [ + 616, + 370 + ], + [ + 615, + 342 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 507, + 309 + ], + [ + 513, + 420 + ], + [ + 505, + 422 + ], + [ + 503, + 308 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 451, + 440 + ], + [ + 458, + 443 + ], + [ + 457, + 454 + ], + [ + 441, + 470 + ], + [ + 437, + 464 + ], + [ + 436, + 457 + ], + [ + 439, + 445 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 358, + 498 + ], + [ + 369, + 461 + ], + [ + 369, + 423 + ], + [ + 368, + 355 + ], + [ + 374, + 305 + ], + [ + 375, + 236 + ], + [ + 378, + 200 + ], + [ + 372, + 111 + ], + [ + 364, + 81 + ], + [ + 354, + 80 + ], + [ + 339, + 74 + ], + [ + 324, + 74 + ], + [ + 307, + 73 + ], + [ + 289, + 65 + ], + [ + 279, + 55 + ], + [ + 271, + 46 + ], + [ + 253, + 40 + ], + [ + 232, + 37 + ], + [ + 219, + 34 + ], + [ + 213, + 27 + ], + [ + 210, + 18 + ], + [ + 200, + 12 + ], + [ + 209, + 0 + ], + [ + 458, + 0 + ], + [ + 460, + 0 + ], + [ + 460, + 9 + ], + [ + 459, + 19 + ], + [ + 456, + 39 + ], + [ + 458, + 53 + ], + [ + 461, + 65 + ], + [ + 461, + 78 + ], + [ + 452, + 118 + ], + [ + 439, + 163 + ], + [ + 426, + 212 + ], + [ + 423, + 228 + ], + [ + 418, + 278 + ], + [ + 414, + 343 + ], + [ + 414, + 380 + ], + [ + 419, + 426 + ], + [ + 417, + 459 + ], + [ + 422, + 498 + ], + [ + 403, + 504 + ], + [ + 376, + 507 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 28, + 252 + ], + [ + 93, + 232 + ], + [ + 191, + 208 + 
], + [ + 241, + 198 + ], + [ + 248, + 192 + ], + [ + 262, + 188 + ], + [ + 275, + 194 + ], + [ + 324, + 213 + ], + [ + 376, + 232 + ], + [ + 456, + 269 + ], + [ + 506, + 290 + ], + [ + 546, + 305 + ], + [ + 559, + 313 + ], + [ + 557, + 317 + ], + [ + 539, + 312 + ], + [ + 392, + 312 + ], + [ + 297, + 314 + ], + [ + 273, + 335 + ], + [ + 281, + 488 + ], + [ + 334, + 487 + ], + [ + 367, + 481 + ], + [ + 389, + 477 + ], + [ + 412, + 472 + ], + [ + 422, + 464 + ], + [ + 453, + 464 + ], + [ + 465, + 563 + ], + [ + 430, + 568 + ], + [ + 385, + 578 + ], + [ + 383, + 584 + ], + [ + 357, + 588 + ], + [ + 310, + 590 + ], + [ + 214, + 592 + ], + [ + 227, + 494 + ], + [ + 268, + 490 + ], + [ + 268, + 412 + ], + [ + 265, + 339 + ], + [ + 242, + 318 + ], + [ + 154, + 324 + ], + [ + 107, + 312 + ], + [ + 47, + 304 + ], + [ + 0, + 294 + ], + [ + 0, + 259 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 685, + 453 + ], + [ + 706, + 435 + ], + [ + 757, + 433 + ], + [ + 788, + 436 + ], + [ + 807, + 442 + ], + [ + 823, + 458 + ], + [ + 830, + 466 + ], + [ + 838, + 464 + ], + [ + 845, + 464 + ], + [ + 851, + 469 + ], + [ + 849, + 478 + ], + [ + 852, + 485 + ], + [ + 861, + 494 + ], + [ + 861, + 510 + ], + [ + 865, + 519 + ], + [ + 865, + 536 + ], + [ + 860, + 554 + ], + [ + 850, + 558 + ], + [ + 837, + 556 + ], + [ + 830, + 550 + ], + [ + 826, + 546 + ], + [ + 816, + 548 + ], + [ + 816, + 555 + ], + [ + 815, + 562 + ], + [ + 812, + 566 + ], + [ + 797, + 569 + ], + [ + 788, + 569 + ], + [ + 779, + 558 + ], + [ + 774, + 554 + ], + [ + 752, + 547 + ], + [ + 730, + 520 + ], + [ + 696, + 472 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 464, + 432 + ], + [ + 475, + 424 + ], + [ + 491, + 415 + ], + [ + 543, + 409 + ], + [ + 572, + 407 + ], + [ + 629, + 407 + ], + [ + 664, + 409 + ], + [ + 685, + 417 + ], + [ + 705, + 431 + ], + [ + 721, + 451 + ], + [ + 727, + 463 + ], + [ + 729, + 464 + ], + [ + 732, + 457 + ], + [ + 738, + 454 + ], + [ + 746, + 454 + ], + [ + 752, + 458 + ], + [ + 755, + 463 + ], + [ + 755, + 466 + ], + [ + 746, + 473 + ], + [ + 737, + 473 + ], + [ + 754, + 490 + ], + [ + 762, + 502 + ], + [ + 768, + 522 + ], + [ + 769, + 552 + ], + [ + 764, + 575 + ], + [ + 758, + 585 + ], + [ + 738, + 586 + ], + [ + 732, + 582 + ], + [ + 726, + 575 + ], + [ + 711, + 573 + ], + [ + 694, + 575 + ], + [ + 689, + 599 + ], + [ + 684, + 606 + ], + [ + 668, + 607 + ], + [ + 650, + 606 + ], + [ + 643, + 597 + ], + [ + 637, + 582 + ], + [ + 606, + 582 + ], + [ + 580, + 581 + ], + [ + 567, + 588 + ], + [ + 555, + 588 + ], + [ + 547, + 588 + ], + [ + 541, + 583 + ], + [ + 532, + 583 + ], + [ + 519, + 585 + ], + [ + 507, + 588 + ], + [ + 498, + 591 + ], + [ + 493, + 593 + ], + [ + 484, + 595 + ], + [ + 469, + 608 + ], + [ + 457, + 611 + ], + [ + 448, + 609 + ], + [ + 436, + 602 + ], + [ + 429, + 581 + ], + [ + 430, + 561 + ], + [ + 426, + 516 + ], + [ + 431, + 477 + ], + [ + 441, + 464 + ], + [ + 452, + 448 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1038, + 412 + ], + [ + 1076, + 414 + ], + [ + 1104, + 416 + ], + [ + 1122, + 419 + ], + [ + 1141, + 437 + ], + [ + 1158, + 473 + ], + [ + 1169, + 490 + ], + [ + 1171, + 530 + ], + [ + 1169, + 559 + ], + [ + 1162, + 561 + ], + [ + 1147, + 565 + ], + [ + 1139, + 574 + ], + [ + 1126, + 574 + ], + [ + 1119, + 569 + ], + [ + 1113, + 558 + ], + [ + 1059, + 555 + ], + [ + 1017, + 554 + ], + [ + 1011, + 560 + ], + [ + 997, + 561 + ], + [ + 992, + 558 + ], + [ + 980, + 555 + ], + [ + 978, + 568 + ], + [ + 968, + 574 + ], + [ + 958, + 572 + ], + [ + 952, + 
565 + ], + [ + 948, + 543 + ], + [ + 950, + 502 + ], + [ + 972, + 446 + ], + [ + 981, + 432 + ], + [ + 995, + 418 + ], + [ + 1010, + 412 + ], + [ + 1023, + 411 + ], + [ + 1031, + 410 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 25, + 413 + ], + [ + 72, + 413 + ], + [ + 123, + 417 + ], + [ + 172, + 422 + ], + [ + 182, + 438 + ], + [ + 202, + 458 + ], + [ + 227, + 482 + ], + [ + 235, + 497 + ], + [ + 236, + 522 + ], + [ + 243, + 537 + ], + [ + 247, + 554 + ], + [ + 242, + 577 + ], + [ + 224, + 603 + ], + [ + 214, + 625 + ], + [ + 192, + 637 + ], + [ + 164, + 637 + ], + [ + 150, + 631 + ], + [ + 142, + 624 + ], + [ + 135, + 618 + ], + [ + 104, + 621 + ], + [ + 48, + 627 + ], + [ + 41, + 637 + ], + [ + 32, + 647 + ], + [ + 18, + 656 + ], + [ + 0, + 659 + ], + [ + 2, + 413 + ], + [ + 8, + 413 + ], + [ + 21, + 413 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 149, + 558 + ], + [ + 213, + 555 + ], + [ + 215, + 573 + ], + [ + 154, + 579 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 490, + 487 + ], + [ + 569, + 486 + ], + [ + 568, + 507 + ], + [ + 491, + 507 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1008, + 515 + ], + [ + 1072, + 516 + ], + [ + 1070, + 530 + ], + [ + 1008, + 529 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 997, + 55 + ], + [ + 1011, + 55 + ], + [ + 1014, + 50 + ], + [ + 1019, + 56 + ], + [ + 1030, + 55 + ], + [ + 1031, + 64 + ], + [ + 998, + 65 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000018_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000018_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..c458c65f7c9b03596aa942616490097eaace276e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000018_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000018_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000018_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..b6de05b188c54e56108f1eadc8d83d130c1e935e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000018_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000019_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000019_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..651a8d0406302cf8e6653d1f6ea7daf486f928da Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000019_000019_gtFine_color.png differ diff --git 
a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000019_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000019_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..14cfbb04fa4cf7b51d5fd8b7a20c6e20a6dc8ff2 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000019_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000019_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000019_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..577a47556bb360bb865c624b0bd752f55b392def --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000019_000019_gtFine_polygons.json @@ -0,0 +1,6136 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 534, + 1 + ], + [ + 1149, + 1 + ], + [ + 1149, + 390 + ], + [ + 617, + 388 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 0, + 591 + ], + [ + 817, + 446 + ], + [ + 873, + 443 + ], + [ + 898, + 441 + ], + [ + 941, + 437 + ], + [ + 965, + 454 + ], + [ + 1031, + 462 + ], + [ + 1181, + 477 + ], + [ + 1333, + 491 + ], + [ + 1751, + 559 + ], + [ + 1986, + 606 + ], + [ + 2048, + 611 + ], + [ + 2047, + 1024 + ], + [ + 0, + 1024 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 962, + 190 + ], + [ + 1013, + 191 + ], + [ + 1026, + 356 + ], + [ + 966, + 353 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 831, + 313 + ], + [ + 861, + 320 + ], + [ + 872, + 320 + ], + [ + 882, + 320 + ], + [ + 891, + 321 + ], + [ + 898, + 325 + ], + [ + 908, + 332 + ], + [ + 919, + 344 + ], + [ + 938, + 351 + ], + [ + 976, + 335 + ], + [ + 996, + 317 + ], + [ + 1026, + 316 + ], + [ + 1024, + 443 + ], + [ + 940, + 445 + ], + [ + 904, + 444 + ], + [ + 880, + 449 + ], + [ + 803, + 448 + ], + [ + 678, + 441 + ], + [ + 681, + 321 + ], + [ + 729, + 305 + ], + [ + 764, + 304 + ], + [ + 791, + 298 + ], + [ + 813, + 309 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 899, + 408 + ], + [ + 900, + 424 + ], + [ + 914, + 426 + ], + [ + 926, + 429 + ], + [ + 934, + 427 + ], + [ + 942, + 422 + ], + [ + 948, + 422 + ], + [ + 953, + 422 + ], + [ + 959, + 420 + ], + [ + 962, + 410 + ], + [ + 973, + 410 + ], + [ + 969, + 434 + ], + [ + 944, + 441 + ], + [ + 893, + 445 + ], + [ + 874, + 448 + ], + [ + 845, + 452 + ], + [ + 848, + 431 + ], + [ + 867, + 427 + ], + [ + 877, + 427 + ], + [ + 887, + 424 + ], + [ + 893, + 424 + ], + [ + 894, + 410 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 776, + 403 + ], + [ + 776, + 431 + ], + [ + 761, + 441 + ], + [ + 763, + 403 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 754, + 411 + ], + [ + 752, + 443 + ], + [ + 735, + 442 + ], + [ + 734, + 423 + ], + [ + 744, + 421 + ], + [ + 744, + 410 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 734, + 386 + ], + [ + 732, + 443 + ], + [ + 729, + 441 + ], + [ + 728, + 384 + ] + ] + }, + { + "label": "train", + "polygon": [ + [ + 822, + 407 + ], + [ + 852, + 411 + ], + [ + 858, + 418 + ], + [ + 856, + 439 + ], + [ + 850, + 451 + ], + [ + 820, + 450 + ], + [ + 811, + 440 + ], + [ + 810, + 432 + ], + [ + 811, + 421 + ], + [ + 813, + 411 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 961, + 384 + ], + [ + 962, + 443 + ], + [ + 960, + 443 + ], + [ + 959, + 379 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 932, + 352 + ], + [ + 985, + 361 + ], + [ + 985, + 362 + ], + [ + 
935, + 355 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1030, + 358 + ], + [ + 1028, + 412 + ], + [ + 976, + 405 + ], + [ + 975, + 359 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 937, + 348 + ], + [ + 936, + 367 + ], + [ + 930, + 367 + ], + [ + 930, + 349 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 976, + 221 + ], + [ + 990, + 220 + ], + [ + 1003, + 224 + ], + [ + 1021, + 225 + ], + [ + 1030, + 225 + ], + [ + 1035, + 297 + ], + [ + 1038, + 326 + ], + [ + 1035, + 353 + ], + [ + 1026, + 368 + ], + [ + 1009, + 369 + ], + [ + 992, + 372 + ], + [ + 993, + 422 + ], + [ + 989, + 416 + ], + [ + 984, + 369 + ], + [ + 964, + 356 + ], + [ + 953, + 352 + ], + [ + 935, + 352 + ], + [ + 919, + 342 + ], + [ + 914, + 329 + ], + [ + 912, + 325 + ], + [ + 910, + 315 + ], + [ + 910, + 309 + ], + [ + 918, + 301 + ], + [ + 924, + 297 + ], + [ + 931, + 293 + ], + [ + 929, + 284 + ], + [ + 924, + 277 + ], + [ + 920, + 270 + ], + [ + 920, + 261 + ], + [ + 926, + 259 + ], + [ + 932, + 259 + ], + [ + 932, + 259 + ], + [ + 932, + 253 + ], + [ + 934, + 245 + ], + [ + 945, + 234 + ], + [ + 950, + 230 + ], + [ + 965, + 226 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1031, + 402 + ], + [ + 1036, + 435 + ], + [ + 989, + 456 + ], + [ + 981, + 445 + ], + [ + 980, + 430 + ], + [ + 980, + 422 + ], + [ + 984, + 409 + ], + [ + 1017, + 403 + ], + [ + 1028, + 400 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2048, + 498 + ], + [ + 2048, + 710 + ], + [ + 1953, + 718 + ], + [ + 1861, + 721 + ], + [ + 1804, + 718 + ], + [ + 1666, + 676 + ], + [ + 1665, + 667 + ], + [ + 1672, + 650 + ], + [ + 1695, + 643 + ], + [ + 1876, + 628 + ], + [ + 1892, + 615 + ], + [ + 1844, + 609 + ], + [ + 1752, + 597 + ], + [ + 1487, + 560 + ], + [ + 1392, + 571 + ], + [ + 1349, + 572 + ], + [ + 1322, + 570 + ], + [ + 1309, + 563 + ], + [ + 1307, + 556 + ], + [ + 1317, + 545 + ], + [ + 1391, + 531 + ], + [ + 1349, + 520 + ], + [ + 1261, + 524 + ], + [ + 1233, + 526 + ], + [ + 1199, + 525 + ], + [ + 1105, + 522 + ], + [ + 1073, + 522 + ], + [ + 1050, + 516 + ], + [ + 1045, + 509 + ], + [ + 1061, + 503 + ], + [ + 1105, + 488 + ], + [ + 1140, + 472 + ], + [ + 1917, + 497 + ], + [ + 1995, + 498 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 929, + 460 + ], + [ + 933, + 450 + ], + [ + 938, + 441 + ], + [ + 942, + 436 + ], + [ + 949, + 433 + ], + [ + 955, + 433 + ], + [ + 965, + 435 + ], + [ + 956, + 469 + ], + [ + 948, + 480 + ], + [ + 942, + 486 + ], + [ + 940, + 490 + ], + [ + 932, + 489 + ], + [ + 927, + 486 + ], + [ + 927, + 480 + ], + [ + 927, + 473 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1046, + 460 + ], + [ + 1027, + 348 + ], + [ + 1019, + 276 + ], + [ + 1010, + 207 + ], + [ + 1006, + 185 + ], + [ + 1004, + 148 + ], + [ + 1006, + 137 + ], + [ + 1025, + 113 + ], + [ + 1039, + 94 + ], + [ + 1045, + 77 + ], + [ + 1056, + 71 + ], + [ + 1054, + 59 + ], + [ + 1072, + 47 + ], + [ + 1092, + 37 + ], + [ + 1101, + 21 + ], + [ + 1109, + 9 + ], + [ + 1103, + 0 + ], + [ + 2045, + 2 + ], + [ + 2048, + 538 + ], + [ + 1856, + 530 + ], + [ + 1760, + 524 + ], + [ + 1377, + 497 + ], + [ + 1125, + 470 + ], + [ + 1089, + 464 + ], + [ + 1058, + 464 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 983, + 256 + ], + [ + 976, + 243 + ], + [ + 972, + 224 + ], + [ + 971, + 211 + ], + [ + 965, + 199 + ], + [ + 964, + 194 + ], + [ + 944, + 200 + ], + [ + 932, + 195 + ], + [ + 923, + 195 + ], + [ + 914, + 201 + ], + [ + 905, + 210 + ], + [ + 899, + 208 
+ ], + [ + 890, + 205 + ], + [ + 880, + 203 + ], + [ + 872, + 197 + ], + [ + 856, + 197 + ], + [ + 835, + 196 + ], + [ + 810, + 179 + ], + [ + 805, + 161 + ], + [ + 807, + 145 + ], + [ + 791, + 128 + ], + [ + 789, + 119 + ], + [ + 796, + 106 + ], + [ + 810, + 94 + ], + [ + 810, + 86 + ], + [ + 817, + 76 + ], + [ + 824, + 66 + ], + [ + 824, + 59 + ], + [ + 825, + 48 + ], + [ + 814, + 48 + ], + [ + 798, + 48 + ], + [ + 795, + 46 + ], + [ + 800, + 42 + ], + [ + 808, + 39 + ], + [ + 813, + 35 + ], + [ + 811, + 24 + ], + [ + 807, + 21 + ], + [ + 800, + 8 + ], + [ + 814, + 6 + ], + [ + 823, + 6 + ], + [ + 831, + 0 + ], + [ + 1084, + 1 + ], + [ + 1085, + 7 + ], + [ + 1094, + 19 + ], + [ + 1100, + 42 + ], + [ + 1086, + 62 + ], + [ + 1090, + 75 + ], + [ + 1095, + 89 + ], + [ + 1094, + 108 + ], + [ + 1082, + 128 + ], + [ + 1068, + 132 + ], + [ + 1055, + 133 + ], + [ + 1033, + 144 + ], + [ + 1026, + 154 + ], + [ + 1018, + 160 + ], + [ + 1027, + 172 + ], + [ + 1041, + 187 + ], + [ + 1050, + 204 + ], + [ + 1053, + 238 + ], + [ + 1056, + 254 + ], + [ + 1063, + 278 + ], + [ + 1061, + 300 + ], + [ + 1062, + 316 + ], + [ + 1063, + 328 + ], + [ + 1059, + 337 + ], + [ + 1064, + 353 + ], + [ + 1055, + 350 + ], + [ + 1044, + 348 + ], + [ + 1038, + 348 + ], + [ + 1033, + 357 + ], + [ + 1044, + 405 + ], + [ + 1045, + 426 + ], + [ + 1055, + 436 + ], + [ + 1062, + 480 + ], + [ + 1040, + 489 + ], + [ + 1024, + 487 + ], + [ + 1017, + 483 + ], + [ + 1012, + 474 + ], + [ + 1010, + 465 + ], + [ + 1009, + 450 + ], + [ + 1011, + 437 + ], + [ + 1016, + 428 + ], + [ + 1018, + 420 + ], + [ + 1021, + 409 + ], + [ + 1021, + 391 + ], + [ + 1017, + 364 + ], + [ + 1007, + 340 + ], + [ + 994, + 314 + ], + [ + 982, + 294 + ], + [ + 980, + 280 + ], + [ + 980, + 269 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1070, + 361 + ], + [ + 1063, + 389 + ], + [ + 1053, + 388 + ], + [ + 1048, + 383 + ], + [ + 1048, + 378 + ], + [ + 1053, + 371 + ], + [ + 1053, + 369 + ], + [ + 1053, + 361 + ], + [ + 1060, + 358 + ], + [ + 1067, + 358 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1070, + 438 + ], + [ + 1065, + 434 + ], + [ + 1064, + 422 + ], + [ + 1062, + 414 + ], + [ + 1058, + 408 + ], + [ + 1050, + 400 + ], + [ + 1059, + 369 + ], + [ + 1061, + 351 + ], + [ + 1065, + 338 + ], + [ + 1070, + 337 + ], + [ + 1072, + 339 + ], + [ + 1079, + 353 + ], + [ + 1084, + 345 + ], + [ + 1086, + 334 + ], + [ + 1095, + 330 + ], + [ + 1100, + 330 + ], + [ + 1123, + 360 + ], + [ + 1124, + 386 + ], + [ + 1119, + 431 + ], + [ + 1090, + 440 + ], + [ + 1079, + 440 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1059, + 431 + ], + [ + 1072, + 430 + ], + [ + 1084, + 447 + ], + [ + 1073, + 491 + ], + [ + 1048, + 499 + ], + [ + 1041, + 498 + ], + [ + 1040, + 495 + ], + [ + 1038, + 498 + ], + [ + 1035, + 492 + ], + [ + 1034, + 486 + ], + [ + 1034, + 481 + ], + [ + 1034, + 473 + ], + [ + 1034, + 466 + ], + [ + 1037, + 460 + ], + [ + 1031, + 456 + ], + [ + 1029, + 452 + ], + [ + 1031, + 450 + ], + [ + 1036, + 450 + ], + [ + 1041, + 451 + ], + [ + 1045, + 442 + ], + [ + 1049, + 435 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1078, + 426 + ], + [ + 1100, + 422 + ], + [ + 1122, + 424 + ], + [ + 1138, + 433 + ], + [ + 1152, + 459 + ], + [ + 1152, + 481 + ], + [ + 1128, + 496 + ], + [ + 1072, + 495 + ], + [ + 1063, + 500 + ], + [ + 1049, + 498 + ], + [ + 1048, + 486 + ], + [ + 1050, + 470 + ], + [ + 1053, + 463 + ], + [ + 1054, + 457 + ], + [ + 1047, + 454 + ], + [ + 1047, + 449 + ], + [ + 1057, + 449 + ], + [ + 1059, + 450 + ], 
+ [ + 1063, + 439 + ], + [ + 1070, + 433 + ], + [ + 1074, + 428 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1048, + 166 + ], + [ + 1053, + 157 + ], + [ + 1060, + 151 + ], + [ + 1064, + 146 + ], + [ + 1064, + 144 + ], + [ + 1070, + 138 + ], + [ + 1071, + 129 + ], + [ + 1081, + 126 + ], + [ + 1094, + 125 + ], + [ + 1099, + 114 + ], + [ + 1111, + 114 + ], + [ + 1115, + 117 + ], + [ + 1125, + 119 + ], + [ + 1129, + 112 + ], + [ + 1129, + 104 + ], + [ + 1138, + 104 + ], + [ + 1155, + 103 + ], + [ + 1162, + 98 + ], + [ + 1168, + 104 + ], + [ + 1170, + 111 + ], + [ + 1180, + 112 + ], + [ + 1183, + 120 + ], + [ + 1183, + 132 + ], + [ + 1195, + 132 + ], + [ + 1202, + 133 + ], + [ + 1206, + 142 + ], + [ + 1214, + 149 + ], + [ + 1221, + 160 + ], + [ + 1224, + 173 + ], + [ + 1224, + 190 + ], + [ + 1215, + 201 + ], + [ + 1222, + 214 + ], + [ + 1225, + 220 + ], + [ + 1227, + 229 + ], + [ + 1229, + 232 + ], + [ + 1229, + 245 + ], + [ + 1233, + 249 + ], + [ + 1240, + 259 + ], + [ + 1245, + 264 + ], + [ + 1238, + 299 + ], + [ + 1254, + 317 + ], + [ + 1253, + 323 + ], + [ + 1234, + 321 + ], + [ + 1219, + 322 + ], + [ + 1198, + 336 + ], + [ + 1198, + 341 + ], + [ + 1198, + 351 + ], + [ + 1214, + 364 + ], + [ + 1217, + 377 + ], + [ + 1212, + 397 + ], + [ + 1190, + 406 + ], + [ + 1176, + 420 + ], + [ + 1167, + 432 + ], + [ + 1157, + 439 + ], + [ + 1141, + 441 + ], + [ + 1142, + 475 + ], + [ + 1129, + 475 + ], + [ + 1120, + 432 + ], + [ + 1113, + 392 + ], + [ + 1098, + 377 + ], + [ + 1086, + 366 + ], + [ + 1081, + 353 + ], + [ + 1079, + 331 + ], + [ + 1081, + 317 + ], + [ + 1061, + 302 + ], + [ + 1043, + 311 + ], + [ + 1037, + 299 + ], + [ + 1028, + 265 + ], + [ + 1026, + 242 + ], + [ + 1031, + 227 + ], + [ + 1040, + 213 + ], + [ + 1039, + 204 + ], + [ + 1039, + 198 + ], + [ + 1039, + 178 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1155, + 463 + ], + [ + 1155, + 470 + ], + [ + 1154, + 476 + ], + [ + 1151, + 504 + ], + [ + 1147, + 505 + ], + [ + 1119, + 506 + ], + [ + 1114, + 504 + ], + [ + 1111, + 475 + ], + [ + 1110, + 468 + ], + [ + 1117, + 464 + ], + [ + 1142, + 463 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1096, + 372 + ], + [ + 1101, + 520 + ], + [ + 1104, + 523 + ], + [ + 1100, + 386 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1088, + 379 + ], + [ + 1089, + 368 + ], + [ + 1092, + 367 + ], + [ + 1097, + 367 + ], + [ + 1099, + 371 + ], + [ + 1100, + 386 + ], + [ + 1098, + 391 + ], + [ + 1092, + 391 + ], + [ + 1089, + 387 + ], + [ + 1089, + 383 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1283, + 523 + ], + [ + 1257, + 524 + ], + [ + 1260, + 528 + ], + [ + 1228, + 524 + ], + [ + 1196, + 524 + ], + [ + 1106, + 523 + ], + [ + 1305, + 563 + ], + [ + 1318, + 551 + ], + [ + 1374, + 544 + ], + [ + 1328, + 500 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1953, + 718 + ], + [ + 1811, + 716 + ], + [ + 2047, + 828 + ], + [ + 2047, + 687 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1283, + 523 + ], + [ + 1257, + 524 + ], + [ + 1260, + 528 + ], + [ + 1228, + 524 + ], + [ + 1196, + 524 + ], + [ + 1106, + 523 + ], + [ + 1305, + 563 + ], + [ + 1318, + 551 + ], + [ + 1374, + 544 + ], + [ + 1328, + 500 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1383, + 568 + ], + [ + 1332, + 570 + ], + [ + 1669, + 673 + ], + [ + 1671, + 650 + ], + [ + 1687, + 645 + ], + [ + 1850, + 631 + ], + [ + 1848, + 610 + ], + [ + 1757, + 596 + ], + [ + 1564, + 543 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1074, 
+ 450 + ], + [ + 1079, + 450 + ], + [ + 1082, + 508 + ], + [ + 1076, + 508 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1383, + 568 + ], + [ + 1332, + 570 + ], + [ + 1669, + 673 + ], + [ + 1671, + 650 + ], + [ + 1687, + 645 + ], + [ + 1850, + 631 + ], + [ + 1848, + 610 + ], + [ + 1757, + 596 + ], + [ + 1564, + 543 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1090, + 450 + ], + [ + 1093, + 511 + ], + [ + 1086, + 511 + ], + [ + 1084, + 451 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1953, + 718 + ], + [ + 1811, + 716 + ], + [ + 2047, + 828 + ], + [ + 2047, + 687 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1115, + 451 + ], + [ + 1118, + 519 + ], + [ + 1114, + 519 + ], + [ + 1108, + 453 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1179, + 433 + ], + [ + 1188, + 439 + ], + [ + 1197, + 433 + ], + [ + 1212, + 442 + ], + [ + 1232, + 456 + ], + [ + 1220, + 512 + ], + [ + 1207, + 513 + ], + [ + 1202, + 516 + ], + [ + 1199, + 514 + ], + [ + 1199, + 509 + ], + [ + 1199, + 508 + ], + [ + 1197, + 505 + ], + [ + 1193, + 509 + ], + [ + 1184, + 508 + ], + [ + 1178, + 507 + ], + [ + 1170, + 519 + ], + [ + 1167, + 518 + ], + [ + 1171, + 496 + ], + [ + 1179, + 473 + ], + [ + 1183, + 462 + ], + [ + 1186, + 450 + ], + [ + 1178, + 442 + ], + [ + 1176, + 436 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1225, + 429 + ], + [ + 1256, + 428 + ], + [ + 1283, + 426 + ], + [ + 1238, + 505 + ], + [ + 1232, + 512 + ], + [ + 1218, + 519 + ], + [ + 1211, + 509 + ], + [ + 1208, + 495 + ], + [ + 1202, + 491 + ], + [ + 1206, + 475 + ], + [ + 1208, + 465 + ], + [ + 1206, + 462 + ], + [ + 1205, + 456 + ], + [ + 1211, + 454 + ], + [ + 1217, + 449 + ], + [ + 1220, + 447 + ], + [ + 1228, + 445 + ], + [ + 1237, + 445 + ], + [ + 1236, + 436 + ], + [ + 1221, + 437 + ], + [ + 1212, + 433 + ], + [ + 1217, + 429 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 974, + 430 + ], + [ + 987, + 430 + ], + [ + 1003, + 433 + ], + [ + 1009, + 439 + ], + [ + 1015, + 458 + ], + [ + 1016, + 473 + ], + [ + 1016, + 484 + ], + [ + 1014, + 494 + ], + [ + 1009, + 495 + ], + [ + 1004, + 492 + ], + [ + 1003, + 491 + ], + [ + 993, + 489 + ], + [ + 987, + 486 + ], + [ + 970, + 485 + ], + [ + 954, + 484 + ], + [ + 955, + 488 + ], + [ + 952, + 492 + ], + [ + 948, + 493 + ], + [ + 943, + 492 + ], + [ + 942, + 486 + ], + [ + 944, + 464 + ], + [ + 944, + 455 + ], + [ + 948, + 445 + ], + [ + 952, + 436 + ], + [ + 962, + 430 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1289, + 406 + ], + [ + 1324, + 404 + ], + [ + 1377, + 404 + ], + [ + 1404, + 408 + ], + [ + 1405, + 515 + ], + [ + 1377, + 522 + ], + [ + 1338, + 529 + ], + [ + 1315, + 530 + ], + [ + 1313, + 538 + ], + [ + 1311, + 546 + ], + [ + 1302, + 547 + ], + [ + 1288, + 545 + ], + [ + 1283, + 533 + ], + [ + 1282, + 528 + ], + [ + 1275, + 525 + ], + [ + 1263, + 525 + ], + [ + 1259, + 533 + ], + [ + 1254, + 538 + ], + [ + 1241, + 537 + ], + [ + 1232, + 525 + ], + [ + 1228, + 500 + ], + [ + 1230, + 477 + ], + [ + 1238, + 465 + ], + [ + 1245, + 456 + ], + [ + 1240, + 452 + ], + [ + 1238, + 444 + ], + [ + 1252, + 441 + ], + [ + 1255, + 443 + ], + [ + 1266, + 429 + ], + [ + 1278, + 414 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1630, + 394 + ], + [ + 1631, + 378 + ], + [ + 1637, + 367 + ], + [ + 1643, + 366 + ], + [ + 1647, + 370 + ], + [ + 1649, + 384 + ], + [ + 1649, + 396 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1379, + 465 + ], + [ + 1379, + 447 + ], + [ + 1378, + 360 + ], 
+ [ + 1371, + 327 + ], + [ + 1368, + 255 + ], + [ + 1367, + 223 + ], + [ + 1367, + 206 + ], + [ + 1370, + 173 + ], + [ + 1368, + 148 + ], + [ + 1370, + 126 + ], + [ + 1369, + 99 + ], + [ + 1365, + 47 + ], + [ + 1362, + 22 + ], + [ + 1350, + 0 + ], + [ + 1422, + 0 + ], + [ + 1431, + 120 + ], + [ + 1432, + 177 + ], + [ + 1429, + 224 + ], + [ + 1431, + 300 + ], + [ + 1434, + 368 + ], + [ + 1437, + 408 + ], + [ + 1442, + 464 + ], + [ + 1399, + 548 + ], + [ + 1377, + 548 + ], + [ + 1366, + 544 + ], + [ + 1368, + 533 + ], + [ + 1377, + 506 + ], + [ + 1381, + 490 + ], + [ + 1379, + 476 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1497, + 392 + ], + [ + 1542, + 387 + ], + [ + 1617, + 383 + ], + [ + 1663, + 384 + ], + [ + 1707, + 391 + ], + [ + 1739, + 425 + ], + [ + 1753, + 453 + ], + [ + 1765, + 482 + ], + [ + 1771, + 509 + ], + [ + 1769, + 572 + ], + [ + 1765, + 589 + ], + [ + 1755, + 603 + ], + [ + 1731, + 604 + ], + [ + 1714, + 598 + ], + [ + 1707, + 588 + ], + [ + 1621, + 581 + ], + [ + 1585, + 581 + ], + [ + 1548, + 583 + ], + [ + 1542, + 584 + ], + [ + 1539, + 600 + ], + [ + 1536, + 612 + ], + [ + 1525, + 616 + ], + [ + 1504, + 616 + ], + [ + 1494, + 610 + ], + [ + 1486, + 588 + ], + [ + 1480, + 575 + ], + [ + 1446, + 571 + ], + [ + 1430, + 571 + ], + [ + 1429, + 576 + ], + [ + 1422, + 589 + ], + [ + 1406, + 590 + ], + [ + 1391, + 588 + ], + [ + 1384, + 572 + ], + [ + 1380, + 532 + ], + [ + 1384, + 501 + ], + [ + 1390, + 488 + ], + [ + 1399, + 473 + ], + [ + 1396, + 463 + ], + [ + 1395, + 451 + ], + [ + 1404, + 448 + ], + [ + 1415, + 447 + ], + [ + 1421, + 452 + ], + [ + 1424, + 461 + ], + [ + 1433, + 436 + ], + [ + 1460, + 409 + ], + [ + 1476, + 399 + ], + [ + 1491, + 394 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1862, + 0 + ], + [ + 2048, + 3 + ], + [ + 2048, + 644 + ], + [ + 1850, + 630 + ], + [ + 1847, + 585 + ], + [ + 1855, + 583 + ], + [ + 1861, + 120 + ], + [ + 1862, + 30 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1922, + 530 + ], + [ + 2015, + 532 + ], + [ + 2013, + 539 + ], + [ + 1926, + 542 + ], + [ + 1920, + 648 + ], + [ + 1907, + 647 + ], + [ + 1908, + 536 + ], + [ + 1916, + 529 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1999, + 505 + ], + [ + 2011, + 502 + ], + [ + 2034, + 498 + ], + [ + 2048, + 489 + ], + [ + 2047, + 755 + ], + [ + 2022, + 757 + ], + [ + 1981, + 749 + ], + [ + 1953, + 718 + ], + [ + 1938, + 649 + ], + [ + 1935, + 602 + ], + [ + 1937, + 565 + ], + [ + 1940, + 551 + ], + [ + 1970, + 538 + ], + [ + 1993, + 526 + ], + [ + 1993, + 512 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 920, + 372 + ], + [ + 920, + 386 + ], + [ + 914, + 386 + ], + [ + 914, + 372 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 910, + 376 + ], + [ + 920, + 374 + ], + [ + 920, + 379 + ], + [ + 879, + 391 + ], + [ + 877, + 439 + ], + [ + 874, + 440 + ], + [ + 874, + 386 + ], + [ + 901, + 378 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 870, + 367 + ], + [ + 867, + 438 + ], + [ + 862, + 440 + ], + [ + 866, + 366 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 906, + 359 + ], + [ + 906, + 379 + ], + [ + 897, + 379 + ], + [ + 897, + 361 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 851, + 378 + ], + [ + 898, + 366 + ], + [ + 897, + 370 + ], + [ + 855, + 381 + ], + [ + 853, + 444 + ], + [ + 850, + 444 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 854, + 394 + ], + [ + 862, + 410 + ], + [ + 857, + 412 + ], + [ + 855, + 429 + ], + [ + 848, + 429 + ], + [ + 
849, + 410 + ], + [ + 844, + 410 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 810, + 373 + ], + [ + 808, + 445 + ], + [ + 805, + 444 + ], + [ + 806, + 373 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 816, + 393 + ], + [ + 810, + 401 + ], + [ + 810, + 421 + ], + [ + 800, + 421 + ], + [ + 799, + 394 + ], + [ + 796, + 392 + ], + [ + 796, + 381 + ], + [ + 819, + 381 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 772, + 440 + ], + [ + 780, + 436 + ], + [ + 796, + 434 + ], + [ + 808, + 436 + ], + [ + 821, + 449 + ], + [ + 821, + 461 + ], + [ + 814, + 462 + ], + [ + 810, + 465 + ], + [ + 792, + 465 + ], + [ + 772, + 463 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 791, + 376 + ], + [ + 784, + 371 + ], + [ + 771, + 371 + ], + [ + 763, + 375 + ], + [ + 759, + 373 + ], + [ + 759, + 362 + ], + [ + 754, + 356 + ], + [ + 740, + 353 + ], + [ + 721, + 339 + ], + [ + 699, + 303 + ], + [ + 699, + 258 + ], + [ + 713, + 236 + ], + [ + 739, + 221 + ], + [ + 803, + 202 + ], + [ + 811, + 204 + ], + [ + 817, + 211 + ], + [ + 823, + 211 + ], + [ + 834, + 211 + ], + [ + 837, + 212 + ], + [ + 841, + 216 + ], + [ + 848, + 219 + ], + [ + 854, + 219 + ], + [ + 861, + 219 + ], + [ + 864, + 228 + ], + [ + 867, + 233 + ], + [ + 878, + 235 + ], + [ + 883, + 240 + ], + [ + 890, + 250 + ], + [ + 880, + 255 + ], + [ + 878, + 260 + ], + [ + 868, + 267 + ], + [ + 871, + 272 + ], + [ + 870, + 283 + ], + [ + 870, + 295 + ], + [ + 865, + 304 + ], + [ + 871, + 316 + ], + [ + 873, + 324 + ], + [ + 868, + 334 + ], + [ + 858, + 339 + ], + [ + 853, + 339 + ], + [ + 852, + 345 + ], + [ + 856, + 351 + ], + [ + 864, + 355 + ], + [ + 868, + 359 + ], + [ + 870, + 364 + ], + [ + 867, + 370 + ], + [ + 864, + 379 + ], + [ + 858, + 387 + ], + [ + 850, + 390 + ], + [ + 839, + 385 + ], + [ + 826, + 383 + ], + [ + 812, + 381 + ], + [ + 799, + 382 + ], + [ + 799, + 439 + ], + [ + 798, + 468 + ], + [ + 792, + 468 + ], + [ + 795, + 413 + ], + [ + 793, + 390 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 652, + 59 + ], + [ + 657, + 142 + ], + [ + 693, + 276 + ], + [ + 696, + 460 + ], + [ + 519, + 472 + ], + [ + 358, + 494 + ], + [ + 124, + 502 + ], + [ + 0, + 509 + ], + [ + 0, + 3 + ], + [ + 591, + 1 + ], + [ + 609, + 25 + ], + [ + 627, + 55 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 668, + 416 + ], + [ + 678, + 407 + ], + [ + 679, + 394 + ], + [ + 675, + 384 + ], + [ + 674, + 371 + ], + [ + 676, + 364 + ], + [ + 667, + 357 + ], + [ + 663, + 371 + ], + [ + 656, + 367 + ], + [ + 654, + 361 + ], + [ + 648, + 354 + ], + [ + 640, + 351 + ], + [ + 641, + 370 + ], + [ + 645, + 371 + ], + [ + 647, + 403 + ], + [ + 628, + 397 + ], + [ + 626, + 372 + ], + [ + 619, + 367 + ], + [ + 604, + 358 + ], + [ + 604, + 348 + ], + [ + 601, + 336 + ], + [ + 585, + 323 + ], + [ + 563, + 304 + ], + [ + 549, + 253 + ], + [ + 547, + 123 + ], + [ + 557, + 73 + ], + [ + 572, + 65 + ], + [ + 583, + 62 + ], + [ + 592, + 58 + ], + [ + 601, + 55 + ], + [ + 609, + 57 + ], + [ + 617, + 61 + ], + [ + 621, + 63 + ], + [ + 633, + 68 + ], + [ + 643, + 70 + ], + [ + 647, + 62 + ], + [ + 653, + 56 + ], + [ + 655, + 56 + ], + [ + 660, + 56 + ], + [ + 665, + 62 + ], + [ + 669, + 68 + ], + [ + 676, + 70 + ], + [ + 688, + 73 + ], + [ + 702, + 73 + ], + [ + 721, + 81 + ], + [ + 726, + 81 + ], + [ + 732, + 81 + ], + [ + 744, + 87 + ], + [ + 755, + 91 + ], + [ + 770, + 95 + ], + [ + 787, + 114 + ], + [ + 810, + 131 + ], + [ + 822, + 146 + ], + [ + 831, + 163 + ], + [ + 839, + 177 + ], + [ + 841, + 192 + ], + [ 
+ 838, + 199 + ], + [ + 823, + 199 + ], + [ + 813, + 218 + ], + [ + 822, + 231 + ], + [ + 838, + 251 + ], + [ + 832, + 269 + ], + [ + 813, + 283 + ], + [ + 793, + 283 + ], + [ + 795, + 298 + ], + [ + 798, + 310 + ], + [ + 779, + 318 + ], + [ + 755, + 325 + ], + [ + 735, + 335 + ], + [ + 731, + 344 + ], + [ + 730, + 349 + ], + [ + 720, + 356 + ], + [ + 716, + 363 + ], + [ + 705, + 367 + ], + [ + 705, + 373 + ], + [ + 708, + 381 + ], + [ + 709, + 394 + ], + [ + 709, + 399 + ], + [ + 707, + 406 + ], + [ + 695, + 436 + ], + [ + 685, + 448 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 723, + 461 + ], + [ + 707, + 436 + ], + [ + 722, + 436 + ], + [ + 755, + 435 + ], + [ + 777, + 443 + ], + [ + 781, + 458 + ], + [ + 774, + 468 + ], + [ + 754, + 476 + ], + [ + 741, + 478 + ], + [ + 730, + 480 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 669, + 442 + ], + [ + 705, + 437 + ], + [ + 726, + 441 + ], + [ + 735, + 454 + ], + [ + 735, + 467 + ], + [ + 732, + 477 + ], + [ + 726, + 482 + ], + [ + 719, + 484 + ], + [ + 700, + 480 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 464, + 355 + ], + [ + 466, + 422 + ], + [ + 462, + 408 + ], + [ + 460, + 354 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 563, + 364 + ], + [ + 561, + 407 + ], + [ + 559, + 405 + ], + [ + 558, + 360 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 847, + 431 + ], + [ + 865, + 429 + ], + [ + 879, + 433 + ], + [ + 881, + 443 + ], + [ + 882, + 454 + ], + [ + 881, + 464 + ], + [ + 879, + 466 + ], + [ + 874, + 466 + ], + [ + 874, + 461 + ], + [ + 852, + 461 + ], + [ + 848, + 466 + ], + [ + 842, + 464 + ], + [ + 842, + 446 + ], + [ + 845, + 439 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 508, + 369 + ], + [ + 505, + 433 + ], + [ + 505, + 433 + ], + [ + 505, + 384 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 512, + 349 + ], + [ + 514, + 396 + ], + [ + 500, + 395 + ], + [ + 500, + 346 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 519, + 442 + ], + [ + 518, + 381 + ], + [ + 518, + 353 + ], + [ + 518, + 333 + ], + [ + 505, + 329 + ], + [ + 487, + 326 + ], + [ + 477, + 322 + ], + [ + 485, + 383 + ], + [ + 489, + 451 + ], + [ + 481, + 452 + ], + [ + 470, + 362 + ], + [ + 464, + 331 + ], + [ + 464, + 289 + ], + [ + 448, + 283 + ], + [ + 433, + 298 + ], + [ + 422, + 296 + ], + [ + 410, + 289 + ], + [ + 401, + 281 + ], + [ + 401, + 269 + ], + [ + 397, + 261 + ], + [ + 383, + 260 + ], + [ + 373, + 274 + ], + [ + 353, + 286 + ], + [ + 329, + 285 + ], + [ + 311, + 284 + ], + [ + 275, + 280 + ], + [ + 272, + 274 + ], + [ + 270, + 262 + ], + [ + 273, + 252 + ], + [ + 277, + 248 + ], + [ + 293, + 247 + ], + [ + 313, + 243 + ], + [ + 327, + 239 + ], + [ + 336, + 241 + ], + [ + 344, + 239 + ], + [ + 346, + 237 + ], + [ + 321, + 225 + ], + [ + 293, + 225 + ], + [ + 265, + 225 + ], + [ + 256, + 212 + ], + [ + 259, + 200 + ], + [ + 275, + 193 + ], + [ + 290, + 190 + ], + [ + 314, + 184 + ], + [ + 325, + 171 + ], + [ + 332, + 161 + ], + [ + 340, + 159 + ], + [ + 354, + 154 + ], + [ + 348, + 142 + ], + [ + 276, + 164 + ], + [ + 254, + 170 + ], + [ + 242, + 159 + ], + [ + 245, + 157 + ], + [ + 269, + 150 + ], + [ + 281, + 143 + ], + [ + 298, + 140 + ], + [ + 322, + 125 + ], + [ + 337, + 121 + ], + [ + 351, + 117 + ], + [ + 344, + 104 + ], + [ + 316, + 99 + ], + [ + 298, + 99 + ], + [ + 278, + 108 + ], + [ + 268, + 115 + ], + [ + 259, + 113 + ], + [ + 247, + 106 + ], + [ + 233, + 104 + ], + [ + 201, + 90 + ], + [ + 189, + 85 + ], + [ + 187, + 75 + ], + [ + 192, 
+ 62 + ], + [ + 198, + 50 + ], + [ + 206, + 40 + ], + [ + 203, + 32 + ], + [ + 201, + 21 + ], + [ + 210, + 11 + ], + [ + 235, + 0 + ], + [ + 561, + 4 + ], + [ + 561, + 15 + ], + [ + 547, + 29 + ], + [ + 518, + 45 + ], + [ + 519, + 57 + ], + [ + 536, + 64 + ], + [ + 552, + 68 + ], + [ + 561, + 80 + ], + [ + 568, + 122 + ], + [ + 578, + 155 + ], + [ + 583, + 183 + ], + [ + 599, + 227 + ], + [ + 596, + 256 + ], + [ + 588, + 291 + ], + [ + 571, + 325 + ], + [ + 557, + 325 + ], + [ + 543, + 326 + ], + [ + 526, + 331 + ], + [ + 528, + 391 + ], + [ + 525, + 435 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 685, + 271 + ], + [ + 696, + 278 + ], + [ + 698, + 464 + ], + [ + 690, + 454 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 603, + 271 + ], + [ + 691, + 273 + ], + [ + 691, + 293 + ], + [ + 600, + 296 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 667, + 308 + ], + [ + 678, + 298 + ], + [ + 690, + 292 + ], + [ + 708, + 298 + ], + [ + 717, + 309 + ], + [ + 717, + 327 + ], + [ + 714, + 338 + ], + [ + 702, + 342 + ], + [ + 688, + 342 + ], + [ + 671, + 332 + ], + [ + 667, + 318 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 372, + 323 + ], + [ + 371, + 389 + ], + [ + 365, + 389 + ], + [ + 366, + 318 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 380, + 284 + ], + [ + 379, + 325 + ], + [ + 361, + 326 + ], + [ + 363, + 286 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 162, + 320 + ], + [ + 173, + 329 + ], + [ + 185, + 338 + ], + [ + 193, + 346 + ], + [ + 197, + 381 + ], + [ + 0, + 423 + ], + [ + 4, + 345 + ], + [ + 19, + 350 + ], + [ + 29, + 339 + ], + [ + 43, + 327 + ], + [ + 61, + 322 + ], + [ + 76, + 327 + ], + [ + 89, + 333 + ], + [ + 106, + 338 + ], + [ + 122, + 339 + ], + [ + 135, + 337 + ], + [ + 151, + 330 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 202, + 352 + ], + [ + 201, + 380 + ], + [ + 187, + 381 + ], + [ + 187, + 371 + ], + [ + 192, + 354 + ], + [ + 199, + 350 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 76, + 303 + ], + [ + 89, + 342 + ], + [ + 92, + 357 + ], + [ + 91, + 380 + ], + [ + 53, + 379 + ], + [ + 56, + 342 + ], + [ + 65, + 316 + ], + [ + 71, + 306 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 80, + 318 + ], + [ + 81, + 416 + ], + [ + 73, + 416 + ], + [ + 67, + 318 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 125, + 15 + ], + [ + 127, + 75 + ], + [ + 141, + 325 + ], + [ + 137, + 393 + ], + [ + 111, + 397 + ], + [ + 114, + 328 + ], + [ + 108, + 238 + ], + [ + 107, + 86 + ], + [ + 104, + 21 + ], + [ + 102, + 0 + ], + [ + 123, + 0 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 26, + 10 + ], + [ + 28, + 41 + ], + [ + 30, + 91 + ], + [ + 18, + 164 + ], + [ + 8, + 226 + ], + [ + 15, + 253 + ], + [ + 12, + 273 + ], + [ + 8, + 327 + ], + [ + 0, + 394 + ], + [ + 0, + 0 + ], + [ + 28, + 0 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 685, + 479 + ], + [ + 713, + 484 + ], + [ + 728, + 487 + ], + [ + 736, + 491 + ], + [ + 736, + 495 + ], + [ + 719, + 498 + ], + [ + 702, + 500 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 411, + 526 + ], + [ + 714, + 496 + ], + [ + 726, + 506 + ], + [ + 739, + 511 + ], + [ + 743, + 515 + ], + [ + 745, + 518 + ], + [ + 746, + 524 + ], + [ + 744, + 530 + ], + [ + 737, + 533 + ], + [ + 391, + 540 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 387, + 427 + ], + [ + 406, + 428 + ], + [ + 422, + 432 + ], + [ + 425, + 438 + ], + [ + 443, + 469 + ], + [ + 452, + 
513 + ], + [ + 447, + 529 + ], + [ + 438, + 533 + ], + [ + 431, + 533 + ], + [ + 427, + 539 + ], + [ + 419, + 549 + ], + [ + 409, + 553 + ], + [ + 390, + 554 + ], + [ + 377, + 518 + ], + [ + 379, + 444 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 435, + 397 + ], + [ + 455, + 397 + ], + [ + 488, + 408 + ], + [ + 515, + 426 + ], + [ + 523, + 418 + ], + [ + 538, + 404 + ], + [ + 567, + 396 + ], + [ + 606, + 394 + ], + [ + 642, + 393 + ], + [ + 663, + 391 + ], + [ + 671, + 395 + ], + [ + 682, + 405 + ], + [ + 694, + 427 + ], + [ + 700, + 423 + ], + [ + 713, + 426 + ], + [ + 717, + 432 + ], + [ + 718, + 438 + ], + [ + 712, + 444 + ], + [ + 713, + 453 + ], + [ + 718, + 478 + ], + [ + 716, + 521 + ], + [ + 715, + 541 + ], + [ + 713, + 545 + ], + [ + 704, + 549 + ], + [ + 699, + 563 + ], + [ + 681, + 562 + ], + [ + 673, + 551 + ], + [ + 669, + 546 + ], + [ + 642, + 541 + ], + [ + 613, + 542 + ], + [ + 600, + 544 + ], + [ + 582, + 544 + ], + [ + 575, + 544 + ], + [ + 558, + 546 + ], + [ + 557, + 551 + ], + [ + 540, + 555 + ], + [ + 532, + 549 + ], + [ + 530, + 547 + ], + [ + 528, + 559 + ], + [ + 523, + 570 + ], + [ + 503, + 569 + ], + [ + 494, + 560 + ], + [ + 492, + 548 + ], + [ + 491, + 529 + ], + [ + 453, + 529 + ], + [ + 439, + 519 + ], + [ + 436, + 484 + ], + [ + 435, + 433 + ], + [ + 435, + 418 + ], + [ + 434, + 404 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 36, + 434 + ], + [ + 51, + 414 + ], + [ + 91, + 388 + ], + [ + 138, + 375 + ], + [ + 213, + 367 + ], + [ + 276, + 369 + ], + [ + 334, + 371 + ], + [ + 370, + 375 + ], + [ + 384, + 385 + ], + [ + 393, + 410 + ], + [ + 402, + 448 + ], + [ + 408, + 502 + ], + [ + 408, + 530 + ], + [ + 407, + 548 + ], + [ + 400, + 564 + ], + [ + 384, + 573 + ], + [ + 377, + 581 + ], + [ + 366, + 589 + ], + [ + 352, + 589 + ], + [ + 340, + 588 + ], + [ + 331, + 577 + ], + [ + 284, + 579 + ], + [ + 224, + 574 + ], + [ + 112, + 513 + ], + [ + 64, + 457 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 12, + 412 + ], + [ + 76, + 408 + ], + [ + 163, + 418 + ], + [ + 201, + 423 + ], + [ + 242, + 454 + ], + [ + 262, + 481 + ], + [ + 273, + 512 + ], + [ + 278, + 529 + ], + [ + 277, + 555 + ], + [ + 263, + 579 + ], + [ + 242, + 600 + ], + [ + 220, + 611 + ], + [ + 193, + 619 + ], + [ + 133, + 617 + ], + [ + 55, + 548 + ], + [ + 8, + 435 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 155, + 461 + ], + [ + 173, + 481 + ], + [ + 188, + 516 + ], + [ + 193, + 550 + ], + [ + 195, + 585 + ], + [ + 185, + 613 + ], + [ + 172, + 625 + ], + [ + 155, + 631 + ], + [ + 140, + 644 + ], + [ + 127, + 650 + ], + [ + 108, + 652 + ], + [ + 93, + 644 + ], + [ + 88, + 636 + ], + [ + 78, + 632 + ], + [ + 32, + 634 + ], + [ + 28, + 634 + ], + [ + 25, + 634 + ], + [ + 20, + 633 + ], + [ + 12, + 633 + ], + [ + 10, + 633 + ], + [ + 7, + 633 + ], + [ + 2, + 633 + ], + [ + 0, + 631 + ], + [ + 0, + 412 + ], + [ + 12, + 411 + ], + [ + 76, + 414 + ], + [ + 115, + 423 + ], + [ + 128, + 435 + ], + [ + 144, + 450 + ] + ] + }, + { + "label": "rider", + "polygon": [ + [ + 780, + 427 + ], + [ + 780, + 434 + ], + [ + 782, + 441 + ], + [ + 782, + 453 + ], + [ + 781, + 464 + ], + [ + 775, + 474 + ], + [ + 763, + 482 + ], + [ + 761, + 469 + ], + [ + 757, + 463 + ], + [ + 763, + 455 + ], + [ + 758, + 450 + ], + [ + 754, + 447 + ], + [ + 760, + 436 + ], + [ + 765, + 429 + ], + [ + 768, + 422 + ], + [ + 775, + 422 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 787, + 445 + ], + [ + 785, + 449 + ], + [ + 776, + 451 + ], + [ + 776, + 459 + ], + [ + 775, + 483 + 
], + [ + 773, + 488 + ], + [ + 768, + 489 + ], + [ + 765, + 479 + ], + [ + 765, + 470 + ], + [ + 766, + 450 + ], + [ + 759, + 450 + ], + [ + 755, + 449 + ], + [ + 755, + 447 + ], + [ + 763, + 444 + ], + [ + 768, + 446 + ], + [ + 773, + 446 + ], + [ + 775, + 443 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1630, + 528 + ], + [ + 1703, + 525 + ], + [ + 1706, + 544 + ], + [ + 1634, + 547 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 558, + 482 + ], + [ + 625, + 480 + ], + [ + 624, + 466 + ], + [ + 558, + 466 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000020_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000020_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..7f46424ec871f4ec89233fdf0440c239078ca6c2 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000020_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000020_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000020_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..9b52ee61127d48fd0e0adc79fd6dbac64fb8f097 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000020_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000021_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000021_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..de8c9909d994ea5631acee80f82edf0f33e78e07 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000021_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000022_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000022_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..4ce304e5ec794570dadffc35559dfa283a4b4f41 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000022_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000022_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000022_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..9a89e74650154c2f0f7c513a5d22ec3934bd0e84 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000022_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000024_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000024_000019_gtFine_labelTrainIds.png new file mode 
100644 index 0000000000000000000000000000000000000000..246539dfe9761fd809673f6dd15aad43d02456ed Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000024_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000025_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000025_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..8b035783c2ef85e4fff2cdb02ab1bd5143a82880 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000025_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000026_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000026_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..b7e216718b38f24dfb5c64aba26123d821d9a157 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000026_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000026_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000026_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..56775993d521ebad56b0de63c703b9d83991bd8b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000026_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000026_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000026_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..e1f342cb3031dd0e8d91e0c41333e143131cddb2 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000026_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000027_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000027_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..17eb59e9771bf9eed4abbcae358500d3c3b196f3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000027_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000027_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000027_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..c80678d18d39bf4aadfc110cd8c15c27a0a350eb Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000027_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000028_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000028_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ea3eefa973faf213a1aa1b0c5106d0af8ba6ca4a Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000028_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000029_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000029_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..26860a0233d11f5734d0ac3a439343fd2437c808 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000029_000019_gtFine_labelIds.png differ diff --git 
a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000029_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000029_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..f99f8222af97d4af9106bc6221d0cef036a4895c --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000029_000019_gtFine_polygons.json @@ -0,0 +1,7133 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 625, + 1 + ], + [ + 1304, + 1 + ], + [ + 1178, + 377 + ], + [ + 703, + 353 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2, + 491 + ], + [ + 590, + 450 + ], + [ + 774, + 433 + ], + [ + 1190, + 458 + ], + [ + 1395, + 457 + ], + [ + 1660, + 505 + ], + [ + 1847, + 542 + ], + [ + 2008, + 564 + ], + [ + 2048, + 569 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 890, + 325 + ], + [ + 915, + 325 + ], + [ + 916, + 328 + ], + [ + 922, + 331 + ], + [ + 931, + 334 + ], + [ + 937, + 335 + ], + [ + 945, + 335 + ], + [ + 953, + 338 + ], + [ + 957, + 340 + ], + [ + 964, + 338 + ], + [ + 975, + 318 + ], + [ + 986, + 335 + ], + [ + 986, + 344 + ], + [ + 1007, + 348 + ], + [ + 1102, + 341 + ], + [ + 1119, + 391 + ], + [ + 1116, + 448 + ], + [ + 921, + 442 + ], + [ + 879, + 442 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 514, + 467 + ], + [ + 622, + 460 + ], + [ + 657, + 480 + ], + [ + 657, + 484 + ], + [ + 610, + 491 + ], + [ + 576, + 489 + ], + [ + 507, + 487 + ], + [ + 495, + 481 + ], + [ + 511, + 472 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 112, + 2 + ], + [ + 643, + 0 + ], + [ + 656, + 34 + ], + [ + 652, + 92 + ], + [ + 659, + 95 + ], + [ + 672, + 127 + ], + [ + 765, + 127 + ], + [ + 767, + 186 + ], + [ + 772, + 192 + ], + [ + 786, + 195 + ], + [ + 816, + 192 + ], + [ + 817, + 189 + ], + [ + 829, + 189 + ], + [ + 830, + 192 + ], + [ + 875, + 199 + ], + [ + 871, + 202 + ], + [ + 868, + 206 + ], + [ + 893, + 209 + ], + [ + 894, + 459 + ], + [ + 676, + 460 + ], + [ + 609, + 469 + ], + [ + 517, + 472 + ], + [ + 128, + 454 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 685, + 156 + ], + [ + 682, + 240 + ], + [ + 671, + 240 + ], + [ + 665, + 158 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 405, + 157 + ], + [ + 413, + 157 + ], + [ + 422, + 164 + ], + [ + 423, + 178 + ], + [ + 425, + 351 + ], + [ + 420, + 350 + ], + [ + 419, + 183 + ], + [ + 416, + 174 + ], + [ + 405, + 161 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 211, + 249 + ], + [ + 214, + 447 + ], + [ + 142, + 454 + ], + [ + 140, + 244 + ], + [ + 160, + 242 + ], + [ + 175, + 243 + ], + [ + 187, + 248 + ], + [ + 205, + 248 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 271, + 417 + ], + [ + 263, + 371 + ], + [ + 277, + 366 + ], + [ + 284, + 359 + ], + [ + 274, + 349 + ], + [ + 276, + 327 + ], + [ + 282, + 312 + ], + [ + 283, + 302 + ], + [ + 284, + 281 + ], + [ + 287, + 264 + ], + [ + 299, + 248 + ], + [ + 298, + 231 + ], + [ + 292, + 221 + ], + [ + 292, + 195 + ], + [ + 314, + 211 + ], + [ + 317, + 209 + ], + [ + 311, + 201 + ], + [ + 308, + 189 + ], + [ + 306, + 174 + ], + [ + 308, + 161 + ], + [ + 316, + 157 + ], + [ + 330, + 155 + ], + [ + 343, + 157 + ], + [ + 361, + 159 + ], + [ + 393, + 149 + ], + [ + 404, + 133 + ], + [ + 390, + 135 + ], + [ + 381, + 137 + ], + [ + 368, + 134 + ], + [ + 361, + 130 + ], + [ + 374, + 121 + ], + [ + 391, + 117 + ], + [ + 400, + 100 + ], + [ + 394, + 90 + ], + [ + 
392, + 74 + ], + [ + 399, + 58 + ], + [ + 392, + 40 + ], + [ + 391, + 22 + ], + [ + 375, + 21 + ], + [ + 359, + 30 + ], + [ + 353, + 28 + ], + [ + 353, + 21 + ], + [ + 342, + 19 + ], + [ + 325, + 30 + ], + [ + 318, + 23 + ], + [ + 330, + 13 + ], + [ + 348, + 5 + ], + [ + 356, + 0 + ], + [ + 358, + 0 + ], + [ + 103, + 0 + ], + [ + 93, + 8 + ], + [ + 90, + 14 + ], + [ + 86, + 21 + ], + [ + 89, + 29 + ], + [ + 76, + 43 + ], + [ + 76, + 51 + ], + [ + 104, + 59 + ], + [ + 115, + 66 + ], + [ + 117, + 79 + ], + [ + 108, + 89 + ], + [ + 118, + 98 + ], + [ + 121, + 112 + ], + [ + 114, + 116 + ], + [ + 100, + 128 + ], + [ + 118, + 141 + ], + [ + 102, + 145 + ], + [ + 91, + 149 + ], + [ + 114, + 161 + ], + [ + 131, + 163 + ], + [ + 136, + 170 + ], + [ + 121, + 186 + ], + [ + 108, + 194 + ], + [ + 141, + 203 + ], + [ + 164, + 201 + ], + [ + 173, + 194 + ], + [ + 182, + 197 + ], + [ + 185, + 213 + ], + [ + 184, + 229 + ], + [ + 173, + 229 + ], + [ + 167, + 241 + ], + [ + 180, + 265 + ], + [ + 186, + 285 + ], + [ + 177, + 311 + ], + [ + 185, + 330 + ], + [ + 185, + 353 + ], + [ + 188, + 370 + ], + [ + 187, + 401 + ], + [ + 188, + 458 + ], + [ + 230, + 468 + ], + [ + 268, + 470 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 173, + 195 + ], + [ + 176, + 451 + ], + [ + 166, + 446 + ], + [ + 167, + 190 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 221, + 439 + ], + [ + 229, + 450 + ], + [ + 219, + 488 + ], + [ + 149, + 487 + ], + [ + 145, + 457 + ], + [ + 160, + 441 + ], + [ + 197, + 434 + ], + [ + 208, + 436 + ], + [ + 215, + 438 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 145, + 16 + ], + [ + 147, + 239 + ], + [ + 150, + 444 + ], + [ + 0, + 481 + ], + [ + 0, + 1 + ], + [ + 145, + 1 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 201, + 497 + ], + [ + 207, + 487 + ], + [ + 217, + 472 + ], + [ + 220, + 461 + ], + [ + 246, + 456 + ], + [ + 271, + 468 + ], + [ + 334, + 508 + ], + [ + 334, + 518 + ], + [ + 291, + 524 + ], + [ + 249, + 526 + ], + [ + 216, + 522 + ], + [ + 165, + 519 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 240, + 353 + ], + [ + 248, + 359 + ], + [ + 251, + 367 + ], + [ + 245, + 375 + ], + [ + 236, + 377 + ], + [ + 230, + 377 + ], + [ + 227, + 375 + ], + [ + 225, + 368 + ], + [ + 226, + 362 + ], + [ + 232, + 353 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 226, + 378 + ], + [ + 248, + 377 + ], + [ + 250, + 392 + ], + [ + 226, + 391 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 238, + 363 + ], + [ + 238, + 516 + ], + [ + 233, + 515 + ], + [ + 234, + 366 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 91, + 443 + ], + [ + 113, + 437 + ], + [ + 147, + 437 + ], + [ + 183, + 443 + ], + [ + 200, + 456 + ], + [ + 210, + 468 + ], + [ + 218, + 492 + ], + [ + 219, + 505 + ], + [ + 213, + 511 + ], + [ + 211, + 520 + ], + [ + 206, + 529 + ], + [ + 198, + 529 + ], + [ + 186, + 520 + ], + [ + 175, + 520 + ], + [ + 157, + 522 + ], + [ + 157, + 531 + ], + [ + 152, + 534 + ], + [ + 136, + 535 + ], + [ + 130, + 524 + ], + [ + 113, + 526 + ], + [ + 85, + 526 + ], + [ + 64, + 515 + ], + [ + 58, + 473 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 100, + 398 + ], + [ + 104, + 410 + ], + [ + 105, + 505 + ], + [ + 104, + 517 + ], + [ + 98, + 523 + ], + [ + 94, + 534 + ], + [ + 90, + 541 + ], + [ + 76, + 545 + ], + [ + 66, + 542 + ], + [ + 61, + 533 + ], + [ + 59, + 525 + ], + [ + 56, + 525 + ], + [ + 49, + 533 + ], + [ + 39, + 533 + ], + [ + 32, + 533 + ], + [ + 28, + 531 + ], + [ + 16, + 529 + ], + [ + 0, + 
529 + ], + [ + 2, + 387 + ], + [ + 26, + 388 + ], + [ + 71, + 388 + ], + [ + 90, + 389 + ], + [ + 99, + 394 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 625, + 429 + ], + [ + 625, + 442 + ], + [ + 626, + 453 + ], + [ + 616, + 471 + ], + [ + 607, + 467 + ], + [ + 607, + 455 + ], + [ + 605, + 448 + ], + [ + 607, + 436 + ], + [ + 613, + 429 + ], + [ + 615, + 425 + ], + [ + 620, + 422 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 636, + 440 + ], + [ + 650, + 439 + ], + [ + 661, + 448 + ], + [ + 668, + 471 + ], + [ + 658, + 477 + ], + [ + 629, + 477 + ], + [ + 616, + 468 + ], + [ + 615, + 458 + ], + [ + 621, + 449 + ], + [ + 628, + 441 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 567, + 457 + ], + [ + 566, + 486 + ], + [ + 563, + 486 + ], + [ + 563, + 462 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 658, + 436 + ], + [ + 664, + 392 + ], + [ + 660, + 372 + ], + [ + 657, + 367 + ], + [ + 651, + 361 + ], + [ + 645, + 353 + ], + [ + 651, + 346 + ], + [ + 658, + 346 + ], + [ + 660, + 352 + ], + [ + 663, + 334 + ], + [ + 658, + 332 + ], + [ + 652, + 331 + ], + [ + 641, + 339 + ], + [ + 634, + 339 + ], + [ + 629, + 336 + ], + [ + 624, + 327 + ], + [ + 624, + 320 + ], + [ + 617, + 315 + ], + [ + 607, + 312 + ], + [ + 595, + 289 + ], + [ + 594, + 280 + ], + [ + 600, + 272 + ], + [ + 595, + 262 + ], + [ + 591, + 254 + ], + [ + 591, + 240 + ], + [ + 599, + 230 + ], + [ + 596, + 221 + ], + [ + 589, + 210 + ], + [ + 593, + 197 + ], + [ + 604, + 197 + ], + [ + 614, + 188 + ], + [ + 617, + 187 + ], + [ + 639, + 191 + ], + [ + 658, + 193 + ], + [ + 668, + 187 + ], + [ + 674, + 187 + ], + [ + 681, + 201 + ], + [ + 686, + 212 + ], + [ + 688, + 208 + ], + [ + 691, + 200 + ], + [ + 697, + 199 + ], + [ + 715, + 198 + ], + [ + 723, + 187 + ], + [ + 734, + 181 + ], + [ + 744, + 185 + ], + [ + 754, + 191 + ], + [ + 768, + 191 + ], + [ + 782, + 187 + ], + [ + 789, + 180 + ], + [ + 809, + 190 + ], + [ + 817, + 199 + ], + [ + 825, + 202 + ], + [ + 836, + 202 + ], + [ + 843, + 208 + ], + [ + 846, + 217 + ], + [ + 846, + 226 + ], + [ + 852, + 230 + ], + [ + 858, + 240 + ], + [ + 864, + 243 + ], + [ + 874, + 247 + ], + [ + 878, + 259 + ], + [ + 879, + 267 + ], + [ + 876, + 284 + ], + [ + 876, + 284 + ], + [ + 866, + 289 + ], + [ + 863, + 295 + ], + [ + 867, + 302 + ], + [ + 880, + 304 + ], + [ + 883, + 311 + ], + [ + 882, + 315 + ], + [ + 883, + 316 + ], + [ + 889, + 326 + ], + [ + 879, + 330 + ], + [ + 878, + 332 + ], + [ + 886, + 336 + ], + [ + 888, + 344 + ], + [ + 879, + 346 + ], + [ + 867, + 349 + ], + [ + 860, + 354 + ], + [ + 857, + 376 + ], + [ + 840, + 389 + ], + [ + 795, + 421 + ], + [ + 674, + 455 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 835, + 297 + ], + [ + 854, + 298 + ], + [ + 854, + 326 + ], + [ + 835, + 326 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 968, + 343 + ], + [ + 969, + 356 + ], + [ + 934, + 358 + ], + [ + 934, + 344 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 862, + 299 + ], + [ + 900, + 308 + ], + [ + 928, + 311 + ], + [ + 944, + 319 + ], + [ + 955, + 333 + ], + [ + 959, + 360 + ], + [ + 955, + 396 + ], + [ + 951, + 396 + ], + [ + 954, + 354 + ], + [ + 950, + 335 + ], + [ + 948, + 330 + ], + [ + 940, + 324 + ], + [ + 935, + 320 + ], + [ + 915, + 314 + ], + [ + 859, + 307 + ], + [ + 848, + 304 + ], + [ + 848, + 300 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1070, + 220 + ], + [ + 1067, + 225 + ], + [ + 1044, + 223 + ], + [ + 989, + 230 + ], + [ + 952, + 238 + ], + [ + 939, + 242 
+ ], + [ + 929, + 258 + ], + [ + 927, + 269 + ], + [ + 930, + 423 + ], + [ + 923, + 428 + ], + [ + 922, + 288 + ], + [ + 923, + 268 + ], + [ + 926, + 254 + ], + [ + 935, + 241 + ], + [ + 950, + 233 + ], + [ + 997, + 225 + ], + [ + 1031, + 220 + ], + [ + 1065, + 219 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1085, + 207 + ], + [ + 1087, + 213 + ], + [ + 1086, + 250 + ], + [ + 1086, + 254 + ], + [ + 1072, + 255 + ], + [ + 1067, + 252 + ], + [ + 1067, + 212 + ], + [ + 1070, + 209 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 920, + 347 + ], + [ + 938, + 346 + ], + [ + 952, + 352 + ], + [ + 954, + 385 + ], + [ + 944, + 385 + ], + [ + 942, + 389 + ], + [ + 928, + 392 + ], + [ + 921, + 389 + ], + [ + 915, + 383 + ], + [ + 915, + 355 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 933, + 304 + ], + [ + 938, + 311 + ], + [ + 939, + 320 + ], + [ + 939, + 325 + ], + [ + 930, + 330 + ], + [ + 920, + 330 + ], + [ + 913, + 322 + ], + [ + 914, + 311 + ], + [ + 918, + 306 + ], + [ + 926, + 303 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 593, + 416 + ], + [ + 584, + 404 + ], + [ + 592, + 398 + ], + [ + 603, + 407 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 595, + 379 + ], + [ + 603, + 383 + ], + [ + 605, + 387 + ], + [ + 604, + 396 + ], + [ + 601, + 399 + ], + [ + 592, + 400 + ], + [ + 587, + 400 + ], + [ + 582, + 394 + ], + [ + 583, + 384 + ], + [ + 588, + 381 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 565, + 369 + ], + [ + 602, + 367 + ], + [ + 605, + 376 + ], + [ + 567, + 377 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 687, + 190 + ], + [ + 611, + 203 + ], + [ + 605, + 210 + ], + [ + 605, + 485 + ], + [ + 609, + 485 + ], + [ + 609, + 211 + ], + [ + 618, + 206 + ], + [ + 665, + 197 + ], + [ + 690, + 194 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 779, + 264 + ], + [ + 727, + 271 + ], + [ + 719, + 280 + ], + [ + 719, + 367 + ], + [ + 726, + 408 + ], + [ + 725, + 285 + ], + [ + 726, + 274 + ], + [ + 776, + 271 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 670, + 351 + ], + [ + 730, + 349 + ], + [ + 731, + 404 + ], + [ + 671, + 403 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 643, + 458 + ], + [ + 643, + 481 + ], + [ + 632, + 482 + ], + [ + 630, + 458 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 622, + 459 + ], + [ + 625, + 484 + ], + [ + 624, + 483 + ], + [ + 617, + 458 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 894, + 481 + ], + [ + 889, + 528 + ], + [ + 873, + 530 + ], + [ + 860, + 538 + ], + [ + 844, + 547 + ], + [ + 837, + 555 + ], + [ + 822, + 558 + ], + [ + 812, + 564 + ], + [ + 782, + 567 + ], + [ + 782, + 518 + ], + [ + 845, + 497 + ], + [ + 877, + 483 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 789, + 390 + ], + [ + 800, + 392 + ], + [ + 811, + 392 + ], + [ + 821, + 387 + ], + [ + 833, + 387 + ], + [ + 846, + 387 + ], + [ + 858, + 384 + ], + [ + 871, + 377 + ], + [ + 882, + 374 + ], + [ + 887, + 365 + ], + [ + 892, + 353 + ], + [ + 899, + 353 + ], + [ + 904, + 363 + ], + [ + 906, + 378 + ], + [ + 906, + 400 + ], + [ + 913, + 416 + ], + [ + 908, + 432 + ], + [ + 899, + 438 + ], + [ + 905, + 447 + ], + [ + 901, + 451 + ], + [ + 893, + 459 + ], + [ + 892, + 463 + ], + [ + 893, + 470 + ], + [ + 891, + 477 + ], + [ + 885, + 484 + ], + [ + 882, + 493 + ], + [ + 871, + 496 + ], + [ + 862, + 499 + ], + [ + 851, + 500 + ], + [ + 851, + 504 + ], + [ + 854, + 505 + ], + [ + 852, + 516 + ], + [ + 
847, + 522 + ], + [ + 841, + 524 + ], + [ + 830, + 524 + ], + [ + 821, + 524 + ], + [ + 809, + 524 + ], + [ + 798, + 524 + ], + [ + 793, + 527 + ], + [ + 793, + 530 + ], + [ + 799, + 534 + ], + [ + 798, + 544 + ], + [ + 796, + 549 + ], + [ + 800, + 556 + ], + [ + 803, + 564 + ], + [ + 801, + 569 + ], + [ + 802, + 576 + ], + [ + 802, + 581 + ], + [ + 810, + 588 + ], + [ + 820, + 588 + ], + [ + 823, + 593 + ], + [ + 823, + 602 + ], + [ + 822, + 602 + ], + [ + 811, + 602 + ], + [ + 802, + 602 + ], + [ + 799, + 606 + ], + [ + 791, + 610 + ], + [ + 765, + 608 + ], + [ + 754, + 610 + ], + [ + 740, + 610 + ], + [ + 726, + 606 + ], + [ + 707, + 610 + ], + [ + 674, + 610 + ], + [ + 639, + 594 + ], + [ + 609, + 576 + ], + [ + 601, + 562 + ], + [ + 598, + 553 + ], + [ + 602, + 546 + ], + [ + 608, + 542 + ], + [ + 605, + 537 + ], + [ + 608, + 533 + ], + [ + 620, + 528 + ], + [ + 625, + 525 + ], + [ + 627, + 519 + ], + [ + 632, + 514 + ], + [ + 645, + 505 + ], + [ + 645, + 498 + ], + [ + 650, + 495 + ], + [ + 652, + 495 + ], + [ + 649, + 484 + ], + [ + 650, + 471 + ], + [ + 657, + 464 + ], + [ + 655, + 458 + ], + [ + 643, + 446 + ], + [ + 639, + 429 + ], + [ + 639, + 424 + ], + [ + 651, + 427 + ], + [ + 653, + 422 + ], + [ + 647, + 411 + ], + [ + 646, + 395 + ], + [ + 653, + 386 + ], + [ + 666, + 385 + ], + [ + 679, + 379 + ], + [ + 687, + 373 + ], + [ + 699, + 369 + ], + [ + 702, + 364 + ], + [ + 717, + 355 + ], + [ + 724, + 345 + ], + [ + 730, + 342 + ], + [ + 739, + 353 + ], + [ + 736, + 366 + ], + [ + 738, + 374 + ], + [ + 742, + 383 + ], + [ + 737, + 394 + ], + [ + 741, + 396 + ], + [ + 762, + 389 + ], + [ + 763, + 382 + ], + [ + 774, + 381 + ], + [ + 784, + 384 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 596, + 560 + ], + [ + 628, + 554 + ], + [ + 683, + 553 + ], + [ + 697, + 558 + ], + [ + 695, + 610 + ], + [ + 688, + 616 + ], + [ + 685, + 620 + ], + [ + 609, + 623 + ], + [ + 607, + 616 + ], + [ + 600, + 612 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 513, + 589 + ], + [ + 547, + 588 + ], + [ + 577, + 588 + ], + [ + 600, + 592 + ], + [ + 600, + 624 + ], + [ + 602, + 658 + ], + [ + 596, + 664 + ], + [ + 587, + 665 + ], + [ + 587, + 670 + ], + [ + 586, + 674 + ], + [ + 570, + 676 + ], + [ + 522, + 678 + ], + [ + 512, + 664 + ], + [ + 506, + 612 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 138, + 816 + ], + [ + 359, + 833 + ], + [ + 408, + 837 + ], + [ + 439, + 847 + ], + [ + 452, + 885 + ], + [ + 450, + 942 + ], + [ + 413, + 969 + ], + [ + 347, + 1024 + ], + [ + 0, + 1024 + ], + [ + 2, + 800 + ], + [ + 87, + 809 + ], + [ + 100, + 811 + ], + [ + 109, + 811 + ], + [ + 116, + 811 + ], + [ + 131, + 814 + ], + [ + 133, + 815 + ], + [ + 133, + 816 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 439, + 26 + ], + [ + 444, + 36 + ], + [ + 395, + 41 + ], + [ + 337, + 49 + ], + [ + 324, + 59 + ], + [ + 327, + 329 + ], + [ + 321, + 332 + ], + [ + 317, + 59 + ], + [ + 327, + 47 + ], + [ + 365, + 40 + ], + [ + 419, + 34 + ], + [ + 426, + 30 + ], + [ + 435, + 26 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 307, + 362 + ], + [ + 301, + 353 + ], + [ + 293, + 349 + ], + [ + 288, + 341 + ], + [ + 285, + 335 + ], + [ + 286, + 321 + ], + [ + 298, + 306 + ], + [ + 311, + 296 + ], + [ + 317, + 290 + ], + [ + 327, + 276 + ], + [ + 337, + 270 + ], + [ + 351, + 282 + ], + [ + 353, + 294 + ], + [ + 361, + 295 + ], + [ + 374, + 296 + ], + [ + 386, + 304 + ], + [ + 391, + 313 + ], + [ + 385, + 324 + ], + [ + 387, + 339 + ], + [ + 402, + 341 + ], + [ + 
408, + 337 + ], + [ + 437, + 328 + ], + [ + 445, + 320 + ], + [ + 458, + 310 + ], + [ + 465, + 320 + ], + [ + 466, + 339 + ], + [ + 466, + 361 + ], + [ + 478, + 374 + ], + [ + 493, + 378 + ], + [ + 514, + 371 + ], + [ + 533, + 380 + ], + [ + 540, + 393 + ], + [ + 544, + 400 + ], + [ + 536, + 415 + ], + [ + 531, + 425 + ], + [ + 521, + 435 + ], + [ + 533, + 443 + ], + [ + 550, + 436 + ], + [ + 562, + 440 + ], + [ + 562, + 461 + ], + [ + 548, + 474 + ], + [ + 539, + 494 + ], + [ + 530, + 494 + ], + [ + 532, + 503 + ], + [ + 536, + 508 + ], + [ + 543, + 512 + ], + [ + 550, + 519 + ], + [ + 543, + 536 + ], + [ + 536, + 552 + ], + [ + 528, + 560 + ], + [ + 517, + 560 + ], + [ + 506, + 557 + ], + [ + 497, + 571 + ], + [ + 505, + 578 + ], + [ + 521, + 587 + ], + [ + 532, + 595 + ], + [ + 546, + 604 + ], + [ + 551, + 624 + ], + [ + 550, + 646 + ], + [ + 540, + 667 + ], + [ + 535, + 681 + ], + [ + 542, + 696 + ], + [ + 547, + 708 + ], + [ + 530, + 722 + ], + [ + 511, + 719 + ], + [ + 489, + 740 + ], + [ + 497, + 753 + ], + [ + 496, + 774 + ], + [ + 487, + 788 + ], + [ + 474, + 784 + ], + [ + 451, + 784 + ], + [ + 424, + 787 + ], + [ + 406, + 788 + ], + [ + 368, + 796 + ], + [ + 337, + 805 + ], + [ + 304, + 806 + ], + [ + 264, + 809 + ], + [ + 228, + 810 + ], + [ + 201, + 805 + ], + [ + 172, + 792 + ], + [ + 155, + 774 + ], + [ + 153, + 743 + ], + [ + 157, + 728 + ], + [ + 147, + 711 + ], + [ + 153, + 695 + ], + [ + 166, + 682 + ], + [ + 179, + 640 + ], + [ + 193, + 624 + ], + [ + 221, + 599 + ], + [ + 236, + 585 + ], + [ + 235, + 572 + ], + [ + 225, + 560 + ], + [ + 207, + 557 + ], + [ + 219, + 542 + ], + [ + 228, + 539 + ], + [ + 255, + 541 + ], + [ + 263, + 528 + ], + [ + 265, + 510 + ], + [ + 266, + 492 + ], + [ + 250, + 470 + ], + [ + 242, + 453 + ], + [ + 238, + 432 + ], + [ + 238, + 409 + ], + [ + 248, + 390 + ], + [ + 256, + 382 + ], + [ + 274, + 377 + ], + [ + 285, + 376 + ], + [ + 298, + 372 + ], + [ + 303, + 367 + ], + [ + 307, + 371 + ], + [ + 312, + 367 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1800, + 525 + ], + [ + 1801, + 558 + ], + [ + 1746, + 563 + ], + [ + 1708, + 564 + ], + [ + 1655, + 560 + ], + [ + 1654, + 548 + ], + [ + 1767, + 536 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1365, + 487 + ], + [ + 1273, + 489 + ], + [ + 1263, + 485 + ], + [ + 1251, + 480 + ], + [ + 1260, + 478 + ], + [ + 1281, + 476 + ], + [ + 1279, + 471 + ], + [ + 1218, + 468 + ], + [ + 1219, + 457 + ], + [ + 1396, + 459 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1109, + 255 + ], + [ + 1135, + 215 + ], + [ + 1135, + 110 + ], + [ + 1143, + 98 + ], + [ + 1201, + 97 + ], + [ + 1200, + 70 + ], + [ + 1207, + 70 + ], + [ + 1216, + 66 + ], + [ + 1217, + 58 + ], + [ + 1222, + 59 + ], + [ + 1247, + 40 + ], + [ + 1560, + 70 + ], + [ + 1578, + 460 + ], + [ + 1366, + 471 + ], + [ + 1307, + 467 + ], + [ + 1257, + 461 + ], + [ + 1154, + 462 + ], + [ + 1109, + 440 + ], + [ + 1107, + 383 + ], + [ + 1109, + 307 + ], + [ + 1109, + 275 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1099, + 386 + ], + [ + 1124, + 387 + ], + [ + 1121, + 409 + ], + [ + 1126, + 409 + ], + [ + 1134, + 418 + ], + [ + 1132, + 439 + ], + [ + 1098, + 439 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 989, + 395 + ], + [ + 980, + 392 + ], + [ + 980, + 384 + ], + [ + 979, + 378 + ], + [ + 976, + 372 + ], + [ + 975, + 361 + ], + [ + 978, + 354 + ], + [ + 983, + 350 + ], + [ + 985, + 345 + ], + [ + 989, + 338 + ], + [ + 995, + 329 + ], + [ + 997, + 325 + ], + [ + 998, + 319 + 
], + [ + 1010, + 312 + ], + [ + 1010, + 303 + ], + [ + 1013, + 296 + ], + [ + 1017, + 290 + ], + [ + 1023, + 280 + ], + [ + 1045, + 275 + ], + [ + 1045, + 281 + ], + [ + 1052, + 281 + ], + [ + 1055, + 285 + ], + [ + 1059, + 287 + ], + [ + 1064, + 286 + ], + [ + 1071, + 284 + ], + [ + 1072, + 281 + ], + [ + 1072, + 273 + ], + [ + 1072, + 267 + ], + [ + 1077, + 261 + ], + [ + 1086, + 252 + ], + [ + 1089, + 246 + ], + [ + 1095, + 244 + ], + [ + 1101, + 248 + ], + [ + 1104, + 254 + ], + [ + 1110, + 259 + ], + [ + 1114, + 255 + ], + [ + 1118, + 251 + ], + [ + 1122, + 254 + ], + [ + 1126, + 263 + ], + [ + 1129, + 263 + ], + [ + 1137, + 263 + ], + [ + 1142, + 274 + ], + [ + 1143, + 279 + ], + [ + 1148, + 272 + ], + [ + 1151, + 272 + ], + [ + 1162, + 275 + ], + [ + 1165, + 283 + ], + [ + 1169, + 292 + ], + [ + 1168, + 304 + ], + [ + 1160, + 313 + ], + [ + 1152, + 319 + ], + [ + 1145, + 324 + ], + [ + 1139, + 333 + ], + [ + 1150, + 345 + ], + [ + 1146, + 356 + ], + [ + 1135, + 368 + ], + [ + 1130, + 369 + ], + [ + 1128, + 381 + ], + [ + 1122, + 412 + ], + [ + 1127, + 443 + ], + [ + 1115, + 440 + ], + [ + 1114, + 411 + ], + [ + 1114, + 399 + ], + [ + 1114, + 388 + ], + [ + 1114, + 385 + ], + [ + 1102, + 380 + ], + [ + 1090, + 387 + ], + [ + 1084, + 380 + ], + [ + 1073, + 377 + ], + [ + 1069, + 379 + ], + [ + 1068, + 382 + ], + [ + 1066, + 393 + ], + [ + 1061, + 402 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1265, + 431 + ], + [ + 1331, + 430 + ], + [ + 1340, + 430 + ], + [ + 1344, + 416 + ], + [ + 1383, + 414 + ], + [ + 1382, + 475 + ], + [ + 1358, + 484 + ], + [ + 1341, + 474 + ], + [ + 1317, + 475 + ], + [ + 1265, + 463 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1198, + 254 + ], + [ + 1206, + 247 + ], + [ + 1202, + 242 + ], + [ + 1199, + 232 + ], + [ + 1215, + 226 + ], + [ + 1222, + 218 + ], + [ + 1226, + 214 + ], + [ + 1229, + 212 + ], + [ + 1238, + 222 + ], + [ + 1243, + 231 + ], + [ + 1257, + 225 + ], + [ + 1263, + 212 + ], + [ + 1272, + 201 + ], + [ + 1298, + 187 + ], + [ + 1349, + 173 + ], + [ + 1391, + 180 + ], + [ + 1415, + 189 + ], + [ + 1424, + 189 + ], + [ + 1435, + 201 + ], + [ + 1438, + 212 + ], + [ + 1437, + 224 + ], + [ + 1442, + 227 + ], + [ + 1449, + 227 + ], + [ + 1457, + 222 + ], + [ + 1461, + 221 + ], + [ + 1468, + 226 + ], + [ + 1475, + 225 + ], + [ + 1486, + 219 + ], + [ + 1495, + 228 + ], + [ + 1501, + 238 + ], + [ + 1506, + 245 + ], + [ + 1511, + 265 + ], + [ + 1513, + 282 + ], + [ + 1521, + 286 + ], + [ + 1523, + 294 + ], + [ + 1530, + 298 + ], + [ + 1546, + 300 + ], + [ + 1566, + 442 + ], + [ + 1413, + 435 + ], + [ + 1391, + 425 + ], + [ + 1391, + 422 + ], + [ + 1389, + 418 + ], + [ + 1383, + 411 + ], + [ + 1378, + 403 + ], + [ + 1372, + 395 + ], + [ + 1370, + 391 + ], + [ + 1364, + 387 + ], + [ + 1351, + 381 + ], + [ + 1338, + 392 + ], + [ + 1333, + 408 + ], + [ + 1330, + 471 + ], + [ + 1324, + 472 + ], + [ + 1317, + 399 + ], + [ + 1288, + 387 + ], + [ + 1280, + 389 + ], + [ + 1270, + 386 + ], + [ + 1262, + 393 + ], + [ + 1252, + 394 + ], + [ + 1238, + 390 + ], + [ + 1241, + 420 + ], + [ + 1247, + 436 + ], + [ + 1248, + 464 + ], + [ + 1236, + 464 + ], + [ + 1225, + 464 + ], + [ + 1222, + 455 + ], + [ + 1222, + 451 + ], + [ + 1232, + 442 + ], + [ + 1238, + 447 + ], + [ + 1236, + 430 + ], + [ + 1236, + 411 + ], + [ + 1234, + 400 + ], + [ + 1231, + 394 + ], + [ + 1225, + 395 + ], + [ + 1220, + 395 + ], + [ + 1212, + 393 + ], + [ + 1214, + 386 + ], + [ + 1204, + 388 + ], + [ + 1197, + 392 + ], + [ + 1194, + 404 + ], + [ + 1187, + 400 + ], + 
[ + 1186, + 395 + ], + [ + 1188, + 383 + ], + [ + 1177, + 382 + ], + [ + 1177, + 376 + ], + [ + 1168, + 367 + ], + [ + 1161, + 364 + ], + [ + 1154, + 366 + ], + [ + 1142, + 359 + ], + [ + 1136, + 346 + ], + [ + 1144, + 341 + ], + [ + 1147, + 339 + ], + [ + 1145, + 333 + ], + [ + 1146, + 328 + ], + [ + 1155, + 324 + ], + [ + 1155, + 322 + ], + [ + 1158, + 312 + ], + [ + 1162, + 310 + ], + [ + 1162, + 298 + ], + [ + 1163, + 288 + ], + [ + 1175, + 277 + ], + [ + 1174, + 270 + ], + [ + 1173, + 263 + ], + [ + 1183, + 257 + ], + [ + 1192, + 257 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1542, + 0 + ], + [ + 2048, + 2 + ], + [ + 2048, + 556 + ], + [ + 1540, + 430 + ], + [ + 1543, + 367 + ], + [ + 1535, + 366 + ], + [ + 1532, + 325 + ], + [ + 1544, + 322 + ], + [ + 1544, + 298 + ], + [ + 1532, + 298 + ], + [ + 1536, + 191 + ], + [ + 1538, + 68 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1454, + 203 + ], + [ + 1458, + 418 + ], + [ + 1452, + 420 + ], + [ + 1451, + 372 + ], + [ + 1444, + 374 + ], + [ + 1447, + 362 + ], + [ + 1450, + 361 + ], + [ + 1449, + 201 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1395, + 209 + ], + [ + 1394, + 437 + ], + [ + 1388, + 441 + ], + [ + 1391, + 206 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1385, + 372 + ], + [ + 1403, + 372 + ], + [ + 1403, + 406 + ], + [ + 1389, + 405 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1405, + 316 + ], + [ + 1405, + 326 + ], + [ + 1393, + 328 + ], + [ + 1393, + 317 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1446, + 335 + ], + [ + 1447, + 346 + ], + [ + 1398, + 346 + ], + [ + 1396, + 331 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1409, + 349 + ], + [ + 1416, + 349 + ], + [ + 1421, + 351 + ], + [ + 1428, + 356 + ], + [ + 1426, + 369 + ], + [ + 1423, + 374 + ], + [ + 1412, + 377 + ], + [ + 1404, + 374 + ], + [ + 1401, + 368 + ], + [ + 1401, + 361 + ], + [ + 1403, + 353 + ], + [ + 1405, + 352 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1425, + 381 + ], + [ + 1416, + 396 + ], + [ + 1400, + 385 + ], + [ + 1413, + 369 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1494, + 374 + ], + [ + 1494, + 385 + ], + [ + 1459, + 385 + ], + [ + 1459, + 377 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1342, + 396 + ], + [ + 1344, + 414 + ], + [ + 1286, + 412 + ], + [ + 1288, + 398 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1340, + 380 + ], + [ + 1342, + 397 + ], + [ + 1287, + 401 + ], + [ + 1286, + 384 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1341, + 367 + ], + [ + 1341, + 380 + ], + [ + 1289, + 383 + ], + [ + 1287, + 370 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1341, + 354 + ], + [ + 1340, + 369 + ], + [ + 1288, + 367 + ], + [ + 1288, + 355 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1341, + 341 + ], + [ + 1341, + 353 + ], + [ + 1286, + 355 + ], + [ + 1286, + 342 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1252, + 200 + ], + [ + 1320, + 211 + ], + [ + 1320, + 485 + ], + [ + 1310, + 488 + ], + [ + 1313, + 215 + ], + [ + 1241, + 207 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1306, + 420 + ], + [ + 1321, + 420 + ], + [ + 1321, + 427 + ], + [ + 1306, + 428 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1308, + 417 + ], + [ + 1304, + 410 + ], + [ + 1310, + 406 + ], + [ + 1317, + 406 + ], + [ + 1323, + 410 + ], + [ + 1323, + 415 + ], + 
[ + 1323, + 418 + ], + [ + 1318, + 420 + ], + [ + 1313, + 420 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1254, + 448 + ], + [ + 1253, + 465 + ], + [ + 1251, + 465 + ], + [ + 1253, + 443 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1262, + 445 + ], + [ + 1262, + 464 + ], + [ + 1261, + 464 + ], + [ + 1261, + 441 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1272, + 441 + ], + [ + 1269, + 468 + ], + [ + 1274, + 466 + ], + [ + 1274, + 437 + ] + ] + }, + { + "label": "polegroup", + "polygon": [ + [ + 1283, + 447 + ], + [ + 1279, + 470 + ], + [ + 1282, + 478 + ], + [ + 1287, + 475 + ], + [ + 1289, + 481 + ], + [ + 1292, + 481 + ], + [ + 1294, + 457 + ], + [ + 1290, + 453 + ], + [ + 1282, + 451 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1758, + 562 + ], + [ + 1698, + 564 + ], + [ + 2047, + 630 + ], + [ + 2047, + 573 + ], + [ + 1808, + 508 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1758, + 562 + ], + [ + 1698, + 564 + ], + [ + 2047, + 630 + ], + [ + 2047, + 573 + ], + [ + 1808, + 508 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1299, + 455 + ], + [ + 1299, + 481 + ], + [ + 1300, + 481 + ], + [ + 1300, + 450 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1418, + 298 + ], + [ + 1417, + 316 + ], + [ + 1370, + 317 + ], + [ + 1370, + 301 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1504, + 422 + ], + [ + 1508, + 352 + ], + [ + 1505, + 316 + ], + [ + 1500, + 306 + ], + [ + 1507, + 287 + ], + [ + 1503, + 273 + ], + [ + 1503, + 261 + ], + [ + 1508, + 249 + ], + [ + 1505, + 237 + ], + [ + 1501, + 228 + ], + [ + 1501, + 219 + ], + [ + 1506, + 206 + ], + [ + 1503, + 189 + ], + [ + 1483, + 188 + ], + [ + 1464, + 193 + ], + [ + 1452, + 204 + ], + [ + 1429, + 213 + ], + [ + 1418, + 215 + ], + [ + 1406, + 223 + ], + [ + 1387, + 227 + ], + [ + 1363, + 226 + ], + [ + 1344, + 215 + ], + [ + 1332, + 213 + ], + [ + 1321, + 222 + ], + [ + 1299, + 224 + ], + [ + 1294, + 213 + ], + [ + 1298, + 199 + ], + [ + 1297, + 184 + ], + [ + 1292, + 176 + ], + [ + 1266, + 181 + ], + [ + 1256, + 181 + ], + [ + 1243, + 181 + ], + [ + 1236, + 174 + ], + [ + 1235, + 168 + ], + [ + 1253, + 158 + ], + [ + 1272, + 154 + ], + [ + 1265, + 146 + ], + [ + 1256, + 143 + ], + [ + 1240, + 143 + ], + [ + 1231, + 138 + ], + [ + 1226, + 138 + ], + [ + 1211, + 134 + ], + [ + 1215, + 130 + ], + [ + 1222, + 124 + ], + [ + 1219, + 114 + ], + [ + 1211, + 113 + ], + [ + 1206, + 104 + ], + [ + 1211, + 97 + ], + [ + 1234, + 85 + ], + [ + 1239, + 73 + ], + [ + 1241, + 55 + ], + [ + 1233, + 48 + ], + [ + 1223, + 44 + ], + [ + 1221, + 42 + ], + [ + 1216, + 33 + ], + [ + 1219, + 28 + ], + [ + 1228, + 26 + ], + [ + 1240, + 17 + ], + [ + 1247, + 7 + ], + [ + 1240, + 0 + ], + [ + 1722, + 0 + ], + [ + 1693, + 30 + ], + [ + 1660, + 48 + ], + [ + 1641, + 57 + ], + [ + 1627, + 68 + ], + [ + 1626, + 74 + ], + [ + 1631, + 82 + ], + [ + 1638, + 87 + ], + [ + 1649, + 82 + ], + [ + 1661, + 82 + ], + [ + 1671, + 90 + ], + [ + 1681, + 106 + ], + [ + 1684, + 119 + ], + [ + 1686, + 129 + ], + [ + 1688, + 141 + ], + [ + 1675, + 157 + ], + [ + 1668, + 154 + ], + [ + 1643, + 161 + ], + [ + 1634, + 161 + ], + [ + 1621, + 158 + ], + [ + 1608, + 158 + ], + [ + 1586, + 160 + ], + [ + 1573, + 167 + ], + [ + 1568, + 171 + ], + [ + 1567, + 181 + ], + [ + 1562, + 192 + ], + [ + 1548, + 201 + ], + [ + 1537, + 202 + ], + [ + 1529, + 206 + ], + [ + 1524, + 213 + ], + [ + 1523, + 223 + ], + [ + 1521, + 231 + ], + [ + 1523, + 241 + ], + [ + 1525, + 247 + ], + [ + 1525, + 288 
+ ], + [ + 1525, + 319 + ], + [ + 1528, + 409 + ], + [ + 1530, + 439 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1617, + 451 + ], + [ + 1624, + 422 + ], + [ + 1633, + 413 + ], + [ + 1647, + 408 + ], + [ + 1662, + 394 + ], + [ + 1739, + 390 + ], + [ + 1764, + 394 + ], + [ + 1773, + 524 + ], + [ + 1751, + 531 + ], + [ + 1731, + 526 + ], + [ + 1706, + 530 + ], + [ + 1702, + 542 + ], + [ + 1693, + 542 + ], + [ + 1689, + 536 + ], + [ + 1685, + 530 + ], + [ + 1669, + 530 + ], + [ + 1667, + 532 + ], + [ + 1667, + 536 + ], + [ + 1666, + 547 + ], + [ + 1647, + 547 + ], + [ + 1615, + 475 + ], + [ + 1615, + 464 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1700, + 30 + ], + [ + 1674, + 28 + ], + [ + 1655, + 31 + ], + [ + 1642, + 46 + ], + [ + 1596, + 72 + ], + [ + 1569, + 90 + ], + [ + 1552, + 90 + ], + [ + 1533, + 81 + ], + [ + 1511, + 80 + ], + [ + 1473, + 97 + ], + [ + 1452, + 91 + ], + [ + 1442, + 83 + ], + [ + 1431, + 69 + ], + [ + 1446, + 44 + ], + [ + 1452, + 21 + ], + [ + 1456, + 11 + ], + [ + 1480, + 0 + ], + [ + 1842, + 2 + ], + [ + 1818, + 9 + ], + [ + 1810, + 16 + ], + [ + 1799, + 35 + ], + [ + 1794, + 48 + ], + [ + 1783, + 72 + ], + [ + 1802, + 78 + ], + [ + 1827, + 76 + ], + [ + 1837, + 81 + ], + [ + 1848, + 86 + ], + [ + 1855, + 92 + ], + [ + 1855, + 104 + ], + [ + 1847, + 107 + ], + [ + 1831, + 94 + ], + [ + 1820, + 94 + ], + [ + 1818, + 115 + ], + [ + 1828, + 124 + ], + [ + 1838, + 127 + ], + [ + 1859, + 126 + ], + [ + 1860, + 125 + ], + [ + 1863, + 132 + ], + [ + 1847, + 143 + ], + [ + 1836, + 141 + ], + [ + 1821, + 135 + ], + [ + 1809, + 131 + ], + [ + 1803, + 137 + ], + [ + 1794, + 145 + ], + [ + 1790, + 151 + ], + [ + 1796, + 158 + ], + [ + 1778, + 175 + ], + [ + 1779, + 284 + ], + [ + 1779, + 414 + ], + [ + 1779, + 484 + ], + [ + 1760, + 515 + ], + [ + 1754, + 496 + ], + [ + 1757, + 440 + ], + [ + 1759, + 374 + ], + [ + 1761, + 306 + ], + [ + 1761, + 245 + ], + [ + 1762, + 224 + ], + [ + 1748, + 224 + ], + [ + 1733, + 227 + ], + [ + 1727, + 220 + ], + [ + 1728, + 202 + ], + [ + 1738, + 187 + ], + [ + 1756, + 187 + ], + [ + 1756, + 180 + ], + [ + 1754, + 166 + ], + [ + 1750, + 161 + ], + [ + 1730, + 173 + ], + [ + 1725, + 169 + ], + [ + 1719, + 158 + ], + [ + 1715, + 159 + ], + [ + 1721, + 142 + ], + [ + 1730, + 138 + ], + [ + 1739, + 135 + ], + [ + 1740, + 133 + ], + [ + 1747, + 104 + ], + [ + 1733, + 102 + ], + [ + 1696, + 124 + ], + [ + 1704, + 113 + ], + [ + 1735, + 95 + ], + [ + 1747, + 91 + ], + [ + 1750, + 79 + ], + [ + 1746, + 62 + ], + [ + 1728, + 71 + ], + [ + 1717, + 81 + ], + [ + 1706, + 88 + ], + [ + 1701, + 81 + ], + [ + 1707, + 72 + ], + [ + 1709, + 65 + ], + [ + 1703, + 59 + ], + [ + 1695, + 47 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1727, + 481 + ], + [ + 1733, + 540 + ], + [ + 1727, + 537 + ], + [ + 1722, + 499 + ], + [ + 1717, + 538 + ], + [ + 1711, + 537 + ], + [ + 1713, + 499 + ], + [ + 1716, + 482 + ], + [ + 1723, + 478 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1700, + 475 + ], + [ + 1711, + 545 + ], + [ + 1705, + 547 + ], + [ + 1699, + 518 + ], + [ + 1696, + 546 + ], + [ + 1691, + 546 + ], + [ + 1695, + 506 + ], + [ + 1696, + 483 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1936, + 0 + ], + [ + 2048, + 0 + ], + [ + 2046, + 63 + ], + [ + 1926, + 61 + ], + [ + 1929, + 49 + ], + [ + 1934, + 38 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1910, + 198 + ], + [ + 1920, + 198 + ], + [ + 1974, + 201 + ], + [ + 1990, + 205 + ], + [ + 1991, + 214 + ], + [ + 1980, + 214 + ], + [ + 
1979, + 275 + ], + [ + 1989, + 275 + ], + [ + 1990, + 287 + ], + [ + 1973, + 288 + ], + [ + 1970, + 290 + ], + [ + 1908, + 289 + ], + [ + 1909, + 259 + ], + [ + 1909, + 220 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1796, + 457 + ], + [ + 1817, + 442 + ], + [ + 1875, + 430 + ], + [ + 1937, + 427 + ], + [ + 1995, + 434 + ], + [ + 2030, + 468 + ], + [ + 1935, + 563 + ], + [ + 1890, + 568 + ], + [ + 1887, + 578 + ], + [ + 1878, + 586 + ], + [ + 1861, + 588 + ], + [ + 1844, + 579 + ], + [ + 1841, + 564 + ], + [ + 1840, + 561 + ], + [ + 1791, + 558 + ], + [ + 1791, + 566 + ], + [ + 1785, + 574 + ], + [ + 1766, + 574 + ], + [ + 1755, + 560 + ], + [ + 1753, + 524 + ], + [ + 1756, + 505 + ], + [ + 1769, + 488 + ], + [ + 1765, + 483 + ], + [ + 1766, + 476 + ], + [ + 1788, + 474 + ], + [ + 1791, + 471 + ], + [ + 1794, + 464 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1987, + 477 + ], + [ + 2022, + 451 + ], + [ + 2048, + 439 + ], + [ + 2048, + 599 + ], + [ + 2013, + 597 + ], + [ + 1984, + 598 + ], + [ + 1981, + 604 + ], + [ + 1962, + 614 + ], + [ + 1948, + 614 + ], + [ + 1936, + 606 + ], + [ + 1930, + 578 + ], + [ + 1929, + 554 + ], + [ + 1932, + 528 + ], + [ + 1946, + 513 + ], + [ + 1962, + 502 + ], + [ + 1961, + 497 + ], + [ + 1958, + 490 + ], + [ + 1959, + 487 + ], + [ + 1966, + 482 + ], + [ + 1973, + 477 + ], + [ + 1981, + 477 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1143, + 381 + ], + [ + 1156, + 379 + ], + [ + 1155, + 415 + ], + [ + 1145, + 413 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1159, + 389 + ], + [ + 1156, + 448 + ], + [ + 1160, + 448 + ], + [ + 1162, + 386 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1174, + 403 + ], + [ + 1183, + 406 + ], + [ + 1184, + 421 + ], + [ + 1169, + 422 + ], + [ + 1169, + 412 + ], + [ + 1176, + 413 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1177, + 399 + ], + [ + 1177, + 441 + ], + [ + 1175, + 441 + ], + [ + 1175, + 396 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1139, + 471 + ], + [ + 1146, + 460 + ], + [ + 1150, + 452 + ], + [ + 1155, + 442 + ], + [ + 1159, + 436 + ], + [ + 1169, + 435 + ], + [ + 1201, + 436 + ], + [ + 1211, + 436 + ], + [ + 1220, + 440 + ], + [ + 1231, + 457 + ], + [ + 1235, + 466 + ], + [ + 1235, + 488 + ], + [ + 1235, + 500 + ], + [ + 1235, + 510 + ], + [ + 1234, + 512 + ], + [ + 1225, + 514 + ], + [ + 1219, + 513 + ], + [ + 1215, + 510 + ], + [ + 1215, + 505 + ], + [ + 1190, + 502 + ], + [ + 1158, + 504 + ], + [ + 1147, + 504 + ], + [ + 1142, + 491 + ], + [ + 1141, + 480 + ], + [ + 1140, + 477 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 945, + 418 + ], + [ + 965, + 390 + ], + [ + 1058, + 392 + ], + [ + 1072, + 419 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1070, + 436 + ], + [ + 1100, + 430 + ], + [ + 1135, + 436 + ], + [ + 1154, + 463 + ], + [ + 1160, + 483 + ], + [ + 1155, + 516 + ], + [ + 1153, + 524 + ], + [ + 1142, + 525 + ], + [ + 1139, + 522 + ], + [ + 1134, + 511 + ], + [ + 1115, + 512 + ], + [ + 1096, + 491 + ], + [ + 1080, + 462 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 916, + 436 + ], + [ + 930, + 416 + ], + [ + 937, + 408 + ], + [ + 948, + 388 + ], + [ + 963, + 386 + ], + [ + 968, + 390 + ], + [ + 972, + 402 + ], + [ + 1013, + 403 + ], + [ + 1044, + 403 + ], + [ + 1046, + 395 + ], + [ + 1051, + 390 + ], + [ + 1066, + 389 + ], + [ + 1072, + 401 + ], + [ + 1087, + 422 + ], + [ + 1096, + 443 + ], + [ + 1104, + 464 + ], + [ + 1109, + 459 + ], + [ + 1120, + 462 + ], + [ + 1130, + 465 + 
], + [ + 1133, + 471 + ], + [ + 1128, + 477 + ], + [ + 1119, + 484 + ], + [ + 1118, + 491 + ], + [ + 1116, + 497 + ], + [ + 1121, + 507 + ], + [ + 1121, + 539 + ], + [ + 1121, + 583 + ], + [ + 1119, + 598 + ], + [ + 1111, + 607 + ], + [ + 1091, + 608 + ], + [ + 1078, + 597 + ], + [ + 1069, + 583 + ], + [ + 1035, + 576 + ], + [ + 1009, + 579 + ], + [ + 988, + 574 + ], + [ + 981, + 574 + ], + [ + 953, + 578 + ], + [ + 932, + 582 + ], + [ + 908, + 579 + ], + [ + 908, + 590 + ], + [ + 906, + 598 + ], + [ + 893, + 601 + ], + [ + 883, + 595 + ], + [ + 883, + 560 + ], + [ + 881, + 525 + ], + [ + 883, + 499 + ], + [ + 893, + 471 + ], + [ + 889, + 469 + ], + [ + 886, + 462 + ], + [ + 890, + 458 + ], + [ + 902, + 457 + ], + [ + 912, + 445 + ], + [ + 914, + 442 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1362, + 431 + ], + [ + 1366, + 443 + ], + [ + 1372, + 450 + ], + [ + 1368, + 464 + ], + [ + 1365, + 471 + ], + [ + 1365, + 482 + ], + [ + 1356, + 486 + ], + [ + 1353, + 474 + ], + [ + 1352, + 468 + ], + [ + 1346, + 460 + ], + [ + 1349, + 457 + ], + [ + 1346, + 451 + ], + [ + 1354, + 446 + ], + [ + 1349, + 442 + ], + [ + 1349, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1379, + 448 + ], + [ + 1388, + 432 + ], + [ + 1403, + 419 + ], + [ + 1427, + 414 + ], + [ + 1490, + 409 + ], + [ + 1560, + 415 + ], + [ + 1588, + 416 + ], + [ + 1614, + 430 + ], + [ + 1641, + 474 + ], + [ + 1655, + 531 + ], + [ + 1667, + 566 + ], + [ + 1662, + 612 + ], + [ + 1654, + 649 + ], + [ + 1646, + 655 + ], + [ + 1626, + 655 + ], + [ + 1613, + 644 + ], + [ + 1612, + 633 + ], + [ + 1594, + 628 + ], + [ + 1563, + 621 + ], + [ + 1504, + 622 + ], + [ + 1448, + 622 + ], + [ + 1433, + 621 + ], + [ + 1421, + 622 + ], + [ + 1420, + 630 + ], + [ + 1419, + 638 + ], + [ + 1411, + 648 + ], + [ + 1399, + 648 + ], + [ + 1386, + 643 + ], + [ + 1382, + 612 + ], + [ + 1382, + 608 + ], + [ + 1373, + 606 + ], + [ + 1365, + 616 + ], + [ + 1353, + 620 + ], + [ + 1340, + 618 + ], + [ + 1335, + 612 + ], + [ + 1334, + 575 + ], + [ + 1335, + 533 + ], + [ + 1339, + 520 + ], + [ + 1352, + 495 + ], + [ + 1347, + 490 + ], + [ + 1337, + 485 + ], + [ + 1333, + 481 + ], + [ + 1333, + 469 + ], + [ + 1343, + 469 + ], + [ + 1354, + 471 + ], + [ + 1360, + 481 + ], + [ + 1363, + 473 + ], + [ + 1369, + 461 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 964, + 500 + ], + [ + 1039, + 504 + ], + [ + 1037, + 520 + ], + [ + 966, + 519 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1492, + 510 + ], + [ + 1584, + 511 + ], + [ + 1583, + 533 + ], + [ + 1495, + 532 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000030_000019_gtFine_labelTrainIds.png 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000030_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..8f7ad07e08d7bb7e7f6add3819beed5a8c42ca38 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000030_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000031_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000031_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..557ac17866febdc432e4139145a4d4caf140e303 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000031_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000031_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000031_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..2a08e3160d07523e36d4cacb2129a73eae6c6439 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000031_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000032_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000032_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..184f0f5a3dc5761e89fb8313affae1b5318bae30 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000032_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000032_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000032_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..991f372faf1ee48e455c17da061e8b36b3d1cae4 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000032_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000033_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000033_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..3614972e33dde9e634ecc13e120fe8ab1a947bcb Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000033_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000034_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000034_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..36e16fffbd45e74c9d8edbdfabe0364b42ebaa6b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000034_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000034_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000034_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..474e7352f1b5d65c5c78c3433031493b822c7977 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000034_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000035_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000035_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..324d245fa037806792a6427b3eeb32e6da0ea506 Binary files /dev/null and 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000035_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000036_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000036_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..b916e73b6b859e5bdf5de1c52b72145a63dd3190 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000036_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000036_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000036_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..8b68d278b9febc4585917b8396ec5e2844f51c59 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000036_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000037_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000037_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..1451bc89d8a247aa264c96ec2910eeb6bf549ed3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000037_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000040_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000040_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..85e2056b02d6875813af16e4fef3ea8dc40a45ae Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000040_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000041_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000041_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..b7a7babc5b98353cab0fce4e497516181f5d913a Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000041_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000041_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000041_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..554b055080f84918aea9e735496a137b0e0dd6dc Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000041_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000042_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000042_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..5a764b66a5296d43c1dac5bd2bc327fb592fbbd5 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000042_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000042_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000042_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..904d0d80ab838d542f8661843c59dab0c0fdf60e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000042_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000043_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000043_000019_gtFine_instanceIds.png new file mode 100644 index 
0000000000000000000000000000000000000000..072dccb65902bcdc6819a525491dd48455aa4c5b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000043_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000045_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000045_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..3746d2bb4a7326d0328810f0ab1910844b2b49f9 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000045_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000046_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000046_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..18095895cf87063e039612b4e3524a88ddbf013c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000046_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000048_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000048_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..47f416c2bfd85140ed7343e63106e66161e0a82f Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000048_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000048_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000048_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..3b9664a3783cca3b55f9b797c1302ed06d429a54 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000048_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000049_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000049_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..acdf3afa7814fef18e5d1e79a39e8dab89116648 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000049_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000049_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000049_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..669021d61439b6a89af3444a9347cc05087bf730 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000049_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000052_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000052_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..79af7334fc3eadbec598c86706a63fbcad381ca8 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000052_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000052_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000052_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..bd8c95f555dc63cf857b9ef2dd3c489316ea1e3f Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000052_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000054_000019_gtFine_instanceIds.png 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000054_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..5e637774d6b28019b02f159c08591e4da7795f8b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000054_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000054_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000054_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..3364eeefa181e47a7d855d81cbe2d59d7fc99a52 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000054_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000055_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000055_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..da1ada41b3c1a7a3c587a87f6d4241c9e237ee95 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000055_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000056_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000056_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..b414e9c8b7c472f320afd93368028c47d6c3c62b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000056_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000056_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000056_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..0d168b2fdb908035d4a03b98f38c06807ed47b49 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000056_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000058_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000058_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d68a5b64e35effe2288e9c1b95c423f3bbc86d29 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000058_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000058_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000058_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..4d10c9f3f7fc1e20b2bd1061e488cb0a28cc1e14 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000058_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000059_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000059_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ff76fdcab9278190edc6288a269527ca98a3c9b5 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000059_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000059_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000059_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..e878ba13df15710c91584c04c7f39d8552246f6a Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000059_000019_gtFine_labelTrainIds.png 
differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000060_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000060_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..285c78a038637b270816f5b1a3129bc7c0729dfc Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000060_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000060_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000060_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..2bf4881afb5270246e8a328f965f077bf51ceb61 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000060_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000060_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000060_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..690efbf912f086696deb53a679ee19be3ecdf81e --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000060_000019_gtFine_polygons.json @@ -0,0 +1,5631 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1071, + 141 + ], + [ + 999, + 252 + ], + [ + 938, + 257 + ], + [ + 899, + 76 + ], + [ + 895, + 0 + ], + [ + 1084, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 991, + 457 + ], + [ + 933, + 459 + ], + [ + 693, + 484 + ], + [ + 518, + 508 + ], + [ + 0, + 540 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 520 + ], + [ + 1696, + 512 + ], + [ + 1151, + 478 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 179, + 547 + ], + [ + 52, + 551 + ], + [ + 0, + 552 + ], + [ + 0, + 519 + ], + [ + 55, + 513 + ], + [ + 408, + 488 + ], + [ + 698, + 475 + ], + [ + 715, + 488 + ], + [ + 673, + 494 + ], + [ + 602, + 503 + ], + [ + 737, + 507 + ], + [ + 767, + 509 + ], + [ + 776, + 510 + ], + [ + 778, + 517 + ], + [ + 767, + 520 + ], + [ + 761, + 523 + ], + [ + 748, + 526 + ], + [ + 731, + 531 + ], + [ + 722, + 533 + ], + [ + 650, + 536 + ], + [ + 468, + 540 + ], + [ + 405, + 542 + ], + [ + 352, + 543 + ], + [ + 265, + 545 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 2048, + 505 + ], + [ + 1728, + 505 + ], + [ + 989, + 460 + ], + [ + 976, + 440 + ], + [ + 1013, + 373 + ], + [ + 1260, + 0 + ], + [ + 2048, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1023, + 488 + ], + [ + 995, + 476 + ], + [ + 990, + 463 + ], + [ + 1036, + 465 + ], + [ + 1295, + 465 + ], + [ + 1405, + 472 + ], + [ + 1494, + 475 + ], + [ + 1584, + 477 + ], + [ + 1638, + 485 + ], + [ + 1667, + 485 + ], + [ + 1741, + 484 + ], + [ + 1783, + 482 + ], + [ + 1880, + 480 + ], + [ + 1943, + 483 + ], + [ + 2048, + 496 + ], + [ + 2048, + 550 + ], + [ + 1863, + 554 + ], + [ + 1576, + 560 + ], + [ + 1488, + 560 + ], + [ + 1408, + 559 + ], + [ + 1364, + 555 + ], + [ + 1365, + 540 + ], + [ + 1374, + 539 + ], + [ + 1382, + 538 + ], + [ + 1357, + 537 + ], + [ + 1258, + 536 + ], + [ + 1193, + 533 + ], + [ + 1142, + 532 + ], + [ + 1115, + 529 + ], + [ + 1101, + 522 + ], + [ + 1078, + 512 + ], + [ + 1047, + 498 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1956, + 521 + ], + [ + 1955, + 499 + ], + [ + 1952, + 498 + ], + [ + 1951, + 463 + ], + [ + 1956, + 459 + ], + [ + 1949, + 257 + ], + [ + 1952, + 250 + ], + [ + 1951, + 194 + ], + [ + 1945, + 191 + ], + [ + 1947, + 189 + ], 
+ [ + 1951, + 189 + ], + [ + 1947, + 1 + ], + [ + 1960, + 0 + ], + [ + 1965, + 126 + ], + [ + 1971, + 113 + ], + [ + 1978, + 104 + ], + [ + 1983, + 97 + ], + [ + 2045, + 43 + ], + [ + 2048, + 42 + ], + [ + 2047, + 48 + ], + [ + 2041, + 52 + ], + [ + 1985, + 104 + ], + [ + 1978, + 115 + ], + [ + 1971, + 131 + ], + [ + 1967, + 149 + ], + [ + 1967, + 186 + ], + [ + 1969, + 189 + ], + [ + 1969, + 191 + ], + [ + 1967, + 194 + ], + [ + 1969, + 253 + ], + [ + 1973, + 257 + ], + [ + 1972, + 259 + ], + [ + 1968, + 261 + ], + [ + 1970, + 266 + ], + [ + 1969, + 278 + ], + [ + 1971, + 354 + ], + [ + 1977, + 486 + ], + [ + 1974, + 521 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1965, + 526 + ], + [ + 1960, + 522 + ], + [ + 1961, + 491 + ], + [ + 1966, + 485 + ], + [ + 1976, + 483 + ], + [ + 2027, + 484 + ], + [ + 2032, + 487 + ], + [ + 2034, + 495 + ], + [ + 2034, + 525 + ], + [ + 2029, + 525 + ], + [ + 2028, + 531 + ], + [ + 1966, + 529 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1847, + 529 + ], + [ + 1840, + 231 + ], + [ + 1806, + 239 + ], + [ + 1806, + 246 + ], + [ + 1812, + 528 + ], + [ + 1819, + 528 + ], + [ + 1816, + 411 + ], + [ + 1837, + 409 + ], + [ + 1840, + 529 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1812, + 247 + ], + [ + 1813, + 284 + ], + [ + 1834, + 279 + ], + [ + 1834, + 241 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1814, + 348 + ], + [ + 1813, + 288 + ], + [ + 1835, + 283 + ], + [ + 1836, + 345 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1835, + 350 + ], + [ + 1813, + 353 + ], + [ + 1814, + 402 + ], + [ + 1837, + 401 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 2008, + 286 + ], + [ + 1992, + 285 + ], + [ + 1988, + 280 + ], + [ + 1976, + 279 + ], + [ + 1978, + 350 + ], + [ + 1990, + 349 + ], + [ + 1992, + 342 + ], + [ + 2006, + 339 + ], + [ + 2009, + 336 + ], + [ + 2009, + 332 + ], + [ + 2009, + 329 + ], + [ + 1991, + 328 + ], + [ + 1991, + 321 + ], + [ + 2004, + 318 + ], + [ + 2009, + 316 + ], + [ + 2009, + 312 + ], + [ + 2007, + 308 + ], + [ + 1991, + 308 + ], + [ + 1991, + 299 + ], + [ + 2003, + 296 + ], + [ + 2008, + 291 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1978, + 277 + ], + [ + 1963, + 278 + ], + [ + 1966, + 350 + ], + [ + 1979, + 349 + ], + [ + 1988, + 345 + ], + [ + 1990, + 338 + ], + [ + 1988, + 329 + ], + [ + 1985, + 327 + ], + [ + 1989, + 322 + ], + [ + 1991, + 313 + ], + [ + 1990, + 308 + ], + [ + 1985, + 304 + ], + [ + 1990, + 300 + ], + [ + 1991, + 294 + ], + [ + 1989, + 285 + ], + [ + 1984, + 281 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1914, + 348 + ], + [ + 1913, + 332 + ], + [ + 1908, + 329 + ], + [ + 1907, + 324 + ], + [ + 1908, + 320 + ], + [ + 1913, + 318 + ], + [ + 1913, + 304 + ], + [ + 1908, + 301 + ], + [ + 1906, + 296 + ], + [ + 1907, + 292 + ], + [ + 1911, + 290 + ], + [ + 1914, + 288 + ], + [ + 1914, + 283 + ], + [ + 1915, + 278 + ], + [ + 1919, + 276 + ], + [ + 1946, + 273 + ], + [ + 1951, + 275 + ], + [ + 1952, + 280 + ], + [ + 1953, + 288 + ], + [ + 1955, + 342 + ], + [ + 1953, + 348 + ], + [ + 1949, + 351 + ], + [ + 1920, + 354 + ], + [ + 1916, + 353 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1736, + 527 + ], + [ + 1735, + 493 + ], + [ + 1738, + 488 + ], + [ + 1801, + 486 + ], + [ + 1806, + 490 + ], + [ + 1806, + 524 + ], + [ + 1803, + 531 + ], + [ + 1802, + 532 + ], + [ + 1739, + 532 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1560, + 323 + ], + [ + 1563, 
+ 441 + ], + [ + 1564, + 449 + ], + [ + 1565, + 523 + ], + [ + 1575, + 527 + ], + [ + 1571, + 364 + ], + [ + 1569, + 319 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1621, + 457 + ], + [ + 1623, + 466 + ], + [ + 1622, + 526 + ], + [ + 1630, + 526 + ], + [ + 1629, + 465 + ], + [ + 1631, + 457 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1503, + 355 + ], + [ + 1510, + 538 + ], + [ + 1518, + 538 + ], + [ + 1510, + 355 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1455, + 459 + ], + [ + 1456, + 466 + ], + [ + 1459, + 525 + ], + [ + 1466, + 526 + ], + [ + 1463, + 465 + ], + [ + 1465, + 458 + ], + [ + 1462, + 456 + ], + [ + 1457, + 456 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1372, + 439 + ], + [ + 1374, + 495 + ], + [ + 1377, + 495 + ], + [ + 1375, + 439 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1345, + 462 + ], + [ + 1348, + 470 + ], + [ + 1348, + 524 + ], + [ + 1354, + 524 + ], + [ + 1351, + 469 + ], + [ + 1354, + 462 + ], + [ + 1351, + 460 + ], + [ + 1347, + 460 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1491, + 421 + ], + [ + 1496, + 507 + ], + [ + 1528, + 507 + ], + [ + 1524, + 421 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1497, + 370 + ], + [ + 1491, + 374 + ], + [ + 1485, + 382 + ], + [ + 1483, + 393 + ], + [ + 1484, + 402 + ], + [ + 1489, + 410 + ], + [ + 1496, + 417 + ], + [ + 1501, + 420 + ], + [ + 1514, + 418 + ], + [ + 1522, + 413 + ], + [ + 1527, + 403 + ], + [ + 1528, + 390 + ], + [ + 1524, + 378 + ], + [ + 1516, + 371 + ], + [ + 1507, + 368 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 402, + 503 + ], + [ + 322, + 508 + ], + [ + 271, + 516 + ], + [ + 215, + 523 + ], + [ + 160, + 522 + ], + [ + 159, + 498 + ], + [ + 172, + 498 + ], + [ + 234, + 485 + ], + [ + 409, + 479 + ], + [ + 405, + 501 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 133, + 466 + ], + [ + 61, + 460 + ], + [ + 64, + 504 + ], + [ + 59, + 516 + ], + [ + 52, + 524 + ], + [ + 0, + 526 + ], + [ + 0, + 0 + ], + [ + 930, + 0 + ], + [ + 947, + 54 + ], + [ + 943, + 75 + ], + [ + 944, + 80 + ], + [ + 936, + 83 + ], + [ + 927, + 80 + ], + [ + 923, + 85 + ], + [ + 932, + 96 + ], + [ + 938, + 112 + ], + [ + 942, + 120 + ], + [ + 948, + 118 + ], + [ + 957, + 126 + ], + [ + 960, + 139 + ], + [ + 954, + 146 + ], + [ + 949, + 147 + ], + [ + 949, + 160 + ], + [ + 957, + 160 + ], + [ + 963, + 166 + ], + [ + 964, + 183 + ], + [ + 971, + 188 + ], + [ + 965, + 199 + ], + [ + 967, + 206 + ], + [ + 968, + 212 + ], + [ + 975, + 202 + ], + [ + 986, + 202 + ], + [ + 994, + 193 + ], + [ + 1000, + 193 + ], + [ + 1002, + 189 + ], + [ + 1001, + 184 + ], + [ + 990, + 179 + ], + [ + 993, + 175 + ], + [ + 997, + 173 + ], + [ + 1011, + 157 + ], + [ + 1002, + 155 + ], + [ + 993, + 165 + ], + [ + 983, + 172 + ], + [ + 970, + 167 + ], + [ + 977, + 160 + ], + [ + 981, + 152 + ], + [ + 965, + 146 + ], + [ + 954, + 140 + ], + [ + 948, + 132 + ], + [ + 959, + 127 + ], + [ + 966, + 124 + ], + [ + 966, + 119 + ], + [ + 966, + 115 + ], + [ + 960, + 115 + ], + [ + 951, + 113 + ], + [ + 947, + 105 + ], + [ + 962, + 109 + ], + [ + 960, + 101 + ], + [ + 969, + 107 + ], + [ + 976, + 98 + ], + [ + 978, + 109 + ], + [ + 988, + 111 + ], + [ + 994, + 115 + ], + [ + 1002, + 114 + ], + [ + 1029, + 109 + ], + [ + 1030, + 105 + ], + [ + 1024, + 102 + ], + [ + 1017, + 105 + ], + [ + 1012, + 102 + ], + [ + 1016, + 98 + ], + [ + 1013, + 93 + ], + [ + 993, + 90 + ], + [ + 1008, + 82 + ], + [ + 1003, + 60 + ], + [ + 1031, + 64 + ], + 
[ + 1017, + 32 + ], + [ + 979, + 30 + ], + [ + 925, + 0 + ], + [ + 1425, + 0 + ], + [ + 1447, + 34 + ], + [ + 1475, + 39 + ], + [ + 1495, + 83 + ], + [ + 1486, + 111 + ], + [ + 1454, + 105 + ], + [ + 1449, + 111 + ], + [ + 1485, + 135 + ], + [ + 1471, + 147 + ], + [ + 1456, + 136 + ], + [ + 1407, + 131 + ], + [ + 1412, + 140 + ], + [ + 1432, + 148 + ], + [ + 1436, + 160 + ], + [ + 1406, + 160 + ], + [ + 1377, + 148 + ], + [ + 1381, + 159 + ], + [ + 1393, + 176 + ], + [ + 1390, + 187 + ], + [ + 1378, + 187 + ], + [ + 1366, + 188 + ], + [ + 1366, + 196 + ], + [ + 1357, + 210 + ], + [ + 1363, + 226 + ], + [ + 1366, + 268 + ], + [ + 1361, + 293 + ], + [ + 1348, + 294 + ], + [ + 1338, + 291 + ], + [ + 1349, + 268 + ], + [ + 1350, + 243 + ], + [ + 1342, + 218 + ], + [ + 1304, + 228 + ], + [ + 1292, + 229 + ], + [ + 1300, + 240 + ], + [ + 1283, + 239 + ], + [ + 1281, + 248 + ], + [ + 1284, + 252 + ], + [ + 1280, + 255 + ], + [ + 1281, + 263 + ], + [ + 1276, + 268 + ], + [ + 1273, + 272 + ], + [ + 1274, + 277 + ], + [ + 1269, + 287 + ], + [ + 1265, + 293 + ], + [ + 1261, + 303 + ], + [ + 1256, + 307 + ], + [ + 1253, + 324 + ], + [ + 1255, + 344 + ], + [ + 1256, + 360 + ], + [ + 1261, + 389 + ], + [ + 1265, + 438 + ], + [ + 1265, + 446 + ], + [ + 1260, + 464 + ], + [ + 1240, + 464 + ], + [ + 1240, + 423 + ], + [ + 1235, + 370 + ], + [ + 1232, + 352 + ], + [ + 1228, + 338 + ], + [ + 1223, + 319 + ], + [ + 1213, + 325 + ], + [ + 1199, + 323 + ], + [ + 1184, + 327 + ], + [ + 1176, + 327 + ], + [ + 1171, + 329 + ], + [ + 1178, + 334 + ], + [ + 1182, + 437 + ], + [ + 1151, + 442 + ], + [ + 1152, + 411 + ], + [ + 1149, + 397 + ], + [ + 1152, + 369 + ], + [ + 1151, + 355 + ], + [ + 1151, + 348 + ], + [ + 1149, + 337 + ], + [ + 1148, + 331 + ], + [ + 1143, + 329 + ], + [ + 1142, + 334 + ], + [ + 1140, + 353 + ], + [ + 1140, + 364 + ], + [ + 1137, + 371 + ], + [ + 1139, + 379 + ], + [ + 1137, + 389 + ], + [ + 1139, + 398 + ], + [ + 1136, + 408 + ], + [ + 1134, + 437 + ], + [ + 1120, + 443 + ], + [ + 1121, + 426 + ], + [ + 1107, + 426 + ], + [ + 1106, + 422 + ], + [ + 1110, + 418 + ], + [ + 1122, + 411 + ], + [ + 1112, + 400 + ], + [ + 1121, + 400 + ], + [ + 1119, + 389 + ], + [ + 1111, + 392 + ], + [ + 1109, + 382 + ], + [ + 1094, + 382 + ], + [ + 1081, + 371 + ], + [ + 1080, + 394 + ], + [ + 1082, + 417 + ], + [ + 1083, + 434 + ], + [ + 1090, + 442 + ], + [ + 1095, + 448 + ], + [ + 1036, + 481 + ], + [ + 1021, + 481 + ], + [ + 1017, + 477 + ], + [ + 1001, + 474 + ], + [ + 1005, + 472 + ], + [ + 1005, + 470 + ], + [ + 993, + 469 + ], + [ + 949, + 462 + ], + [ + 892, + 466 + ], + [ + 691, + 483 + ], + [ + 615, + 486 + ], + [ + 576, + 492 + ], + [ + 568, + 492 + ], + [ + 520, + 493 + ], + [ + 477, + 498 + ], + [ + 454, + 501 + ], + [ + 423, + 502 + ], + [ + 389, + 478 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 404, + 462 + ], + [ + 277, + 460 + ], + [ + 239, + 462 + ], + [ + 169, + 465 + ], + [ + 168, + 511 + ], + [ + 167, + 518 + ], + [ + 219, + 517 + ], + [ + 283, + 514 + ], + [ + 322, + 508 + ], + [ + 355, + 501 + ], + [ + 380, + 499 + ], + [ + 407, + 496 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 124, + 418 + ], + [ + 119, + 426 + ], + [ + 119, + 431 + ], + [ + 129, + 433 + ], + [ + 129, + 448 + ], + [ + 77, + 449 + ], + [ + 63, + 454 + ], + [ + 66, + 523 + ], + [ + 158, + 522 + ], + [ + 173, + 506 + ], + [ + 178, + 502 + ], + [ + 175, + 415 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 115, + 712 + ], + [ + 0, + 751 + ], + [ + 0, + 661 + ], + [ + 46, + 662 + ], 
+ [ + 86, + 665 + ], + [ + 108, + 668 + ], + [ + 120, + 673 + ], + [ + 125, + 675 + ], + [ + 129, + 704 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 31, + 485 + ], + [ + 32, + 470 + ], + [ + 29, + 453 + ], + [ + 24, + 423 + ], + [ + 20, + 412 + ], + [ + 19, + 403 + ], + [ + 17, + 400 + ], + [ + 11, + 400 + ], + [ + 5, + 403 + ], + [ + 0, + 407 + ], + [ + 0, + 525 + ], + [ + 9, + 525 + ], + [ + 14, + 506 + ], + [ + 17, + 491 + ], + [ + 27, + 497 + ], + [ + 38, + 515 + ], + [ + 34, + 525 + ], + [ + 40, + 525 + ], + [ + 47, + 521 + ], + [ + 52, + 516 + ], + [ + 45, + 505 + ], + [ + 38, + 494 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 51, + 415 + ], + [ + 55, + 412 + ], + [ + 62, + 411 + ], + [ + 68, + 411 + ], + [ + 72, + 413 + ], + [ + 74, + 421 + ], + [ + 72, + 428 + ], + [ + 74, + 431 + ], + [ + 77, + 438 + ], + [ + 78, + 454 + ], + [ + 78, + 474 + ], + [ + 74, + 475 + ], + [ + 71, + 480 + ], + [ + 71, + 494 + ], + [ + 74, + 504 + ], + [ + 76, + 515 + ], + [ + 76, + 521 + ], + [ + 70, + 527 + ], + [ + 55, + 527 + ], + [ + 54, + 525 + ], + [ + 60, + 523 + ], + [ + 62, + 517 + ], + [ + 59, + 500 + ], + [ + 56, + 480 + ], + [ + 55, + 469 + ], + [ + 52, + 462 + ], + [ + 54, + 457 + ], + [ + 45, + 455 + ], + [ + 42, + 453 + ], + [ + 43, + 451 + ], + [ + 46, + 449 + ], + [ + 52, + 449 + ], + [ + 54, + 443 + ], + [ + 57, + 436 + ], + [ + 56, + 423 + ], + [ + 53, + 418 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 196, + 457 + ], + [ + 196, + 446 + ], + [ + 193, + 435 + ], + [ + 189, + 431 + ], + [ + 187, + 427 + ], + [ + 188, + 422 + ], + [ + 186, + 417 + ], + [ + 182, + 414 + ], + [ + 176, + 415 + ], + [ + 172, + 422 + ], + [ + 173, + 427 + ], + [ + 175, + 432 + ], + [ + 172, + 443 + ], + [ + 171, + 451 + ], + [ + 174, + 477 + ], + [ + 190, + 493 + ], + [ + 204, + 505 + ], + [ + 207, + 512 + ], + [ + 211, + 517 + ], + [ + 217, + 506 + ], + [ + 197, + 486 + ], + [ + 196, + 479 + ], + [ + 200, + 473 + ], + [ + 200, + 468 + ], + [ + 197, + 462 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 143, + 450 + ], + [ + 141, + 442 + ], + [ + 142, + 433 + ], + [ + 139, + 430 + ], + [ + 140, + 425 + ], + [ + 139, + 417 + ], + [ + 141, + 412 + ], + [ + 144, + 410 + ], + [ + 151, + 409 + ], + [ + 156, + 410 + ], + [ + 161, + 420 + ], + [ + 159, + 424 + ], + [ + 165, + 428 + ], + [ + 171, + 440 + ], + [ + 173, + 446 + ], + [ + 172, + 449 + ], + [ + 167, + 451 + ], + [ + 170, + 457 + ], + [ + 174, + 467 + ], + [ + 175, + 476 + ], + [ + 166, + 476 + ], + [ + 160, + 489 + ], + [ + 165, + 498 + ], + [ + 173, + 502 + ], + [ + 174, + 512 + ], + [ + 161, + 524 + ], + [ + 150, + 525 + ], + [ + 145, + 524 + ], + [ + 147, + 522 + ], + [ + 150, + 520 + ], + [ + 153, + 519 + ], + [ + 155, + 515 + ], + [ + 153, + 499 + ], + [ + 145, + 494 + ], + [ + 144, + 490 + ], + [ + 143, + 479 + ], + [ + 143, + 471 + ], + [ + 139, + 471 + ], + [ + 137, + 469 + ], + [ + 138, + 464 + ], + [ + 142, + 454 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 209, + 424 + ], + [ + 207, + 413 + ], + [ + 200, + 413 + ], + [ + 192, + 416 + ], + [ + 187, + 419 + ], + [ + 190, + 422 + ], + [ + 189, + 427 + ], + [ + 193, + 429 + ], + [ + 194, + 432 + ], + [ + 196, + 433 + ], + [ + 200, + 438 + ], + [ + 199, + 444 + ], + [ + 200, + 464 + ], + [ + 198, + 475 + ], + [ + 201, + 476 + ], + [ + 202, + 479 + ], + [ + 198, + 487 + ], + [ + 198, + 491 + ], + [ + 203, + 495 + ], + [ + 210, + 505 + ], + [ + 210, + 515 + ], + [ + 208, + 519 + ], + [ + 205, + 523 + ], + [ + 208, + 526 + ], + [ + 218, 
+ 527 + ], + [ + 226, + 525 + ], + [ + 224, + 521 + ], + [ + 228, + 517 + ], + [ + 233, + 510 + ], + [ + 225, + 501 + ], + [ + 221, + 495 + ], + [ + 221, + 485 + ], + [ + 225, + 474 + ], + [ + 231, + 472 + ], + [ + 233, + 464 + ], + [ + 238, + 461 + ], + [ + 238, + 453 + ], + [ + 234, + 446 + ], + [ + 220, + 432 + ], + [ + 216, + 428 + ], + [ + 211, + 426 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 398, + 435 + ], + [ + 404, + 432 + ], + [ + 427, + 433 + ], + [ + 430, + 439 + ], + [ + 436, + 502 + ], + [ + 402, + 504 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 489, + 381 + ], + [ + 495, + 502 + ], + [ + 499, + 502 + ], + [ + 492, + 381 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 495, + 286 + ], + [ + 505, + 495 + ], + [ + 512, + 495 + ], + [ + 499, + 289 + ], + [ + 498, + 284 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 584, + 293 + ], + [ + 593, + 489 + ], + [ + 598, + 489 + ], + [ + 588, + 293 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 573, + 395 + ], + [ + 567, + 493 + ], + [ + 571, + 492 + ], + [ + 576, + 396 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 654, + 324 + ], + [ + 658, + 486 + ], + [ + 662, + 486 + ], + [ + 657, + 323 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 825, + 430 + ], + [ + 824, + 416 + ], + [ + 815, + 416 + ], + [ + 816, + 430 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 751, + 375 + ], + [ + 754, + 446 + ], + [ + 757, + 446 + ], + [ + 753, + 375 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 715, + 439 + ], + [ + 715, + 466 + ], + [ + 727, + 461 + ], + [ + 727, + 439 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 717, + 388 + ], + [ + 718, + 410 + ], + [ + 732, + 409 + ], + [ + 730, + 387 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 698, + 352 + ], + [ + 698, + 467 + ], + [ + 701, + 467 + ], + [ + 701, + 352 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 165, + 282 + ], + [ + 176, + 475 + ], + [ + 173, + 485 + ], + [ + 175, + 543 + ], + [ + 189, + 542 + ], + [ + 187, + 485 + ], + [ + 184, + 475 + ], + [ + 173, + 282 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 248, + 521 + ], + [ + 230, + 177 + ], + [ + 222, + 177 + ], + [ + 238, + 521 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 205, + 155 + ], + [ + 215, + 159 + ], + [ + 218, + 170 + ], + [ + 222, + 181 + ], + [ + 223, + 183 + ], + [ + 228, + 183 + ], + [ + 229, + 211 + ], + [ + 247, + 211 + ], + [ + 245, + 188 + ], + [ + 240, + 184 + ], + [ + 228, + 185 + ], + [ + 230, + 176 + ], + [ + 229, + 169 + ], + [ + 233, + 155 + ], + [ + 233, + 148 + ], + [ + 239, + 144 + ], + [ + 246, + 139 + ], + [ + 238, + 128 + ], + [ + 220, + 116 + ], + [ + 217, + 120 + ], + [ + 219, + 126 + ], + [ + 217, + 135 + ], + [ + 214, + 135 + ], + [ + 211, + 143 + ], + [ + 206, + 149 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 156, + 300 + ], + [ + 141, + 302 + ], + [ + 141, + 305 + ], + [ + 130, + 308 + ], + [ + 128, + 311 + ], + [ + 128, + 315 + ], + [ + 129, + 318 + ], + [ + 132, + 320 + ], + [ + 141, + 321 + ], + [ + 143, + 328 + ], + [ + 130, + 329 + ], + [ + 129, + 334 + ], + [ + 131, + 338 + ], + [ + 144, + 342 + ], + [ + 145, + 348 + ], + [ + 132, + 351 + ], + [ + 130, + 356 + ], + [ + 133, + 360 + ], + [ + 148, + 364 + ], + [ + 153, + 372 + ], + [ + 171, + 373 + ], + [ + 167, + 300 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 196, + 298 + ], + [ + 184, + 298 + ], + [ + 174, + 300 + ], + [ + 172, + 306 + ], + [ + 
171, + 310 + ], + [ + 171, + 314 + ], + [ + 174, + 318 + ], + [ + 175, + 326 + ], + [ + 173, + 331 + ], + [ + 174, + 336 + ], + [ + 177, + 341 + ], + [ + 177, + 348 + ], + [ + 175, + 355 + ], + [ + 176, + 362 + ], + [ + 179, + 366 + ], + [ + 184, + 370 + ], + [ + 201, + 368 + ], + [ + 197, + 299 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 346, + 528 + ], + [ + 340, + 485 + ], + [ + 333, + 478 + ], + [ + 326, + 487 + ], + [ + 324, + 527 + ], + [ + 315, + 533 + ], + [ + 314, + 537 + ], + [ + 344, + 536 + ], + [ + 356, + 533 + ], + [ + 349, + 528 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 209, + 276 + ], + [ + 202, + 280 + ], + [ + 197, + 290 + ], + [ + 196, + 302 + ], + [ + 198, + 311 + ], + [ + 203, + 320 + ], + [ + 211, + 324 + ], + [ + 219, + 324 + ], + [ + 226, + 320 + ], + [ + 230, + 311 + ], + [ + 231, + 298 + ], + [ + 228, + 286 + ], + [ + 219, + 277 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 213, + 330 + ], + [ + 209, + 333 + ], + [ + 206, + 342 + ], + [ + 206, + 354 + ], + [ + 210, + 361 + ], + [ + 215, + 363 + ], + [ + 223, + 361 + ], + [ + 226, + 356 + ], + [ + 228, + 345 + ], + [ + 225, + 334 + ], + [ + 220, + 330 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 486, + 338 + ], + [ + 487, + 360 + ], + [ + 497, + 361 + ], + [ + 496, + 339 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 488, + 372 + ], + [ + 488, + 360 + ], + [ + 498, + 361 + ], + [ + 497, + 373 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 494, + 383 + ], + [ + 488, + 383 + ], + [ + 488, + 371 + ], + [ + 499, + 372 + ], + [ + 499, + 383 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 486, + 278 + ], + [ + 485, + 282 + ], + [ + 490, + 286 + ], + [ + 491, + 292 + ], + [ + 495, + 296 + ], + [ + 500, + 296 + ], + [ + 503, + 294 + ], + [ + 505, + 288 + ], + [ + 505, + 284 + ], + [ + 503, + 281 + ], + [ + 498, + 280 + ], + [ + 490, + 278 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 571, + 378 + ], + [ + 571, + 397 + ], + [ + 582, + 397 + ], + [ + 583, + 379 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 582, + 363 + ], + [ + 571, + 361 + ], + [ + 571, + 378 + ], + [ + 582, + 379 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 668, + 387 + ], + [ + 659, + 386 + ], + [ + 659, + 400 + ], + [ + 668, + 400 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 668, + 416 + ], + [ + 668, + 400 + ], + [ + 659, + 400 + ], + [ + 660, + 415 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 622, + 270 + ], + [ + 623, + 314 + ], + [ + 623, + 326 + ], + [ + 626, + 382 + ], + [ + 630, + 466 + ], + [ + 628, + 474 + ], + [ + 631, + 521 + ], + [ + 642, + 521 + ], + [ + 640, + 475 + ], + [ + 638, + 466 + ], + [ + 629, + 288 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 639, + 272 + ], + [ + 622, + 268 + ], + [ + 622, + 289 + ], + [ + 639, + 293 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 608, + 300 + ], + [ + 608, + 311 + ], + [ + 645, + 310 + ], + [ + 644, + 299 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 619, + 321 + ], + [ + 621, + 318 + ], + [ + 632, + 319 + ], + [ + 632, + 326 + ], + [ + 643, + 327 + ], + [ + 645, + 330 + ], + [ + 645, + 333 + ], + [ + 643, + 336 + ], + [ + 632, + 337 + ], + [ + 633, + 344 + ], + [ + 643, + 345 + ], + [ + 646, + 348 + ], + [ + 645, + 352 + ], + [ + 633, + 355 + ], + [ + 633, + 362 + ], + [ + 643, + 363 + ], + [ + 644, + 366 + ], + [ + 644, + 369 + ], + [ 
+ 635, + 373 + ], + [ + 635, + 376 + ], + [ + 632, + 386 + ], + [ + 626, + 386 + ], + [ + 622, + 376 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 899, + 430 + ], + [ + 900, + 458 + ], + [ + 869, + 452 + ], + [ + 845, + 438 + ], + [ + 852, + 433 + ], + [ + 861, + 431 + ], + [ + 884, + 430 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 940, + 455 + ], + [ + 934, + 452 + ], + [ + 910, + 453 + ], + [ + 899, + 453 + ], + [ + 893, + 450 + ], + [ + 888, + 448 + ], + [ + 885, + 445 + ], + [ + 876, + 446 + ], + [ + 869, + 473 + ], + [ + 876, + 481 + ], + [ + 892, + 480 + ], + [ + 913, + 474 + ], + [ + 916, + 471 + ], + [ + 923, + 470 + ], + [ + 930, + 471 + ], + [ + 936, + 468 + ], + [ + 942, + 465 + ], + [ + 946, + 464 + ], + [ + 946, + 458 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 909, + 417 + ], + [ + 906, + 425 + ], + [ + 914, + 425 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 915, + 430 + ], + [ + 915, + 425 + ], + [ + 906, + 425 + ], + [ + 906, + 431 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 914, + 432 + ], + [ + 912, + 429 + ], + [ + 909, + 429 + ], + [ + 907, + 431 + ], + [ + 907, + 436 + ], + [ + 910, + 437 + ], + [ + 913, + 436 + ], + [ + 914, + 434 + ], + [ + 914, + 433 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 947, + 473 + ], + [ + 948, + 463 + ], + [ + 946, + 462 + ], + [ + 946, + 458 + ], + [ + 950, + 457 + ], + [ + 951, + 456 + ], + [ + 959, + 448 + ], + [ + 962, + 448 + ], + [ + 984, + 448 + ], + [ + 990, + 450 + ], + [ + 992, + 456 + ], + [ + 994, + 458 + ], + [ + 996, + 459 + ], + [ + 997, + 461 + ], + [ + 998, + 465 + ], + [ + 998, + 468 + ], + [ + 998, + 475 + ], + [ + 998, + 487 + ], + [ + 996, + 488 + ], + [ + 990, + 488 + ], + [ + 990, + 484 + ], + [ + 956, + 484 + ], + [ + 955, + 488 + ], + [ + 948, + 488 + ], + [ + 948, + 480 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 887, + 469 + ], + [ + 884, + 449 + ], + [ + 882, + 440 + ], + [ + 881, + 437 + ], + [ + 879, + 436 + ], + [ + 851, + 437 + ], + [ + 844, + 447 + ], + [ + 866, + 485 + ], + [ + 877, + 482 + ], + [ + 879, + 477 + ], + [ + 886, + 475 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 842, + 436 + ], + [ + 804, + 438 + ], + [ + 801, + 441 + ], + [ + 853, + 487 + ], + [ + 861, + 485 + ], + [ + 864, + 481 + ], + [ + 872, + 480 + ], + [ + 876, + 474 + ], + [ + 875, + 464 + ], + [ + 871, + 454 + ], + [ + 866, + 449 + ], + [ + 856, + 448 + ], + [ + 854, + 445 + ], + [ + 854, + 441 + ], + [ + 853, + 438 + ], + [ + 851, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 840, + 443 + ], + [ + 829, + 442 + ], + [ + 849, + 485 + ], + [ + 857, + 481 + ], + [ + 859, + 475 + ], + [ + 856, + 464 + ], + [ + 855, + 458 + ], + [ + 850, + 448 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 824, + 440 + ], + [ + 803, + 439 + ], + [ + 790, + 440 + ], + [ + 776, + 442 + ], + [ + 769, + 448 + ], + [ + 781, + 491 + ], + [ + 790, + 494 + ], + [ + 800, + 491 + ], + [ + 819, + 491 + ], + [ + 836, + 490 + ], + [ + 843, + 490 + ], + [ + 849, + 488 + ], + [ + 852, + 481 + ], + [ + 849, + 467 + ], + [ + 846, + 455 + ], + [ + 840, + 445 + ], + [ + 835, + 442 + ], + [ + 826, + 440 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 761, + 443 + ], + [ + 748, + 444 + ], + [ + 739, + 447 + ], + [ + 728, + 453 + ], + [ + 721, + 457 + ], + [ + 719, + 460 + ], + [ + 714, + 462 + ], + [ + 701, + 465 + ], + [ + 690, + 469 + ], + [ + 687, + 476 + ], + [ + 688, + 483 + ], + [ + 692, + 489 + ], + [ + 696, + 492 + 
], + [ + 702, + 492 + ], + [ + 712, + 492 + ], + [ + 737, + 493 + ], + [ + 769, + 492 + ], + [ + 779, + 494 + ], + [ + 803, + 494 + ], + [ + 817, + 492 + ], + [ + 825, + 486 + ], + [ + 827, + 471 + ], + [ + 823, + 459 + ], + [ + 813, + 453 + ], + [ + 799, + 444 + ], + [ + 778, + 443 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1064, + 444 + ], + [ + 1046, + 446 + ], + [ + 1041, + 452 + ], + [ + 1040, + 456 + ], + [ + 1037, + 458 + ], + [ + 1036, + 461 + ], + [ + 1035, + 473 + ], + [ + 1035, + 481 + ], + [ + 1037, + 485 + ], + [ + 1041, + 486 + ], + [ + 1050, + 485 + ], + [ + 1058, + 466 + ], + [ + 1059, + 456 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1097, + 442 + ], + [ + 1087, + 441 + ], + [ + 1067, + 443 + ], + [ + 1057, + 446 + ], + [ + 1053, + 450 + ], + [ + 1051, + 456 + ], + [ + 1047, + 456 + ], + [ + 1046, + 460 + ], + [ + 1049, + 461 + ], + [ + 1048, + 472 + ], + [ + 1047, + 480 + ], + [ + 1047, + 484 + ], + [ + 1049, + 486 + ], + [ + 1055, + 485 + ], + [ + 1062, + 477 + ], + [ + 1067, + 466 + ], + [ + 1072, + 462 + ], + [ + 1098, + 445 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1085, + 489 + ], + [ + 1077, + 489 + ], + [ + 1070, + 486 + ], + [ + 1067, + 473 + ], + [ + 1065, + 463 + ], + [ + 1066, + 457 + ], + [ + 1069, + 452 + ], + [ + 1077, + 450 + ], + [ + 1107, + 437 + ], + [ + 1121, + 433 + ], + [ + 1145, + 429 + ], + [ + 1166, + 429 + ], + [ + 1183, + 433 + ], + [ + 1135, + 482 + ], + [ + 1096, + 499 + ], + [ + 1091, + 496 + ], + [ + 1087, + 492 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1224, + 430 + ], + [ + 1204, + 428 + ], + [ + 1168, + 430 + ], + [ + 1150, + 433 + ], + [ + 1142, + 436 + ], + [ + 1131, + 444 + ], + [ + 1119, + 459 + ], + [ + 1118, + 476 + ], + [ + 1145, + 492 + ], + [ + 1241, + 469 + ], + [ + 1241, + 439 + ], + [ + 1233, + 434 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1090, + 503 + ], + [ + 1096, + 496 + ], + [ + 1093, + 485 + ], + [ + 1091, + 473 + ], + [ + 1091, + 462 + ], + [ + 1099, + 455 + ], + [ + 1112, + 445 + ], + [ + 1117, + 443 + ], + [ + 1123, + 439 + ], + [ + 1134, + 444 + ], + [ + 1139, + 454 + ], + [ + 1145, + 455 + ], + [ + 1150, + 448 + ], + [ + 1154, + 443 + ], + [ + 1160, + 446 + ], + [ + 1162, + 451 + ], + [ + 1156, + 457 + ], + [ + 1157, + 461 + ], + [ + 1162, + 460 + ], + [ + 1166, + 452 + ], + [ + 1168, + 451 + ], + [ + 1197, + 440 + ], + [ + 1202, + 440 + ], + [ + 1205, + 450 + ], + [ + 1212, + 454 + ], + [ + 1211, + 446 + ], + [ + 1214, + 439 + ], + [ + 1230, + 443 + ], + [ + 1238, + 450 + ], + [ + 1241, + 463 + ], + [ + 1254, + 462 + ], + [ + 1263, + 452 + ], + [ + 1271, + 442 + ], + [ + 1279, + 440 + ], + [ + 1292, + 443 + ], + [ + 1303, + 449 + ], + [ + 1310, + 461 + ], + [ + 1310, + 465 + ], + [ + 1297, + 466 + ], + [ + 1298, + 473 + ], + [ + 1296, + 480 + ], + [ + 1295, + 482 + ], + [ + 1304, + 483 + ], + [ + 1313, + 486 + ], + [ + 1320, + 492 + ], + [ + 1317, + 496 + ], + [ + 1269, + 500 + ], + [ + 1172, + 506 + ], + [ + 1163, + 510 + ], + [ + 1152, + 508 + ], + [ + 1129, + 510 + ], + [ + 1099, + 512 + ], + [ + 1091, + 511 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1114, + 0 + ], + [ + 1127, + 511 + ], + [ + 1138, + 511 + ], + [ + 1119, + 0 + ] + ] + }, + { + "label": "polegroup", + "polygon": [ + [ + 1154, + 516 + ], + [ + 1156, + 508 + ], + [ + 1153, + 464 + ], + [ + 1144, + 460 + ], + [ + 1138, + 459 + ], + [ + 1140, + 474 + ], + [ + 1142, + 512 + ], + [ + 1148, + 515 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1224, + 449 + 
], + [ + 1227, + 504 + ], + [ + 1230, + 503 + ], + [ + 1227, + 449 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1297, + 445 + ], + [ + 1300, + 499 + ], + [ + 1303, + 499 + ], + [ + 1300, + 445 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1267, + 524 + ], + [ + 1261, + 461 + ], + [ + 1256, + 461 + ], + [ + 1260, + 524 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1194, + 444 + ], + [ + 1180, + 444 + ], + [ + 1174, + 444 + ], + [ + 1175, + 482 + ], + [ + 1183, + 481 + ], + [ + 1192, + 479 + ], + [ + 1194, + 476 + ], + [ + 1196, + 448 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1179, + 522 + ], + [ + 1177, + 473 + ], + [ + 1175, + 466 + ], + [ + 1174, + 381 + ], + [ + 1172, + 273 + ], + [ + 1167, + 273 + ], + [ + 1167, + 303 + ], + [ + 1169, + 465 + ], + [ + 1167, + 473 + ], + [ + 1168, + 522 + ], + [ + 1173, + 523 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1162, + 304 + ], + [ + 1161, + 323 + ], + [ + 1173, + 320 + ], + [ + 1174, + 300 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1162, + 278 + ], + [ + 1162, + 294 + ], + [ + 1167, + 295 + ], + [ + 1167, + 277 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1233, + 290 + ], + [ + 1231, + 287 + ], + [ + 1225, + 286 + ], + [ + 1172, + 286 + ], + [ + 1172, + 304 + ], + [ + 1192, + 304 + ], + [ + 1193, + 311 + ], + [ + 1194, + 312 + ], + [ + 1213, + 312 + ], + [ + 1214, + 309 + ], + [ + 1214, + 303 + ], + [ + 1229, + 303 + ], + [ + 1232, + 302 + ], + [ + 1233, + 300 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1827, + 732 + ], + [ + 1890, + 715 + ], + [ + 1981, + 700 + ], + [ + 2048, + 692 + ], + [ + 2048, + 1023 + ], + [ + 2013, + 1023 + ], + [ + 1836, + 913 + ], + [ + 1787, + 877 + ], + [ + 1753, + 843 + ], + [ + 1742, + 820 + ], + [ + 1741, + 794 + ], + [ + 1752, + 770 + ], + [ + 1777, + 752 + ], + [ + 1812, + 737 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000061_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000061_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..41e3825f146eb90af0f2d1ac11e158001226428a Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000061_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000061_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000061_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..639446c53a796474c854071aefe9862d18b97fe4 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000061_000019_gtFine_labelIds.png differ diff --git 
a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000062_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000062_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..eb644995c12496a45ebfd1e8cc5b1ecb182c4c92 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000062_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000062_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000062_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..9ccd8c1f94275975cedae45acc717ada04786878 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000062_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000062_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000062_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..8dce48f8f132b75a23b86d7865f801f61ddabb2b --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000062_000019_gtFine_polygons.json @@ -0,0 +1,6052 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1327, + 0 + ], + [ + 806, + 0 + ], + [ + 833, + 168 + ], + [ + 1024, + 383 + ], + [ + 1155, + 371 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 540, + 528 + ], + [ + 0, + 526 + ], + [ + 0, + 1 + ], + [ + 849, + 1 + ], + [ + 849, + 47 + ], + [ + 876, + 48 + ], + [ + 887, + 70 + ], + [ + 886, + 118 + ], + [ + 909, + 157 + ], + [ + 946, + 156 + ], + [ + 1047, + 341 + ], + [ + 1061, + 334 + ], + [ + 1070, + 332 + ], + [ + 1079, + 335 + ], + [ + 1082, + 338 + ], + [ + 1089, + 339 + ], + [ + 1105, + 339 + ], + [ + 1105, + 335 + ], + [ + 1107, + 337 + ], + [ + 1121, + 337 + ], + [ + 1121, + 330 + ], + [ + 1140, + 295 + ], + [ + 1145, + 293 + ], + [ + 1155, + 276 + ], + [ + 1159, + 277 + ], + [ + 1164, + 264 + ], + [ + 1164, + 196 + ], + [ + 1179, + 162 + ], + [ + 1183, + 162 + ], + [ + 1195, + 140 + ], + [ + 1215, + 138 + ], + [ + 1214, + 104 + ], + [ + 1231, + 62 + ], + [ + 1230, + 53 + ], + [ + 1254, + 1 + ], + [ + 2048, + 1 + ], + [ + 2047, + 524 + ], + [ + 1566, + 528 + ], + [ + 1345, + 502 + ], + [ + 726, + 525 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1099, + 458 + ], + [ + 967, + 467 + ], + [ + 938, + 469 + ], + [ + 851, + 471 + ], + [ + 635, + 520 + ], + [ + 0, + 532 + ], + [ + 0, + 1024 + ], + [ + 2047, + 1024 + ], + [ + 2047, + 534 + ], + [ + 1521, + 527 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 965, + 471 + ], + [ + 930, + 474 + ], + [ + 903, + 475 + ], + [ + 842, + 474 + ], + [ + 853, + 469 + ], + [ + 873, + 469 + ], + [ + 891, + 467 + ], + [ + 918, + 464 + ], + [ + 970, + 461 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 431, + 546 + ], + [ + 181, + 550 + ], + [ + 49, + 550 + ], + [ + 0, + 553 + ], + [ + 0, + 509 + ], + [ + 158, + 509 + ], + [ + 253, + 510 + ], + [ + 403, + 511 + ], + [ + 539, + 515 + ], + [ + 563, + 507 + ], + [ + 600, + 505 + ], + [ + 635, + 501 + ], + [ + 735, + 491 + ], + [ + 751, + 491 + ], + [ + 788, + 486 + ], + [ + 842, + 474 + ], + [ + 909, + 475 + ], + [ + 929, + 477 + ], + [ + 929, + 480 + ], + [ + 922, + 482 + ], + [ + 793, + 517 + ], + [ + 742, + 532 + ], + [ + 700, + 539 + ], + [ + 615, + 542 + ], + [ + 497, + 545 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 241, + 631 + ], + [ + 0, + 691 + ], + [ + 0, + 551 
+ ], + [ + 45, + 554 + ], + [ + 103, + 555 + ], + [ + 188, + 558 + ], + [ + 310, + 562 + ], + [ + 344, + 565 + ], + [ + 364, + 569 + ], + [ + 379, + 574 + ], + [ + 382, + 581 + ], + [ + 382, + 596 + ], + [ + 322, + 612 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1454, + 541 + ], + [ + 1380, + 530 + ], + [ + 1324, + 524 + ], + [ + 1395, + 490 + ], + [ + 1429, + 491 + ], + [ + 1500, + 499 + ], + [ + 1564, + 507 + ], + [ + 1565, + 514 + ], + [ + 1574, + 515 + ], + [ + 1631, + 514 + ], + [ + 1690, + 511 + ], + [ + 1831, + 511 + ], + [ + 1930, + 510 + ], + [ + 1985, + 509 + ], + [ + 2047, + 507 + ], + [ + 2047, + 547 + ], + [ + 1779, + 545 + ], + [ + 1593, + 543 + ], + [ + 1509, + 543 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2019, + 608 + ], + [ + 2021, + 587 + ], + [ + 2032, + 579 + ], + [ + 2047, + 577 + ], + [ + 2047, + 617 + ], + [ + 2028, + 613 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 973, + 446 + ], + [ + 977, + 424 + ], + [ + 979, + 415 + ], + [ + 986, + 415 + ], + [ + 990, + 426 + ], + [ + 997, + 426 + ], + [ + 1006, + 421 + ], + [ + 1018, + 417 + ], + [ + 1023, + 412 + ], + [ + 1032, + 410 + ], + [ + 1038, + 414 + ], + [ + 1041, + 414 + ], + [ + 1042, + 405 + ], + [ + 1034, + 398 + ], + [ + 1034, + 386 + ], + [ + 1040, + 388 + ], + [ + 1044, + 384 + ], + [ + 1065, + 387 + ], + [ + 1068, + 384 + ], + [ + 1063, + 378 + ], + [ + 1067, + 369 + ], + [ + 1064, + 353 + ], + [ + 1061, + 340 + ], + [ + 1056, + 337 + ], + [ + 1048, + 331 + ], + [ + 1054, + 324 + ], + [ + 1059, + 321 + ], + [ + 1056, + 314 + ], + [ + 1049, + 313 + ], + [ + 1043, + 317 + ], + [ + 1040, + 317 + ], + [ + 1045, + 307 + ], + [ + 1045, + 305 + ], + [ + 1052, + 302 + ], + [ + 1052, + 299 + ], + [ + 1047, + 293 + ], + [ + 1054, + 290 + ], + [ + 1048, + 279 + ], + [ + 1052, + 273 + ], + [ + 1050, + 269 + ], + [ + 1039, + 265 + ], + [ + 1027, + 266 + ], + [ + 1033, + 255 + ], + [ + 1033, + 250 + ], + [ + 1028, + 246 + ], + [ + 1031, + 242 + ], + [ + 1031, + 235 + ], + [ + 1023, + 228 + ], + [ + 1018, + 225 + ], + [ + 1024, + 221 + ], + [ + 1026, + 214 + ], + [ + 1019, + 212 + ], + [ + 1019, + 206 + ], + [ + 1009, + 203 + ], + [ + 1021, + 195 + ], + [ + 1017, + 187 + ], + [ + 1009, + 187 + ], + [ + 994, + 173 + ], + [ + 989, + 174 + ], + [ + 985, + 169 + ], + [ + 978, + 157 + ], + [ + 971, + 152 + ], + [ + 968, + 158 + ], + [ + 939, + 143 + ], + [ + 934, + 154 + ], + [ + 930, + 170 + ], + [ + 924, + 181 + ], + [ + 917, + 185 + ], + [ + 912, + 191 + ], + [ + 901, + 205 + ], + [ + 903, + 212 + ], + [ + 891, + 217 + ], + [ + 885, + 226 + ], + [ + 885, + 245 + ], + [ + 892, + 245 + ], + [ + 902, + 248 + ], + [ + 902, + 251 + ], + [ + 893, + 259 + ], + [ + 892, + 266 + ], + [ + 892, + 271 + ], + [ + 902, + 271 + ], + [ + 908, + 273 + ], + [ + 902, + 280 + ], + [ + 896, + 288 + ], + [ + 890, + 295 + ], + [ + 886, + 303 + ], + [ + 892, + 303 + ], + [ + 886, + 318 + ], + [ + 886, + 327 + ], + [ + 894, + 329 + ], + [ + 890, + 346 + ], + [ + 899, + 352 + ], + [ + 908, + 352 + ], + [ + 916, + 351 + ], + [ + 922, + 351 + ], + [ + 930, + 357 + ], + [ + 929, + 456 + ], + [ + 969, + 455 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 963, + 431 + ], + [ + 957, + 424 + ], + [ + 944, + 416 + ], + [ + 914, + 410 + ], + [ + 889, + 422 + ], + [ + 889, + 424 + ], + [ + 903, + 426 + ], + [ + 906, + 429 + ], + [ + 925, + 431 + ], + [ + 953, + 431 + ], + [ + 957, + 432 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 919, + 457 + ], + [ + 921, + 441 + ], + [ + 
921, + 427 + ], + [ + 922, + 421 + ], + [ + 926, + 388 + ], + [ + 929, + 369 + ], + [ + 930, + 357 + ], + [ + 936, + 352 + ], + [ + 942, + 361 + ], + [ + 945, + 380 + ], + [ + 945, + 395 + ], + [ + 936, + 421 + ], + [ + 934, + 456 + ], + [ + 931, + 462 + ], + [ + 920, + 460 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 915, + 394 + ], + [ + 916, + 420 + ], + [ + 946, + 419 + ], + [ + 945, + 394 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 893, + 453 + ], + [ + 889, + 460 + ], + [ + 890, + 467 + ], + [ + 904, + 468 + ], + [ + 925, + 467 + ], + [ + 937, + 469 + ], + [ + 957, + 466 + ], + [ + 966, + 465 + ], + [ + 967, + 452 + ], + [ + 959, + 440 + ], + [ + 939, + 441 + ], + [ + 924, + 447 + ], + [ + 916, + 443 + ], + [ + 906, + 441 + ], + [ + 895, + 444 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 909, + 356 + ], + [ + 909, + 350 + ], + [ + 912, + 345 + ], + [ + 916, + 344 + ], + [ + 920, + 344 + ], + [ + 924, + 345 + ], + [ + 926, + 351 + ], + [ + 927, + 361 + ], + [ + 925, + 362 + ], + [ + 924, + 353 + ], + [ + 922, + 347 + ], + [ + 918, + 346 + ], + [ + 914, + 347 + ], + [ + 911, + 351 + ], + [ + 911, + 477 + ], + [ + 907, + 477 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 921, + 366 + ], + [ + 923, + 370 + ], + [ + 928, + 370 + ], + [ + 930, + 366 + ], + [ + 926, + 360 + ], + [ + 925, + 360 + ] + ] + }, + { + "label": "truck", + "polygon": [ + [ + 1097, + 442 + ], + [ + 1097, + 433 + ], + [ + 1057, + 433 + ], + [ + 1055, + 444 + ], + [ + 1049, + 446 + ], + [ + 1043, + 453 + ], + [ + 1049, + 463 + ], + [ + 1068, + 462 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1015, + 443 + ], + [ + 1002, + 444 + ], + [ + 997, + 442 + ], + [ + 989, + 445 + ], + [ + 1000, + 465 + ], + [ + 1010, + 464 + ], + [ + 1017, + 450 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 971, + 471 + ], + [ + 963, + 471 + ], + [ + 963, + 462 + ], + [ + 963, + 458 + ], + [ + 962, + 456 + ], + [ + 962, + 454 + ], + [ + 965, + 452 + ], + [ + 971, + 442 + ], + [ + 994, + 442 + ], + [ + 1000, + 452 + ], + [ + 1001, + 460 + ], + [ + 1000, + 470 + ], + [ + 994, + 470 + ], + [ + 994, + 469 + ], + [ + 976, + 468 + ], + [ + 975, + 470 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1051, + 465 + ], + [ + 1050, + 457 + ], + [ + 1049, + 451 + ], + [ + 1045, + 445 + ], + [ + 1042, + 441 + ], + [ + 1015, + 441 + ], + [ + 1011, + 448 + ], + [ + 1004, + 454 + ], + [ + 1002, + 460 + ], + [ + 1003, + 471 + ], + [ + 1009, + 471 + ], + [ + 1010, + 468 + ], + [ + 1012, + 468 + ], + [ + 1013, + 471 + ], + [ + 1018, + 471 + ], + [ + 1019, + 468 + ], + [ + 1033, + 468 + ], + [ + 1035, + 470 + ], + [ + 1037, + 471 + ], + [ + 1041, + 471 + ], + [ + 1042, + 467 + ], + [ + 1044, + 467 + ], + [ + 1046, + 471 + ], + [ + 1050, + 471 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1208, + 442 + ], + [ + 1196, + 441 + ], + [ + 1187, + 443 + ], + [ + 1169, + 442 + ], + [ + 1163, + 443 + ], + [ + 1159, + 442 + ], + [ + 1154, + 442 + ], + [ + 1138, + 442 + ], + [ + 1132, + 444 + ], + [ + 1144, + 478 + ], + [ + 1149, + 482 + ], + [ + 1154, + 484 + ], + [ + 1207, + 451 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1192, + 451 + ], + [ + 1191, + 440 + ], + [ + 1190, + 436 + ], + [ + 1190, + 431 + ], + [ + 1188, + 429 + ], + [ + 1183, + 429 + ], + [ + 1181, + 435 + ], + [ + 1176, + 440 + ], + [ + 1173, + 451 + ], + [ + 1176, + 453 + ], + [ + 1174, + 460 + ], + [ + 1174, + 469 + ], + [ + 1189, + 462 + ] + ] + }, + { + "label": "static", + 
"polygon": [ + [ + 1150, + 390 + ], + [ + 1150, + 409 + ], + [ + 1167, + 409 + ], + [ + 1167, + 390 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1190, + 413 + ], + [ + 1191, + 403 + ], + [ + 1200, + 404 + ], + [ + 1199, + 413 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1199, + 423 + ], + [ + 1199, + 413 + ], + [ + 1190, + 413 + ], + [ + 1189, + 423 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1209, + 405 + ], + [ + 1201, + 405 + ], + [ + 1201, + 419 + ], + [ + 1209, + 419 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1201, + 396 + ], + [ + 1201, + 405 + ], + [ + 1209, + 405 + ], + [ + 1209, + 395 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1235, + 405 + ], + [ + 1236, + 384 + ], + [ + 1228, + 384 + ], + [ + 1228, + 379 + ], + [ + 1231, + 380 + ], + [ + 1233, + 368 + ], + [ + 1221, + 369 + ], + [ + 1220, + 378 + ], + [ + 1224, + 379 + ], + [ + 1224, + 384 + ], + [ + 1222, + 385 + ], + [ + 1221, + 405 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1342, + 426 + ], + [ + 1331, + 417 + ], + [ + 1319, + 416 + ], + [ + 1309, + 420 + ], + [ + 1305, + 429 + ], + [ + 1336, + 432 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1361, + 432 + ], + [ + 1356, + 423 + ], + [ + 1353, + 417 + ], + [ + 1350, + 416 + ], + [ + 1345, + 418 + ], + [ + 1344, + 424 + ], + [ + 1341, + 428 + ], + [ + 1345, + 435 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1362, + 401 + ], + [ + 1362, + 433 + ], + [ + 1365, + 436 + ], + [ + 1365, + 400 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1355, + 378 + ], + [ + 1354, + 402 + ], + [ + 1374, + 403 + ], + [ + 1374, + 378 + ], + [ + 1365, + 373 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1354, + 358 + ], + [ + 1354, + 378 + ], + [ + 1374, + 378 + ], + [ + 1375, + 358 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1481, + 431 + ], + [ + 1477, + 422 + ], + [ + 1472, + 422 + ], + [ + 1466, + 427 + ], + [ + 1464, + 439 + ], + [ + 1463, + 467 + ], + [ + 1470, + 484 + ], + [ + 1485, + 469 + ], + [ + 1487, + 461 + ], + [ + 1492, + 451 + ], + [ + 1492, + 445 + ], + [ + 1485, + 434 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1444, + 426 + ], + [ + 1436, + 438 + ], + [ + 1435, + 456 + ], + [ + 1436, + 482 + ], + [ + 1439, + 496 + ], + [ + 1447, + 506 + ], + [ + 1448, + 477 + ], + [ + 1452, + 470 + ], + [ + 1456, + 483 + ], + [ + 1456, + 501 + ], + [ + 1460, + 507 + ], + [ + 1465, + 507 + ], + [ + 1464, + 499 + ], + [ + 1463, + 472 + ], + [ + 1467, + 465 + ], + [ + 1468, + 444 + ], + [ + 1462, + 428 + ], + [ + 1458, + 423 + ], + [ + 1456, + 416 + ], + [ + 1454, + 412 + ], + [ + 1448, + 412 + ], + [ + 1444, + 418 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1397, + 514 + ], + [ + 1399, + 497 + ], + [ + 1399, + 472 + ], + [ + 1391, + 454 + ], + [ + 1377, + 434 + ], + [ + 1369, + 429 + ], + [ + 1336, + 424 + ], + [ + 1298, + 423 + ], + [ + 1254, + 425 + ], + [ + 1221, + 433 + ], + [ + 1190, + 451 + ], + [ + 1181, + 453 + ], + [ + 1178, + 455 + ], + [ + 1175, + 461 + ], + [ + 1162, + 467 + ], + [ + 1158, + 469 + ], + [ + 1151, + 481 + ], + [ + 1149, + 489 + ], + [ + 1147, + 505 + ], + [ + 1148, + 516 + ], + [ + 1152, + 521 + ], + [ + 1156, + 521 + ], + [ + 1159, + 527 + ], + [ + 1163, + 531 + ], + [ + 1174, + 531 + ], + [ + 1178, + 528 + ], + [ + 1181, + 522 + ], + [ + 1201, + 523 + ], + [ + 1206, + 524 + ], + [ + 1218, + 523 + ], + [ + 1238, + 523 + ], + [ + 1241, + 528 + ], + [ + 
1244, + 531 + ], + [ + 1252, + 531 + ], + [ + 1257, + 528 + ], + [ + 1259, + 524 + ], + [ + 1276, + 525 + ], + [ + 1279, + 531 + ], + [ + 1288, + 535 + ], + [ + 1296, + 535 + ], + [ + 1303, + 533 + ], + [ + 1311, + 524 + ], + [ + 1355, + 524 + ], + [ + 1360, + 533 + ], + [ + 1367, + 536 + ], + [ + 1378, + 536 + ], + [ + 1387, + 531 + ], + [ + 1390, + 523 + ], + [ + 1392, + 518 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1062, + 486 + ], + [ + 1063, + 468 + ], + [ + 1066, + 461 + ], + [ + 1062, + 459 + ], + [ + 1059, + 458 + ], + [ + 1058, + 455 + ], + [ + 1059, + 453 + ], + [ + 1064, + 452 + ], + [ + 1067, + 453 + ], + [ + 1069, + 456 + ], + [ + 1074, + 448 + ], + [ + 1077, + 443 + ], + [ + 1082, + 440 + ], + [ + 1089, + 439 + ], + [ + 1108, + 438 + ], + [ + 1126, + 440 + ], + [ + 1133, + 444 + ], + [ + 1138, + 453 + ], + [ + 1141, + 461 + ], + [ + 1145, + 470 + ], + [ + 1145, + 483 + ], + [ + 1144, + 496 + ], + [ + 1142, + 502 + ], + [ + 1130, + 503 + ], + [ + 1125, + 499 + ], + [ + 1125, + 495 + ], + [ + 1098, + 494 + ], + [ + 1078, + 493 + ], + [ + 1078, + 499 + ], + [ + 1077, + 501 + ], + [ + 1070, + 501 + ], + [ + 1063, + 497 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1097, + 471 + ], + [ + 1097, + 465 + ], + [ + 1120, + 465 + ], + [ + 1120, + 471 + ], + [ + 1099, + 471 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1351, + 476 + ], + [ + 1351, + 467 + ], + [ + 1379, + 468 + ], + [ + 1379, + 477 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 888, + 338 + ], + [ + 888, + 326 + ], + [ + 883, + 321 + ], + [ + 879, + 320 + ], + [ + 874, + 321 + ], + [ + 870, + 324 + ], + [ + 869, + 328 + ], + [ + 869, + 455 + ], + [ + 870, + 459 + ], + [ + 870, + 485 + ], + [ + 865, + 485 + ], + [ + 865, + 459 + ], + [ + 866, + 456 + ], + [ + 867, + 326 + ], + [ + 869, + 322 + ], + [ + 871, + 320 + ], + [ + 874, + 318 + ], + [ + 879, + 318 + ], + [ + 883, + 319 + ], + [ + 887, + 322 + ], + [ + 889, + 326 + ], + [ + 889, + 341 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 884, + 343 + ], + [ + 887, + 347 + ], + [ + 892, + 347 + ], + [ + 894, + 343 + ], + [ + 891, + 339 + ], + [ + 890, + 335 + ], + [ + 886, + 335 + ], + [ + 886, + 338 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 826, + 441 + ], + [ + 824, + 434 + ], + [ + 822, + 432 + ], + [ + 823, + 426 + ], + [ + 820, + 421 + ], + [ + 815, + 421 + ], + [ + 813, + 426 + ], + [ + 814, + 430 + ], + [ + 809, + 432 + ], + [ + 807, + 435 + ], + [ + 807, + 445 + ], + [ + 814, + 486 + ], + [ + 817, + 485 + ], + [ + 819, + 480 + ], + [ + 819, + 473 + ], + [ + 822, + 464 + ], + [ + 822, + 458 + ], + [ + 823, + 449 + ], + [ + 826, + 453 + ], + [ + 828, + 445 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 841, + 444 + ], + [ + 838, + 439 + ], + [ + 837, + 433 + ], + [ + 835, + 430 + ], + [ + 832, + 430 + ], + [ + 829, + 433 + ], + [ + 828, + 440 + ], + [ + 826, + 441 + ], + [ + 825, + 450 + ], + [ + 826, + 459 + ], + [ + 827, + 465 + ], + [ + 828, + 472 + ], + [ + 831, + 481 + ], + [ + 830, + 487 + ], + [ + 832, + 489 + ], + [ + 833, + 491 + ], + [ + 837, + 491 + ], + [ + 840, + 489 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 844, + 424 + ], + [ + 841, + 418 + ], + [ + 825, + 410 + ], + [ + 810, + 411 + ], + [ + 800, + 418 + ], + [ + 805, + 421 + ], + [ + 825, + 424 + ], + [ + 839, + 425 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 780, + 436 + ], + [ + 775, + 447 + ], + [ + 775, + 453 + ], + [ + 779, + 456 + ], + [ + 779, + 466 + ], + 
[ + 781, + 471 + ], + [ + 782, + 483 + ], + [ + 779, + 489 + ], + [ + 782, + 489 + ], + [ + 786, + 489 + ], + [ + 788, + 487 + ], + [ + 790, + 481 + ], + [ + 792, + 478 + ], + [ + 795, + 479 + ], + [ + 800, + 477 + ], + [ + 799, + 469 + ], + [ + 797, + 459 + ], + [ + 793, + 448 + ], + [ + 788, + 436 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 779, + 428 + ], + [ + 767, + 433 + ], + [ + 761, + 439 + ], + [ + 772, + 440 + ], + [ + 784, + 438 + ], + [ + 800, + 435 + ], + [ + 792, + 430 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 835, + 303 + ], + [ + 837, + 447 + ], + [ + 838, + 455 + ], + [ + 841, + 491 + ], + [ + 834, + 492 + ], + [ + 832, + 449 + ], + [ + 834, + 442 + ], + [ + 832, + 307 + ], + [ + 834, + 297 + ], + [ + 838, + 294 + ], + [ + 843, + 292 + ], + [ + 848, + 293 + ], + [ + 852, + 294 + ], + [ + 857, + 299 + ], + [ + 859, + 307 + ], + [ + 859, + 318 + ], + [ + 856, + 316 + ], + [ + 856, + 308 + ], + [ + 855, + 301 + ], + [ + 849, + 295 + ], + [ + 843, + 295 + ], + [ + 838, + 297 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 860, + 320 + ], + [ + 860, + 315 + ], + [ + 854, + 314 + ], + [ + 853, + 319 + ], + [ + 849, + 322 + ], + [ + 852, + 330 + ], + [ + 860, + 330 + ], + [ + 863, + 324 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 856, + 445 + ], + [ + 847, + 442 + ], + [ + 841, + 443 + ], + [ + 836, + 445 + ], + [ + 838, + 491 + ], + [ + 854, + 491 + ], + [ + 857, + 480 + ], + [ + 856, + 447 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 798, + 439 + ], + [ + 797, + 486 + ], + [ + 799, + 489 + ], + [ + 799, + 500 + ], + [ + 815, + 500 + ], + [ + 815, + 485 + ], + [ + 815, + 481 + ], + [ + 816, + 439 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 733, + 519 + ], + [ + 731, + 512 + ], + [ + 731, + 484 + ], + [ + 725, + 475 + ], + [ + 720, + 483 + ], + [ + 717, + 512 + ], + [ + 714, + 518 + ], + [ + 724, + 519 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 713, + 477 + ], + [ + 700, + 472 + ], + [ + 685, + 476 + ], + [ + 683, + 516 + ], + [ + 681, + 523 + ], + [ + 684, + 528 + ], + [ + 709, + 528 + ], + [ + 710, + 523 + ], + [ + 713, + 489 + ], + [ + 714, + 479 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 699, + 481 + ], + [ + 692, + 489 + ], + [ + 689, + 523 + ], + [ + 685, + 528 + ], + [ + 707, + 529 + ], + [ + 703, + 523 + ], + [ + 704, + 490 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 877, + 119 + ], + [ + 879, + 115 + ], + [ + 881, + 114 + ], + [ + 904, + 113 + ], + [ + 908, + 115 + ], + [ + 911, + 118 + ], + [ + 910, + 175 + ], + [ + 909, + 180 + ], + [ + 906, + 182 + ], + [ + 880, + 181 + ], + [ + 878, + 179 + ], + [ + 876, + 176 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 676, + 530 + ], + [ + 674, + 525 + ], + [ + 674, + 472 + ], + [ + 673, + 464 + ], + [ + 673, + 375 + ], + [ + 673, + 318 + ], + [ + 675, + 311 + ], + [ + 675, + 305 + ], + [ + 673, + 300 + ], + [ + 672, + 203 + ], + [ + 675, + 188 + ], + [ + 681, + 178 + ], + [ + 689, + 170 + ], + [ + 703, + 162 + ], + [ + 841, + 131 + ], + [ + 890, + 130 + ], + [ + 890, + 126 + ], + [ + 840, + 127 + ], + [ + 701, + 158 + ], + [ + 688, + 165 + ], + [ + 678, + 173 + ], + [ + 673, + 181 + ], + [ + 668, + 193 + ], + [ + 667, + 203 + ], + [ + 667, + 298 + ], + [ + 665, + 305 + ], + [ + 665, + 312 + ], + [ + 667, + 316 + ], + [ + 665, + 462 + ], + [ + 663, + 472 + ], + [ + 663, + 526 + ], + [ + 662, + 530 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 617, + 326 + ], + [ + 639, 
+ 375 + ], + [ + 642, + 375 + ], + [ + 663, + 337 + ], + [ + 669, + 328 + ], + [ + 661, + 326 + ], + [ + 619, + 326 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 662, + 318 + ], + [ + 663, + 377 + ], + [ + 677, + 377 + ], + [ + 678, + 370 + ], + [ + 685, + 370 + ], + [ + 688, + 366 + ], + [ + 687, + 360 + ], + [ + 683, + 359 + ], + [ + 683, + 354 + ], + [ + 686, + 352 + ], + [ + 687, + 346 + ], + [ + 686, + 342 + ], + [ + 683, + 341 + ], + [ + 684, + 337 + ], + [ + 687, + 334 + ], + [ + 688, + 328 + ], + [ + 687, + 325 + ], + [ + 678, + 323 + ], + [ + 677, + 320 + ], + [ + 664, + 318 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 636, + 483 + ], + [ + 634, + 527 + ], + [ + 627, + 534 + ], + [ + 652, + 536 + ], + [ + 660, + 534 + ], + [ + 653, + 530 + ], + [ + 651, + 483 + ], + [ + 645, + 476 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 580, + 474 + ], + [ + 573, + 483 + ], + [ + 569, + 531 + ], + [ + 561, + 536 + ], + [ + 568, + 539 + ], + [ + 591, + 540 + ], + [ + 595, + 538 + ], + [ + 588, + 530 + ], + [ + 587, + 485 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 476, + 495 + ], + [ + 484, + 487 + ], + [ + 491, + 495 + ], + [ + 491, + 535 + ], + [ + 497, + 542 + ], + [ + 481, + 542 + ], + [ + 465, + 542 + ], + [ + 473, + 534 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 220, + 470 + ], + [ + 216, + 464 + ], + [ + 216, + 453 + ], + [ + 210, + 451 + ], + [ + 207, + 447 + ], + [ + 210, + 438 + ], + [ + 214, + 432 + ], + [ + 218, + 428 + ], + [ + 226, + 423 + ], + [ + 226, + 419 + ], + [ + 227, + 414 + ], + [ + 230, + 411 + ], + [ + 235, + 411 + ], + [ + 237, + 413 + ], + [ + 240, + 418 + ], + [ + 239, + 425 + ], + [ + 246, + 432 + ], + [ + 249, + 454 + ], + [ + 244, + 472 + ], + [ + 243, + 483 + ], + [ + 239, + 486 + ], + [ + 236, + 489 + ], + [ + 235, + 482 + ], + [ + 231, + 478 + ], + [ + 229, + 483 + ], + [ + 222, + 494 + ], + [ + 206, + 510 + ], + [ + 201, + 504 + ], + [ + 200, + 501 + ], + [ + 205, + 495 + ], + [ + 217, + 480 + ], + [ + 219, + 475 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 312, + 531 + ], + [ + 306, + 524 + ], + [ + 305, + 490 + ], + [ + 302, + 472 + ], + [ + 301, + 449 + ], + [ + 299, + 444 + ], + [ + 297, + 307 + ], + [ + 295, + 271 + ], + [ + 295, + 248 + ], + [ + 294, + 228 + ], + [ + 290, + 228 + ], + [ + 288, + 248 + ], + [ + 288, + 301 + ], + [ + 289, + 444 + ], + [ + 287, + 449 + ], + [ + 286, + 472 + ], + [ + 284, + 494 + ], + [ + 277, + 529 + ], + [ + 269, + 534 + ], + [ + 300, + 535 + ], + [ + 313, + 535 + ], + [ + 318, + 533 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 272, + 224 + ], + [ + 270, + 229 + ], + [ + 271, + 244 + ], + [ + 273, + 246 + ], + [ + 273, + 254 + ], + [ + 271, + 258 + ], + [ + 264, + 265 + ], + [ + 265, + 270 + ], + [ + 258, + 278 + ], + [ + 258, + 290 + ], + [ + 266, + 301 + ], + [ + 269, + 301 + ], + [ + 273, + 307 + ], + [ + 276, + 309 + ], + [ + 279, + 309 + ], + [ + 285, + 302 + ], + [ + 290, + 294 + ], + [ + 291, + 279 + ], + [ + 288, + 269 + ], + [ + 292, + 264 + ], + [ + 296, + 272 + ], + [ + 299, + 263 + ], + [ + 303, + 257 + ], + [ + 305, + 256 + ], + [ + 306, + 260 + ], + [ + 304, + 265 + ], + [ + 304, + 268 + ], + [ + 297, + 274 + ], + [ + 297, + 277 + ], + [ + 294, + 293 + ], + [ + 298, + 307 + ], + [ + 301, + 309 + ], + [ + 305, + 313 + ], + [ + 308, + 315 + ], + [ + 313, + 312 + ], + [ + 316, + 309 + ], + [ + 320, + 307 + ], + [ + 326, + 299 + ], + [ + 324, + 284 + ], + [ + 320, + 278 + ], + [ + 320, + 274 + ], + [ + 312, + 
268 + ], + [ + 313, + 266 + ], + [ + 311, + 261 + ], + [ + 312, + 255 + ], + [ + 312, + 241 + ], + [ + 309, + 234 + ], + [ + 305, + 235 + ], + [ + 304, + 239 + ], + [ + 299, + 242 + ], + [ + 293, + 237 + ], + [ + 287, + 242 + ], + [ + 284, + 235 + ], + [ + 278, + 229 + ], + [ + 277, + 224 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 24, + 266 + ], + [ + 13, + 256 + ], + [ + 12, + 252 + ], + [ + 0, + 244 + ], + [ + 0, + 205 + ], + [ + 13, + 210 + ], + [ + 21, + 218 + ], + [ + 32, + 229 + ], + [ + 51, + 235 + ], + [ + 67, + 235 + ], + [ + 75, + 233 + ], + [ + 83, + 239 + ], + [ + 95, + 237 + ], + [ + 91, + 231 + ], + [ + 101, + 233 + ], + [ + 112, + 236 + ], + [ + 118, + 235 + ], + [ + 129, + 239 + ], + [ + 126, + 230 + ], + [ + 109, + 216 + ], + [ + 94, + 203 + ], + [ + 91, + 199 + ], + [ + 76, + 189 + ], + [ + 71, + 191 + ], + [ + 59, + 188 + ], + [ + 28, + 193 + ], + [ + 8, + 186 + ], + [ + 6, + 179 + ], + [ + 12, + 175 + ], + [ + 30, + 179 + ], + [ + 57, + 176 + ], + [ + 51, + 167 + ], + [ + 54, + 163 + ], + [ + 62, + 165 + ], + [ + 63, + 169 + ], + [ + 69, + 169 + ], + [ + 72, + 173 + ], + [ + 67, + 179 + ], + [ + 74, + 184 + ], + [ + 79, + 179 + ], + [ + 83, + 187 + ], + [ + 95, + 183 + ], + [ + 87, + 175 + ], + [ + 79, + 168 + ], + [ + 72, + 167 + ], + [ + 63, + 156 + ], + [ + 78, + 163 + ], + [ + 92, + 169 + ], + [ + 117, + 172 + ], + [ + 99, + 161 + ], + [ + 85, + 154 + ], + [ + 85, + 150 + ], + [ + 73, + 146 + ], + [ + 69, + 146 + ], + [ + 65, + 138 + ], + [ + 78, + 140 + ], + [ + 100, + 149 + ], + [ + 123, + 160 + ], + [ + 114, + 154 + ], + [ + 105, + 146 + ], + [ + 93, + 144 + ], + [ + 89, + 125 + ], + [ + 97, + 130 + ], + [ + 111, + 141 + ], + [ + 123, + 142 + ], + [ + 142, + 151 + ], + [ + 162, + 153 + ], + [ + 156, + 145 + ], + [ + 155, + 138 + ], + [ + 161, + 138 + ], + [ + 152, + 127 + ], + [ + 144, + 115 + ], + [ + 127, + 101 + ], + [ + 110, + 95 + ], + [ + 90, + 85 + ], + [ + 112, + 78 + ], + [ + 123, + 70 + ], + [ + 140, + 72 + ], + [ + 151, + 80 + ], + [ + 176, + 86 + ], + [ + 167, + 70 + ], + [ + 160, + 70 + ], + [ + 150, + 63 + ], + [ + 162, + 64 + ], + [ + 159, + 58 + ], + [ + 151, + 52 + ], + [ + 160, + 47 + ], + [ + 167, + 49 + ], + [ + 167, + 44 + ], + [ + 170, + 44 + ], + [ + 179, + 50 + ], + [ + 190, + 51 + ], + [ + 185, + 44 + ], + [ + 184, + 39 + ], + [ + 195, + 34 + ], + [ + 215, + 41 + ], + [ + 221, + 33 + ], + [ + 233, + 29 + ], + [ + 243, + 17 + ], + [ + 243, + 7 + ], + [ + 242, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 265 + ], + [ + 13, + 266 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 50, + 560 + ], + [ + 46, + 1 + ], + [ + 33, + 0 + ], + [ + 34, + 559 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 92, + 481 + ], + [ + 94, + 546 + ], + [ + 147, + 546 + ], + [ + 144, + 481 + ], + [ + 110, + 477 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 131, + 473 + ], + [ + 119, + 491 + ], + [ + 117, + 541 + ], + [ + 125, + 561 + ], + [ + 163, + 560 + ], + [ + 155, + 548 + ], + [ + 146, + 542 + ], + [ + 144, + 491 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 330, + 535 + ], + [ + 329, + 497 + ], + [ + 321, + 486 + ], + [ + 313, + 496 + ], + [ + 309, + 535 + ], + [ + 302, + 544 + ], + [ + 331, + 546 + ], + [ + 337, + 542 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 254, + 493 + ], + [ + 241, + 479 + ], + [ + 230, + 493 + ], + [ + 228, + 500 + ], + [ + 230, + 567 + ], + [ + 270, + 566 + ], + [ + 257, + 552 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 178, + 297 + ], + [ + 177, + 
307 + ], + [ + 179, + 309 + ], + [ + 182, + 479 + ], + [ + 179, + 485 + ], + [ + 179, + 544 + ], + [ + 193, + 544 + ], + [ + 193, + 487 + ], + [ + 191, + 478 + ], + [ + 187, + 309 + ], + [ + 189, + 306 + ], + [ + 189, + 294 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 151, + 302 + ], + [ + 150, + 307 + ], + [ + 148, + 310 + ], + [ + 148, + 313 + ], + [ + 149, + 316 + ], + [ + 152, + 318 + ], + [ + 153, + 324 + ], + [ + 152, + 330 + ], + [ + 150, + 334 + ], + [ + 150, + 340 + ], + [ + 152, + 343 + ], + [ + 152, + 349 + ], + [ + 149, + 352 + ], + [ + 148, + 357 + ], + [ + 148, + 364 + ], + [ + 150, + 366 + ], + [ + 154, + 368 + ], + [ + 156, + 368 + ], + [ + 159, + 370 + ], + [ + 173, + 372 + ], + [ + 180, + 372 + ], + [ + 180, + 369 + ], + [ + 174, + 368 + ], + [ + 174, + 337 + ], + [ + 174, + 303 + ], + [ + 179, + 303 + ], + [ + 178, + 298 + ], + [ + 160, + 298 + ], + [ + 159, + 303 + ], + [ + 154, + 302 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 196, + 303 + ], + [ + 188, + 303 + ], + [ + 188, + 298 + ], + [ + 191, + 293 + ], + [ + 212, + 295 + ], + [ + 212, + 302 + ], + [ + 220, + 303 + ], + [ + 221, + 307 + ], + [ + 224, + 309 + ], + [ + 224, + 313 + ], + [ + 223, + 317 + ], + [ + 221, + 318 + ], + [ + 220, + 324 + ], + [ + 210, + 328 + ], + [ + 203, + 328 + ], + [ + 199, + 326 + ], + [ + 187, + 327 + ], + [ + 187, + 325 + ], + [ + 198, + 323 + ], + [ + 197, + 315 + ], + [ + 197, + 305 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 160, + 225 + ], + [ + 162, + 222 + ], + [ + 165, + 221 + ], + [ + 244, + 232 + ], + [ + 249, + 235 + ], + [ + 250, + 242 + ], + [ + 250, + 298 + ], + [ + 248, + 302 + ], + [ + 244, + 302 + ], + [ + 165, + 297 + ], + [ + 162, + 296 + ], + [ + 161, + 292 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 317, + 573 + ], + [ + 303, + 565 + ], + [ + 300, + 499 + ], + [ + 288, + 484 + ], + [ + 275, + 499 + ], + [ + 271, + 564 + ], + [ + 264, + 576 + ], + [ + 265, + 578 + ], + [ + 311, + 577 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 250, + 586 + ], + [ + 238, + 578 + ], + [ + 234, + 506 + ], + [ + 221, + 488 + ], + [ + 205, + 506 + ], + [ + 198, + 581 + ], + [ + 184, + 589 + ], + [ + 184, + 593 + ], + [ + 234, + 595 + ], + [ + 251, + 590 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 155, + 616 + ], + [ + 155, + 612 + ], + [ + 140, + 601 + ], + [ + 133, + 519 + ], + [ + 116, + 499 + ], + [ + 98, + 521 + ], + [ + 95, + 604 + ], + [ + 79, + 614 + ], + [ + 78, + 620 + ], + [ + 134, + 623 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1409, + 474 + ], + [ + 1403, + 481 + ], + [ + 1404, + 512 + ], + [ + 1401, + 516 + ], + [ + 1421, + 516 + ], + [ + 1418, + 511 + ], + [ + 1414, + 480 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1429, + 470 + ], + [ + 1423, + 477 + ], + [ + 1423, + 517 + ], + [ + 1421, + 521 + ], + [ + 1438, + 522 + ], + [ + 1441, + 520 + ], + [ + 1439, + 514 + ], + [ + 1436, + 477 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1444, + 469 + ], + [ + 1438, + 478 + ], + [ + 1441, + 519 + ], + [ + 1438, + 525 + ], + [ + 1438, + 525 + ], + [ + 1459, + 526 + ], + [ + 1459, + 524 + ], + [ + 1454, + 517 + ], + [ + 1449, + 477 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1466, + 528 + ], + [ + 1466, + 469 + ], + [ + 1469, + 462 + ], + [ + 1470, + 315 + ], + [ + 1469, + 313 + ], + [ + 1469, + 302 + ], + [ + 1478, + 302 + ], + [ + 1481, + 303 + ], + [ + 1480, + 318 + ], + [ + 1476, + 373 + ], + [ + 1475, + 461 + ], + 
[ + 1477, + 469 + ], + [ + 1477, + 482 + ], + [ + 1479, + 528 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1490, + 479 + ], + [ + 1483, + 472 + ], + [ + 1476, + 481 + ], + [ + 1476, + 524 + ], + [ + 1470, + 530 + ], + [ + 1470, + 531 + ], + [ + 1490, + 532 + ], + [ + 1496, + 527 + ], + [ + 1493, + 524 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1440, + 321 + ], + [ + 1435, + 325 + ], + [ + 1432, + 331 + ], + [ + 1432, + 339 + ], + [ + 1435, + 344 + ], + [ + 1439, + 347 + ], + [ + 1444, + 348 + ], + [ + 1452, + 347 + ], + [ + 1456, + 343 + ], + [ + 1459, + 335 + ], + [ + 1458, + 328 + ], + [ + 1453, + 322 + ], + [ + 1447, + 320 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1456, + 322 + ], + [ + 1456, + 326 + ], + [ + 1457, + 329 + ], + [ + 1468, + 334 + ], + [ + 1468, + 338 + ], + [ + 1455, + 341 + ], + [ + 1455, + 345 + ], + [ + 1456, + 348 + ], + [ + 1467, + 351 + ], + [ + 1467, + 356 + ], + [ + 1455, + 358 + ], + [ + 1455, + 362 + ], + [ + 1457, + 365 + ], + [ + 1467, + 367 + ], + [ + 1468, + 372 + ], + [ + 1473, + 374 + ], + [ + 1480, + 374 + ], + [ + 1481, + 344 + ], + [ + 1481, + 317 + ], + [ + 1470, + 317 + ], + [ + 1470, + 322 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1519, + 303 + ], + [ + 1481, + 302 + ], + [ + 1478, + 303 + ], + [ + 1477, + 316 + ], + [ + 1479, + 319 + ], + [ + 1489, + 321 + ], + [ + 1489, + 326 + ], + [ + 1490, + 328 + ], + [ + 1492, + 329 + ], + [ + 1508, + 329 + ], + [ + 1511, + 327 + ], + [ + 1511, + 320 + ], + [ + 1520, + 320 + ], + [ + 1522, + 318 + ], + [ + 1523, + 317 + ], + [ + 1523, + 306 + ], + [ + 1522, + 304 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1501, + 470 + ], + [ + 1496, + 482 + ], + [ + 1495, + 526 + ], + [ + 1488, + 531 + ], + [ + 1488, + 533 + ], + [ + 1493, + 535 + ], + [ + 1519, + 534 + ], + [ + 1513, + 525 + ], + [ + 1508, + 480 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1541, + 469 + ], + [ + 1538, + 475 + ], + [ + 1532, + 483 + ], + [ + 1531, + 529 + ], + [ + 1521, + 536 + ], + [ + 1521, + 538 + ], + [ + 1546, + 538 + ], + [ + 1556, + 535 + ], + [ + 1551, + 529 + ], + [ + 1548, + 479 + ], + [ + 1543, + 475 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1647, + 481 + ], + [ + 1645, + 486 + ], + [ + 1640, + 490 + ], + [ + 1638, + 531 + ], + [ + 1631, + 537 + ], + [ + 1631, + 541 + ], + [ + 1662, + 540 + ], + [ + 1662, + 538 + ], + [ + 1656, + 531 + ], + [ + 1653, + 489 + ], + [ + 1648, + 485 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 2026, + 314 + ], + [ + 2032, + 309 + ], + [ + 2047, + 310 + ], + [ + 2047, + 377 + ], + [ + 2028, + 377 + ], + [ + 2026, + 372 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1834, + 395 + ], + [ + 1831, + 350 + ], + [ + 1830, + 319 + ], + [ + 1831, + 281 + ], + [ + 1830, + 250 + ], + [ + 1833, + 230 + ], + [ + 1830, + 207 + ], + [ + 1830, + 173 + ], + [ + 1816, + 159 + ], + [ + 1811, + 151 + ], + [ + 1787, + 152 + ], + [ + 1769, + 145 + ], + [ + 1764, + 165 + ], + [ + 1745, + 204 + ], + [ + 1731, + 231 + ], + [ + 1688, + 259 + ], + [ + 1646, + 251 + ], + [ + 1593, + 254 + ], + [ + 1542, + 249 + ], + [ + 1519, + 244 + ], + [ + 1519, + 226 + ], + [ + 1547, + 213 + ], + [ + 1589, + 210 + ], + [ + 1611, + 206 + ], + [ + 1611, + 192 + ], + [ + 1619, + 178 + ], + [ + 1621, + 166 + ], + [ + 1583, + 164 + ], + [ + 1572, + 151 + ], + [ + 1583, + 124 + ], + [ + 1560, + 116 + ], + [ + 1563, + 99 + ], + [ + 1568, + 84 + ], + [ + 1547, + 79 + ], + [ + 1534, + 
67 + ], + [ + 1519, + 43 + ], + [ + 1522, + 33 + ], + [ + 1552, + 26 + ], + [ + 1541, + 16 + ], + [ + 1530, + 1 + ], + [ + 2047, + 1 + ], + [ + 2047, + 1 + ], + [ + 2047, + 163 + ], + [ + 1971, + 168 + ], + [ + 1909, + 152 + ], + [ + 1891, + 145 + ], + [ + 1875, + 188 + ], + [ + 1863, + 236 + ], + [ + 1863, + 298 + ], + [ + 1869, + 369 + ], + [ + 1868, + 420 + ], + [ + 1867, + 465 + ], + [ + 1865, + 502 + ], + [ + 1873, + 536 + ], + [ + 1838, + 537 + ], + [ + 1814, + 535 + ], + [ + 1820, + 526 + ], + [ + 1826, + 497 + ], + [ + 1826, + 468 + ], + [ + 1831, + 419 + ], + [ + 1830, + 405 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000063_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000063_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..19abc9247e6d77602fdde951d1be880eec34e0e3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000063_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000064_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000064_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d15b96906b9cc6d8857129053b26ecb2a0fe5d0a Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000064_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000064_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000064_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..9ea0bfd15971788adc02ab73c8727916e213342c --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000064_000019_gtFine_polygons.json @@ -0,0 +1,6164 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1231, + 0 + ], + [ + 900, + 0 + ], + [ + 915, + 188 + ], + [ + 1020, + 363 + ], + [ + 1100, + 376 + ], + [ + 1204, + 141 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 0, + 1024 + ], + [ + 0, + 396 + ], + [ + 1773, + 357 + ], + [ + 2048, + 484 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 407, + 237 + ], + [ + 183, + 167 + ], + [ + 0, + 173 + ], + [ + 0, + 532 + ], + [ + 359, + 504 + ], + [ + 426, + 463 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 373, + 438 + ], + [ + 311, + 440 + ], + [ + 237, + 440 + ], + [ + 145, + 443 + ], + [ + 91, + 443 + ], + [ + 37, + 443 + ], + [ + 0, + 445 + ], + [ + 0, + 596 + ], + [ + 380, + 583 + ], + [ + 381, + 442 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 178, + 808 + ], + [ + 0, + 885 + ], + [ + 0, + 
550 + ], + [ + 81, + 550 + ], + [ + 218, + 548 + ], + [ + 301, + 545 + ], + [ + 331, + 537 + ], + [ + 572, + 553 + ], + [ + 702, + 551 + ], + [ + 726, + 550 + ], + [ + 755, + 554 + ], + [ + 759, + 564 + ], + [ + 733, + 576 + ], + [ + 631, + 619 + ], + [ + 449, + 695 + ], + [ + 287, + 764 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 142, + 435 + ], + [ + 132, + 427 + ], + [ + 88, + 425 + ], + [ + 70, + 422 + ], + [ + 40, + 424 + ], + [ + 42, + 445 + ], + [ + 93, + 445 + ], + [ + 107, + 445 + ], + [ + 138, + 443 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 184, + 430 + ], + [ + 170, + 431 + ], + [ + 157, + 434 + ], + [ + 150, + 437 + ], + [ + 154, + 445 + ], + [ + 218, + 445 + ], + [ + 230, + 442 + ], + [ + 227, + 431 + ], + [ + 212, + 429 + ], + [ + 199, + 430 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 357, + 398 + ], + [ + 341, + 400 + ], + [ + 317, + 409 + ], + [ + 306, + 401 + ], + [ + 309, + 369 + ], + [ + 328, + 353 + ], + [ + 349, + 353 + ], + [ + 358, + 343 + ], + [ + 372, + 335 + ], + [ + 383, + 397 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 55, + 384 + ], + [ + 55, + 423 + ], + [ + 41, + 425 + ], + [ + 42, + 366 + ], + [ + 53, + 365 + ], + [ + 55, + 378 + ], + [ + 138, + 377 + ], + [ + 186, + 377 + ], + [ + 186, + 350 + ], + [ + 183, + 343 + ], + [ + 190, + 344 + ], + [ + 188, + 350 + ], + [ + 189, + 376 + ], + [ + 269, + 374 + ], + [ + 363, + 371 + ], + [ + 363, + 353 + ], + [ + 369, + 351 + ], + [ + 374, + 439 + ], + [ + 366, + 439 + ], + [ + 364, + 377 + ], + [ + 309, + 379 + ], + [ + 239, + 380 + ], + [ + 190, + 382 + ], + [ + 189, + 431 + ], + [ + 181, + 431 + ], + [ + 181, + 414 + ], + [ + 186, + 413 + ], + [ + 186, + 382 + ], + [ + 122, + 383 + ], + [ + 119, + 388 + ], + [ + 121, + 409 + ], + [ + 127, + 405 + ], + [ + 127, + 396 + ], + [ + 131, + 397 + ], + [ + 134, + 415 + ], + [ + 138, + 417 + ], + [ + 144, + 427 + ], + [ + 138, + 431 + ], + [ + 131, + 428 + ], + [ + 112, + 427 + ], + [ + 110, + 388 + ], + [ + 104, + 384 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 159, + 394 + ], + [ + 158, + 399 + ], + [ + 150, + 400 + ], + [ + 150, + 402 + ], + [ + 156, + 403 + ], + [ + 160, + 411 + ], + [ + 161, + 416 + ], + [ + 162, + 434 + ], + [ + 169, + 431 + ], + [ + 170, + 408 + ], + [ + 167, + 389 + ], + [ + 165, + 389 + ], + [ + 163, + 394 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 349, + 428 + ], + [ + 336, + 426 + ], + [ + 321, + 425 + ], + [ + 318, + 420 + ], + [ + 312, + 419 + ], + [ + 307, + 429 + ], + [ + 314, + 441 + ], + [ + 367, + 439 + ], + [ + 368, + 434 + ], + [ + 367, + 427 + ], + [ + 351, + 429 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1083, + 351 + ], + [ + 1074, + 352 + ], + [ + 1061, + 352 + ], + [ + 1057, + 348 + ], + [ + 1044, + 346 + ], + [ + 1038, + 351 + ], + [ + 1020, + 386 + ], + [ + 1032, + 427 + ], + [ + 1057, + 427 + ], + [ + 1098, + 419 + ], + [ + 1087, + 355 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1050, + 397 + ], + [ + 1047, + 386 + ], + [ + 1053, + 377 + ], + [ + 1045, + 360 + ], + [ + 1048, + 353 + ], + [ + 1038, + 345 + ], + [ + 1038, + 340 + ], + [ + 1040, + 330 + ], + [ + 1040, + 319 + ], + [ + 1038, + 313 + ], + [ + 1038, + 304 + ], + [ + 1036, + 296 + ], + [ + 1029, + 293 + ], + [ + 1035, + 284 + ], + [ + 1027, + 274 + ], + [ + 1016, + 263 + ], + [ + 1020, + 251 + ], + [ + 1021, + 244 + ], + [ + 1010, + 238 + ], + [ + 1008, + 237 + ], + [ + 1004, + 229 + ], + [ + 1001, + 223 + ], + [ + 1002, + 211 + 
], + [ + 988, + 202 + ], + [ + 984, + 204 + ], + [ + 977, + 200 + ], + [ + 975, + 193 + ], + [ + 976, + 180 + ], + [ + 965, + 182 + ], + [ + 960, + 179 + ], + [ + 962, + 172 + ], + [ + 980, + 169 + ], + [ + 986, + 158 + ], + [ + 992, + 143 + ], + [ + 978, + 145 + ], + [ + 965, + 137 + ], + [ + 970, + 128 + ], + [ + 984, + 130 + ], + [ + 993, + 128 + ], + [ + 994, + 125 + ], + [ + 991, + 122 + ], + [ + 996, + 110 + ], + [ + 1006, + 105 + ], + [ + 1010, + 100 + ], + [ + 1003, + 92 + ], + [ + 1006, + 86 + ], + [ + 999, + 81 + ], + [ + 1000, + 74 + ], + [ + 992, + 72 + ], + [ + 990, + 69 + ], + [ + 994, + 63 + ], + [ + 990, + 56 + ], + [ + 999, + 48 + ], + [ + 999, + 45 + ], + [ + 989, + 41 + ], + [ + 976, + 38 + ], + [ + 976, + 36 + ], + [ + 990, + 33 + ], + [ + 995, + 23 + ], + [ + 988, + 16 + ], + [ + 978, + 19 + ], + [ + 974, + 14 + ], + [ + 973, + 6 + ], + [ + 977, + 3 + ], + [ + 974, + 0 + ], + [ + 964, + 0 + ], + [ + 967, + 6 + ], + [ + 960, + 13 + ], + [ + 960, + 23 + ], + [ + 963, + 28 + ], + [ + 963, + 35 + ], + [ + 955, + 28 + ], + [ + 948, + 26 + ], + [ + 943, + 21 + ], + [ + 936, + 25 + ], + [ + 938, + 18 + ], + [ + 937, + 8 + ], + [ + 943, + 0 + ], + [ + 0, + 1 + ], + [ + 0, + 446 + ], + [ + 50, + 443 + ], + [ + 47, + 418 + ], + [ + 47, + 405 + ], + [ + 46, + 392 + ], + [ + 46, + 384 + ], + [ + 48, + 370 + ], + [ + 58, + 375 + ], + [ + 72, + 378 + ], + [ + 79, + 383 + ], + [ + 82, + 387 + ], + [ + 83, + 420 + ], + [ + 83, + 425 + ], + [ + 103, + 426 + ], + [ + 102, + 388 + ], + [ + 100, + 382 + ], + [ + 100, + 377 + ], + [ + 103, + 375 + ], + [ + 110, + 377 + ], + [ + 116, + 386 + ], + [ + 120, + 387 + ], + [ + 123, + 381 + ], + [ + 133, + 378 + ], + [ + 131, + 376 + ], + [ + 126, + 369 + ], + [ + 136, + 371 + ], + [ + 136, + 380 + ], + [ + 138, + 385 + ], + [ + 136, + 399 + ], + [ + 137, + 416 + ], + [ + 135, + 429 + ], + [ + 137, + 443 + ], + [ + 157, + 443 + ], + [ + 154, + 434 + ], + [ + 153, + 425 + ], + [ + 152, + 413 + ], + [ + 155, + 391 + ], + [ + 160, + 377 + ], + [ + 168, + 360 + ], + [ + 170, + 351 + ], + [ + 181, + 334 + ], + [ + 184, + 322 + ], + [ + 195, + 327 + ], + [ + 201, + 337 + ], + [ + 209, + 347 + ], + [ + 214, + 359 + ], + [ + 221, + 377 + ], + [ + 221, + 393 + ], + [ + 223, + 400 + ], + [ + 224, + 414 + ], + [ + 220, + 419 + ], + [ + 214, + 426 + ], + [ + 220, + 434 + ], + [ + 226, + 443 + ], + [ + 246, + 443 + ], + [ + 240, + 435 + ], + [ + 240, + 423 + ], + [ + 239, + 407 + ], + [ + 240, + 384 + ], + [ + 246, + 375 + ], + [ + 263, + 362 + ], + [ + 265, + 377 + ], + [ + 263, + 396 + ], + [ + 260, + 425 + ], + [ + 257, + 450 + ], + [ + 249, + 477 + ], + [ + 228, + 481 + ], + [ + 225, + 483 + ], + [ + 263, + 484 + ], + [ + 325, + 485 + ], + [ + 316, + 449 + ], + [ + 312, + 418 + ], + [ + 311, + 407 + ], + [ + 313, + 399 + ], + [ + 312, + 385 + ], + [ + 317, + 374 + ], + [ + 322, + 368 + ], + [ + 332, + 350 + ], + [ + 346, + 338 + ], + [ + 370, + 330 + ], + [ + 541, + 414 + ], + [ + 750, + 448 + ], + [ + 1018, + 423 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 215, + 489 + ], + [ + 215, + 470 + ], + [ + 221, + 449 + ], + [ + 174, + 450 + ], + [ + 166, + 458 + ], + [ + 153, + 459 + ], + [ + 143, + 459 + ], + [ + 138, + 471 + ], + [ + 140, + 485 + ], + [ + 137, + 492 + ], + [ + 147, + 492 + ], + [ + 148, + 484 + ], + [ + 151, + 479 + ], + [ + 158, + 477 + ], + [ + 163, + 483 + ], + [ + 162, + 490 + ], + [ + 171, + 490 + ], + [ + 171, + 475 + ], + [ + 186, + 475 + ], + [ + 186, + 485 + ], + [ + 182, + 490 + ], + [ + 194, + 490 + ], + [ + 195, + 
474 + ], + [ + 208, + 474 + ], + [ + 208, + 486 + ], + [ + 205, + 492 + ], + [ + 213, + 491 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 104, + 477 + ], + [ + 93, + 493 + ], + [ + 87, + 544 + ], + [ + 79, + 552 + ], + [ + 125, + 552 + ], + [ + 125, + 546 + ], + [ + 119, + 539 + ], + [ + 114, + 489 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 71, + 216 + ], + [ + 87, + 196 + ], + [ + 84, + 179 + ], + [ + 64, + 143 + ], + [ + 46, + 140 + ], + [ + 36, + 142 + ], + [ + 41, + 219 + ], + [ + 55, + 219 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 54, + 745 + ], + [ + 51, + 641 + ], + [ + 45, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 750 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1299, + 488 + ], + [ + 1206, + 467 + ], + [ + 1200, + 457 + ], + [ + 1222, + 451 + ], + [ + 1375, + 452 + ], + [ + 1548, + 436 + ], + [ + 2048, + 447 + ], + [ + 2048, + 590 + ], + [ + 1655, + 579 + ], + [ + 1453, + 575 + ], + [ + 1438, + 570 + ], + [ + 1411, + 563 + ], + [ + 1409, + 560 + ], + [ + 1406, + 556 + ], + [ + 1419, + 550 + ], + [ + 1451, + 545 + ], + [ + 1491, + 544 + ], + [ + 1439, + 532 + ], + [ + 1406, + 523 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1833, + 470 + ], + [ + 1807, + 471 + ], + [ + 1764, + 470 + ], + [ + 1685, + 465 + ], + [ + 1572, + 458 + ], + [ + 1550, + 456 + ], + [ + 1508, + 455 + ], + [ + 1453, + 457 + ], + [ + 1317, + 447 + ], + [ + 1195, + 392 + ], + [ + 1209, + 325 + ], + [ + 1657, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 485 + ], + [ + 1949, + 478 + ], + [ + 1884, + 474 + ] + ] + }, + { + "label": "persongroup", + "polygon": [ + [ + 1483, + 399 + ], + [ + 1478, + 394 + ], + [ + 1473, + 396 + ], + [ + 1472, + 401 + ], + [ + 1466, + 402 + ], + [ + 1466, + 408 + ], + [ + 1463, + 410 + ], + [ + 1462, + 406 + ], + [ + 1460, + 397 + ], + [ + 1458, + 393 + ], + [ + 1448, + 407 + ], + [ + 1441, + 409 + ], + [ + 1438, + 417 + ], + [ + 1480, + 455 + ], + [ + 1508, + 445 + ], + [ + 1524, + 428 + ], + [ + 1521, + 409 + ], + [ + 1513, + 400 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1327, + 431 + ], + [ + 1320, + 418 + ], + [ + 1320, + 408 + ], + [ + 1334, + 405 + ], + [ + 1342, + 401 + ], + [ + 1340, + 398 + ], + [ + 1323, + 391 + ], + [ + 1322, + 375 + ], + [ + 1330, + 368 + ], + [ + 1328, + 363 + ], + [ + 1299, + 362 + ], + [ + 1263, + 372 + ], + [ + 1242, + 379 + ], + [ + 1213, + 389 + ], + [ + 1207, + 390 + ], + [ + 1190, + 393 + ], + [ + 1169, + 397 + ], + [ + 1151, + 405 + ], + [ + 1145, + 416 + ], + [ + 1215, + 448 + ], + [ + 1311, + 460 + ], + [ + 1329, + 437 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1596, + 399 + ], + [ + 1595, + 393 + ], + [ + 1597, + 388 + ], + [ + 1600, + 387 + ], + [ + 1603, + 389 + ], + [ + 1605, + 393 + ], + [ + 1605, + 398 + ], + [ + 1614, + 402 + ], + [ + 1619, + 415 + ], + [ + 1619, + 421 + ], + [ + 1614, + 427 + ], + [ + 1615, + 442 + ], + [ + 1615, + 453 + ], + [ + 1615, + 470 + ], + [ + 1615, + 474 + ], + [ + 1612, + 476 + ], + [ + 1608, + 474 + ], + [ + 1608, + 462 + ], + [ + 1607, + 451 + ], + [ + 1603, + 440 + ], + [ + 1601, + 453 + ], + [ + 1599, + 462 + ], + [ + 1597, + 471 + ], + [ + 1594, + 474 + ], + [ + 1591, + 470 + ], + [ + 1589, + 467 + ], + [ + 1590, + 463 + ], + [ + 1591, + 463 + ], + [ + 1594, + 443 + ], + [ + 1593, + 429 + ], + [ + 1588, + 423 + ], + [ + 1587, + 419 + ], + [ + 1589, + 406 + ], + [ + 1591, + 402 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1648, + 408 + ], + [ + 1641, + 390 + ], + [ + 1637, + 387 + ], + [ + 
1633, + 383 + ], + [ + 1628, + 385 + ], + [ + 1628, + 391 + ], + [ + 1630, + 396 + ], + [ + 1625, + 399 + ], + [ + 1622, + 409 + ], + [ + 1621, + 422 + ], + [ + 1622, + 424 + ], + [ + 1624, + 428 + ], + [ + 1624, + 446 + ], + [ + 1626, + 457 + ], + [ + 1629, + 468 + ], + [ + 1628, + 471 + ], + [ + 1632, + 473 + ], + [ + 1638, + 472 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1678, + 409 + ], + [ + 1678, + 399 + ], + [ + 1675, + 395 + ], + [ + 1669, + 399 + ], + [ + 1667, + 409 + ], + [ + 1661, + 417 + ], + [ + 1659, + 430 + ], + [ + 1665, + 447 + ], + [ + 1668, + 457 + ], + [ + 1669, + 463 + ], + [ + 1669, + 470 + ], + [ + 1674, + 471 + ], + [ + 1678, + 458 + ], + [ + 1680, + 454 + ], + [ + 1685, + 454 + ], + [ + 1688, + 451 + ], + [ + 1685, + 442 + ], + [ + 1687, + 428 + ], + [ + 1684, + 415 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1648, + 405 + ], + [ + 1647, + 397 + ], + [ + 1638, + 399 + ], + [ + 1637, + 403 + ], + [ + 1632, + 409 + ], + [ + 1631, + 427 + ], + [ + 1629, + 438 + ], + [ + 1633, + 443 + ], + [ + 1638, + 469 + ], + [ + 1638, + 477 + ], + [ + 1641, + 481 + ], + [ + 1649, + 484 + ], + [ + 1654, + 484 + ], + [ + 1655, + 480 + ], + [ + 1655, + 473 + ], + [ + 1656, + 470 + ], + [ + 1656, + 451 + ], + [ + 1665, + 447 + ], + [ + 1666, + 438 + ], + [ + 1661, + 430 + ], + [ + 1656, + 428 + ], + [ + 1656, + 425 + ], + [ + 1663, + 427 + ], + [ + 1663, + 422 + ], + [ + 1659, + 411 + ], + [ + 1656, + 407 + ], + [ + 1652, + 405 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1704, + 396 + ], + [ + 1702, + 387 + ], + [ + 1698, + 386 + ], + [ + 1693, + 387 + ], + [ + 1691, + 393 + ], + [ + 1692, + 398 + ], + [ + 1688, + 402 + ], + [ + 1686, + 413 + ], + [ + 1686, + 423 + ], + [ + 1690, + 430 + ], + [ + 1695, + 453 + ], + [ + 1694, + 463 + ], + [ + 1695, + 470 + ], + [ + 1707, + 471 + ], + [ + 1712, + 399 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1079, + 407 + ], + [ + 1079, + 390 + ], + [ + 1075, + 375 + ], + [ + 1075, + 353 + ], + [ + 1069, + 341 + ], + [ + 1075, + 334 + ], + [ + 1081, + 328 + ], + [ + 1093, + 320 + ], + [ + 1080, + 311 + ], + [ + 1074, + 304 + ], + [ + 1066, + 291 + ], + [ + 1081, + 286 + ], + [ + 1083, + 271 + ], + [ + 1087, + 263 + ], + [ + 1094, + 263 + ], + [ + 1100, + 261 + ], + [ + 1092, + 241 + ], + [ + 1097, + 205 + ], + [ + 1106, + 206 + ], + [ + 1099, + 185 + ], + [ + 1106, + 176 + ], + [ + 1111, + 158 + ], + [ + 1115, + 153 + ], + [ + 1108, + 145 + ], + [ + 1117, + 144 + ], + [ + 1122, + 141 + ], + [ + 1134, + 150 + ], + [ + 1131, + 138 + ], + [ + 1115, + 117 + ], + [ + 1115, + 112 + ], + [ + 1123, + 107 + ], + [ + 1131, + 102 + ], + [ + 1143, + 94 + ], + [ + 1131, + 86 + ], + [ + 1131, + 81 + ], + [ + 1140, + 84 + ], + [ + 1142, + 72 + ], + [ + 1138, + 68 + ], + [ + 1131, + 70 + ], + [ + 1133, + 62 + ], + [ + 1124, + 65 + ], + [ + 1118, + 67 + ], + [ + 1101, + 58 + ], + [ + 1109, + 57 + ], + [ + 1121, + 57 + ], + [ + 1120, + 55 + ], + [ + 1124, + 59 + ], + [ + 1129, + 56 + ], + [ + 1135, + 52 + ], + [ + 1139, + 41 + ], + [ + 1124, + 45 + ], + [ + 1120, + 40 + ], + [ + 1127, + 41 + ], + [ + 1142, + 37 + ], + [ + 1147, + 31 + ], + [ + 1141, + 30 + ], + [ + 1150, + 28 + ], + [ + 1154, + 18 + ], + [ + 1140, + 22 + ], + [ + 1107, + 25 + ], + [ + 1080, + 25 + ], + [ + 1057, + 31 + ], + [ + 1051, + 31 + ], + [ + 1039, + 24 + ], + [ + 1046, + 24 + ], + [ + 1048, + 27 + ], + [ + 1069, + 26 + ], + [ + 1067, + 20 + ], + [ + 1075, + 24 + ], + [ + 1083, + 20 + ], + [ + 1082, + 15 + ], + [ + 1087, + 20 + ], 
+ [ + 1113, + 19 + ], + [ + 1110, + 6 + ], + [ + 1118, + 13 + ], + [ + 1134, + 12 + ], + [ + 1141, + 17 + ], + [ + 1155, + 13 + ], + [ + 1158, + 8 + ], + [ + 1156, + 5 + ], + [ + 1141, + 6 + ], + [ + 1136, + 6 + ], + [ + 1137, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 124 + ], + [ + 1965, + 150 + ], + [ + 1890, + 173 + ], + [ + 1818, + 172 + ], + [ + 1788, + 184 + ], + [ + 1772, + 168 + ], + [ + 1756, + 186 + ], + [ + 1752, + 204 + ], + [ + 1773, + 223 + ], + [ + 1772, + 228 + ], + [ + 1746, + 228 + ], + [ + 1752, + 268 + ], + [ + 1754, + 326 + ], + [ + 1756, + 411 + ], + [ + 1757, + 455 + ], + [ + 1757, + 468 + ], + [ + 1745, + 541 + ], + [ + 1692, + 544 + ], + [ + 1701, + 532 + ], + [ + 1705, + 475 + ], + [ + 1709, + 408 + ], + [ + 1709, + 343 + ], + [ + 1707, + 289 + ], + [ + 1702, + 246 + ], + [ + 1701, + 222 + ], + [ + 1679, + 222 + ], + [ + 1672, + 231 + ], + [ + 1654, + 222 + ], + [ + 1618, + 213 + ], + [ + 1601, + 195 + ], + [ + 1579, + 205 + ], + [ + 1560, + 214 + ], + [ + 1544, + 212 + ], + [ + 1527, + 215 + ], + [ + 1506, + 250 + ], + [ + 1507, + 312 + ], + [ + 1512, + 379 + ], + [ + 1512, + 415 + ], + [ + 1510, + 480 + ], + [ + 1521, + 501 + ], + [ + 1492, + 498 + ], + [ + 1479, + 447 + ], + [ + 1481, + 349 + ], + [ + 1480, + 311 + ], + [ + 1479, + 282 + ], + [ + 1481, + 259 + ], + [ + 1468, + 244 + ], + [ + 1463, + 246 + ], + [ + 1459, + 259 + ], + [ + 1465, + 273 + ], + [ + 1472, + 279 + ], + [ + 1463, + 289 + ], + [ + 1453, + 290 + ], + [ + 1439, + 284 + ], + [ + 1425, + 291 + ], + [ + 1404, + 295 + ], + [ + 1398, + 314 + ], + [ + 1398, + 346 + ], + [ + 1395, + 357 + ], + [ + 1398, + 365 + ], + [ + 1396, + 395 + ], + [ + 1396, + 411 + ], + [ + 1385, + 422 + ], + [ + 1382, + 411 + ], + [ + 1384, + 370 + ], + [ + 1381, + 344 + ], + [ + 1380, + 335 + ], + [ + 1383, + 326 + ], + [ + 1385, + 313 + ], + [ + 1380, + 298 + ], + [ + 1372, + 295 + ], + [ + 1355, + 293 + ], + [ + 1355, + 302 + ], + [ + 1367, + 308 + ], + [ + 1370, + 314 + ], + [ + 1356, + 318 + ], + [ + 1355, + 327 + ], + [ + 1360, + 329 + ], + [ + 1365, + 335 + ], + [ + 1365, + 346 + ], + [ + 1365, + 355 + ], + [ + 1353, + 361 + ], + [ + 1333, + 377 + ], + [ + 1329, + 377 + ], + [ + 1325, + 370 + ], + [ + 1321, + 378 + ], + [ + 1320, + 403 + ], + [ + 1322, + 425 + ], + [ + 1325, + 440 + ], + [ + 1308, + 463 + ], + [ + 1297, + 455 + ], + [ + 1301, + 418 + ], + [ + 1301, + 375 + ], + [ + 1291, + 376 + ], + [ + 1275, + 391 + ], + [ + 1275, + 409 + ], + [ + 1279, + 445 + ], + [ + 1279, + 458 + ], + [ + 1265, + 461 + ], + [ + 1264, + 436 + ], + [ + 1263, + 407 + ], + [ + 1258, + 383 + ], + [ + 1249, + 380 + ], + [ + 1247, + 402 + ], + [ + 1245, + 448 + ], + [ + 1232, + 444 + ], + [ + 1235, + 424 + ], + [ + 1234, + 399 + ], + [ + 1236, + 384 + ], + [ + 1234, + 371 + ], + [ + 1229, + 368 + ], + [ + 1228, + 383 + ], + [ + 1228, + 416 + ], + [ + 1223, + 426 + ], + [ + 1223, + 435 + ], + [ + 1223, + 444 + ], + [ + 1204, + 445 + ], + [ + 1163, + 427 + ], + [ + 1162, + 406 + ], + [ + 1169, + 401 + ], + [ + 1167, + 398 + ], + [ + 1121, + 417 + ], + [ + 1088, + 420 + ], + [ + 1082, + 413 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 1271, + 471 + ], + [ + 1269, + 449 + ], + [ + 1250, + 444 + ], + [ + 1242, + 440 + ], + [ + 1215, + 436 + ], + [ + 1200, + 435 + ], + [ + 1208, + 458 + ], + [ + 1234, + 464 + ], + [ + 1258, + 470 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 1303, + 476 + ], + [ + 1278, + 470 + ], + [ + 1278, + 450 + ], + [ + 1312, + 453 + ], + [ + 1313, + 470 + ] + ] + }, + { + "label": 
"traffic sign", + "polygon": [ + [ + 1271, + 354 + ], + [ + 1271, + 381 + ], + [ + 1291, + 381 + ], + [ + 1291, + 354 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1291, + 399 + ], + [ + 1291, + 381 + ], + [ + 1271, + 381 + ], + [ + 1271, + 400 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1796, + 385 + ], + [ + 1785, + 398 + ], + [ + 1786, + 403 + ], + [ + 1787, + 403 + ], + [ + 1785, + 432 + ], + [ + 1791, + 441 + ], + [ + 1791, + 475 + ], + [ + 1792, + 478 + ], + [ + 1799, + 479 + ], + [ + 1813, + 480 + ], + [ + 1813, + 478 + ], + [ + 1807, + 474 + ], + [ + 1812, + 446 + ], + [ + 1817, + 426 + ], + [ + 1818, + 408 + ], + [ + 1811, + 400 + ], + [ + 1811, + 399 + ], + [ + 1813, + 399 + ], + [ + 1824, + 396 + ], + [ + 1838, + 387 + ], + [ + 1814, + 382 + ], + [ + 1804, + 384 + ], + [ + 1796, + 385 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1925, + 383 + ], + [ + 1906, + 389 + ], + [ + 1909, + 397 + ], + [ + 1922, + 418 + ], + [ + 1922, + 453 + ], + [ + 1923, + 477 + ], + [ + 1903, + 474 + ], + [ + 1895, + 475 + ], + [ + 1891, + 474 + ], + [ + 1887, + 466 + ], + [ + 1887, + 449 + ], + [ + 1885, + 428 + ], + [ + 1885, + 410 + ], + [ + 1888, + 401 + ], + [ + 1895, + 398 + ], + [ + 1893, + 392 + ], + [ + 1875, + 396 + ], + [ + 1881, + 387 + ], + [ + 1900, + 378 + ], + [ + 1930, + 376 + ], + [ + 1925, + 383 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1890, + 432 + ], + [ + 1890, + 480 + ], + [ + 1914, + 480 + ], + [ + 1916, + 429 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1576, + 465 + ], + [ + 1566, + 477 + ], + [ + 1562, + 484 + ], + [ + 1552, + 488 + ], + [ + 1547, + 488 + ], + [ + 1543, + 485 + ], + [ + 1541, + 474 + ], + [ + 1529, + 462 + ], + [ + 1518, + 476 + ], + [ + 1516, + 549 + ], + [ + 1504, + 558 + ], + [ + 1506, + 563 + ], + [ + 1561, + 563 + ], + [ + 1559, + 559 + ], + [ + 1548, + 551 + ], + [ + 1544, + 491 + ], + [ + 1551, + 493 + ], + [ + 1559, + 491 + ], + [ + 1563, + 489 + ], + [ + 1568, + 484 + ], + [ + 1571, + 528 + ], + [ + 1562, + 539 + ], + [ + 1571, + 540 + ], + [ + 1602, + 537 + ], + [ + 1594, + 527 + ], + [ + 1586, + 477 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1773, + 553 + ], + [ + 1768, + 470 + ], + [ + 1757, + 458 + ], + [ + 1747, + 471 + ], + [ + 1741, + 552 + ], + [ + 1727, + 566 + ], + [ + 1735, + 568 + ], + [ + 1778, + 568 + ], + [ + 1788, + 566 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1951, + 550 + ], + [ + 1947, + 470 + ], + [ + 1943, + 460 + ], + [ + 1942, + 421 + ], + [ + 1939, + 416 + ], + [ + 1940, + 154 + ], + [ + 1924, + 151 + ], + [ + 1924, + 413 + ], + [ + 1916, + 425 + ], + [ + 1917, + 458 + ], + [ + 1912, + 469 + ], + [ + 1910, + 546 + ], + [ + 1889, + 564 + ], + [ + 1904, + 566 + ], + [ + 1967, + 565 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1822, + 160 + ], + [ + 1818, + 139 + ], + [ + 1823, + 123 + ], + [ + 1838, + 111 + ], + [ + 1840, + 97 + ], + [ + 1847, + 90 + ], + [ + 1843, + 88 + ], + [ + 1840, + 60 + ], + [ + 1857, + 58 + ], + [ + 1882, + 60 + ], + [ + 1925, + 88 + ], + [ + 1927, + 71 + ], + [ + 1938, + 72 + ], + [ + 1943, + 87 + ], + [ + 1960, + 71 + ], + [ + 1991, + 56 + ], + [ + 2019, + 54 + ], + [ + 2033, + 55 + ], + [ + 2023, + 98 + ], + [ + 2028, + 112 + ], + [ + 2039, + 131 + ], + [ + 2038, + 157 + ], + [ + 2020, + 186 + ], + [ + 2002, + 185 + ], + [ + 1982, + 157 + ], + [ + 1982, + 135 + ], + [ + 1994, + 119 + ], + [ + 2005, + 103 + ], + [ + 2014, + 97 + ], + [ + 2016, + 83 + ], + [ + 1994, + 85 + ], + 
[ + 1965, + 100 + ], + [ + 1946, + 121 + ], + [ + 1943, + 128 + ], + [ + 1945, + 152 + ], + [ + 1939, + 162 + ], + [ + 1923, + 160 + ], + [ + 1919, + 152 + ], + [ + 1921, + 145 + ], + [ + 1921, + 130 + ], + [ + 1910, + 108 + ], + [ + 1885, + 92 + ], + [ + 1854, + 86 + ], + [ + 1851, + 88 + ], + [ + 1853, + 96 + ], + [ + 1859, + 106 + ], + [ + 1865, + 110 + ], + [ + 1871, + 122 + ], + [ + 1877, + 136 + ], + [ + 1879, + 159 + ], + [ + 1862, + 186 + ], + [ + 1842, + 187 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1101, + 402 + ], + [ + 1099, + 415 + ], + [ + 1088, + 412 + ], + [ + 1081, + 411 + ], + [ + 1073, + 408 + ], + [ + 1068, + 411 + ], + [ + 1069, + 416 + ], + [ + 1062, + 418 + ], + [ + 1061, + 422 + ], + [ + 1043, + 424 + ], + [ + 1040, + 421 + ], + [ + 1041, + 414 + ], + [ + 1058, + 414 + ], + [ + 1058, + 398 + ], + [ + 1038, + 398 + ], + [ + 1036, + 375 + ], + [ + 1030, + 379 + ], + [ + 1030, + 394 + ], + [ + 1032, + 401 + ], + [ + 1030, + 406 + ], + [ + 1024, + 406 + ], + [ + 1021, + 407 + ], + [ + 1022, + 412 + ], + [ + 1014, + 412 + ], + [ + 1011, + 410 + ], + [ + 995, + 414 + ], + [ + 989, + 414 + ], + [ + 980, + 420 + ], + [ + 975, + 421 + ], + [ + 975, + 414 + ], + [ + 981, + 412 + ], + [ + 986, + 401 + ], + [ + 984, + 397 + ], + [ + 973, + 394 + ], + [ + 976, + 387 + ], + [ + 950, + 388 + ], + [ + 953, + 404 + ], + [ + 955, + 423 + ], + [ + 1019, + 437 + ], + [ + 1029, + 439 + ], + [ + 1038, + 434 + ], + [ + 1048, + 432 + ], + [ + 1067, + 439 + ], + [ + 1072, + 439 + ], + [ + 1075, + 433 + ], + [ + 1079, + 435 + ], + [ + 1085, + 433 + ], + [ + 1089, + 436 + ], + [ + 1105, + 438 + ], + [ + 1112, + 439 + ], + [ + 1119, + 443 + ], + [ + 1164, + 429 + ], + [ + 1164, + 406 + ], + [ + 1168, + 401 + ], + [ + 1168, + 398 + ], + [ + 1160, + 396 + ], + [ + 1151, + 393 + ], + [ + 1146, + 390 + ], + [ + 1143, + 379 + ], + [ + 1119, + 378 + ], + [ + 1118, + 412 + ], + [ + 1113, + 409 + ], + [ + 1113, + 403 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1070, + 357 + ], + [ + 1069, + 372 + ], + [ + 1077, + 373 + ], + [ + 1078, + 358 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1068, + 444 + ], + [ + 1068, + 437 + ], + [ + 1068, + 431 + ], + [ + 1062, + 424 + ], + [ + 1048, + 424 + ], + [ + 1042, + 432 + ], + [ + 1042, + 444 + ], + [ + 1047, + 444 + ], + [ + 1047, + 442 + ], + [ + 1062, + 442 + ], + [ + 1062, + 444 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 858, + 341 + ], + [ + 854, + 361 + ], + [ + 857, + 368 + ], + [ + 854, + 386 + ], + [ + 858, + 391 + ], + [ + 859, + 403 + ], + [ + 861, + 403 + ], + [ + 861, + 391 + ], + [ + 865, + 383 + ], + [ + 865, + 373 + ], + [ + 862, + 360 + ], + [ + 860, + 345 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 825, + 408 + ], + [ + 822, + 399 + ], + [ + 777, + 399 + ], + [ + 776, + 420 + ], + [ + 822, + 419 + ], + [ + 826, + 410 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 784, + 354 + ], + [ + 768, + 354 + ], + [ + 768, + 380 + ], + [ + 785, + 381 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1017, + 441 + ], + [ + 1018, + 432 + ], + [ + 1007, + 426 + ], + [ + 998, + 427 + ], + [ + 1005, + 444 + ], + [ + 1013, + 443 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 998, + 449 + ], + [ + 1002, + 448 + ], + [ + 1005, + 447 + ], + [ + 1008, + 443 + ], + [ + 1008, + 439 + ], + [ + 1005, + 429 + ], + [ + 997, + 426 + ], + [ + 991, + 424 + ], + [ + 988, + 429 + ], + [ + 989, + 448 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 978, + 452 + ], + 
[ + 984, + 452 + ], + [ + 989, + 451 + ], + [ + 994, + 450 + ], + [ + 995, + 446 + ], + [ + 995, + 432 + ], + [ + 988, + 421 + ], + [ + 975, + 422 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 965, + 452 + ], + [ + 970, + 456 + ], + [ + 975, + 455 + ], + [ + 980, + 452 + ], + [ + 983, + 449 + ], + [ + 984, + 438 + ], + [ + 981, + 430 + ], + [ + 977, + 424 + ], + [ + 970, + 420 + ], + [ + 958, + 421 + ], + [ + 950, + 422 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 963, + 456 + ], + [ + 966, + 446 + ], + [ + 965, + 436 + ], + [ + 973, + 433 + ], + [ + 967, + 427 + ], + [ + 953, + 423 + ], + [ + 946, + 421 + ], + [ + 949, + 462 + ], + [ + 955, + 460 + ], + [ + 961, + 459 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1185, + 427 + ], + [ + 1182, + 421 + ], + [ + 1160, + 419 + ], + [ + 1157, + 417 + ], + [ + 1126, + 416 + ], + [ + 1122, + 422 + ], + [ + 1119, + 428 + ], + [ + 1118, + 435 + ], + [ + 1117, + 448 + ], + [ + 1121, + 449 + ], + [ + 1127, + 453 + ], + [ + 1133, + 455 + ], + [ + 1139, + 458 + ], + [ + 1145, + 460 + ], + [ + 1186, + 428 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1195, + 426 + ], + [ + 1182, + 424 + ], + [ + 1165, + 424 + ], + [ + 1159, + 426 + ], + [ + 1155, + 432 + ], + [ + 1152, + 436 + ], + [ + 1148, + 436 + ], + [ + 1147, + 438 + ], + [ + 1148, + 442 + ], + [ + 1145, + 454 + ], + [ + 1146, + 461 + ], + [ + 1146, + 468 + ], + [ + 1157, + 469 + ], + [ + 1160, + 465 + ], + [ + 1195, + 467 + ], + [ + 1196, + 469 + ], + [ + 1208, + 469 + ], + [ + 1208, + 456 + ], + [ + 1208, + 446 + ], + [ + 1208, + 440 + ], + [ + 1207, + 438 + ], + [ + 1203, + 436 + ], + [ + 1198, + 429 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 914, + 399 + ], + [ + 876, + 399 + ], + [ + 856, + 401 + ], + [ + 836, + 403 + ], + [ + 805, + 407 + ], + [ + 793, + 413 + ], + [ + 789, + 420 + ], + [ + 877, + 482 + ], + [ + 886, + 482 + ], + [ + 892, + 478 + ], + [ + 894, + 473 + ], + [ + 905, + 473 + ], + [ + 909, + 476 + ], + [ + 912, + 479 + ], + [ + 917, + 481 + ], + [ + 924, + 479 + ], + [ + 930, + 475 + ], + [ + 933, + 469 + ], + [ + 934, + 468 + ], + [ + 941, + 468 + ], + [ + 947, + 467 + ], + [ + 951, + 463 + ], + [ + 951, + 449 + ], + [ + 949, + 425 + ], + [ + 946, + 410 + ], + [ + 942, + 402 + ], + [ + 936, + 400 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 899, + 450 + ], + [ + 897, + 441 + ], + [ + 896, + 437 + ], + [ + 893, + 436 + ], + [ + 879, + 433 + ], + [ + 850, + 424 + ], + [ + 859, + 486 + ], + [ + 871, + 486 + ], + [ + 877, + 482 + ], + [ + 880, + 478 + ], + [ + 882, + 474 + ], + [ + 893, + 472 + ], + [ + 899, + 471 + ], + [ + 900, + 460 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 845, + 493 + ], + [ + 854, + 492 + ], + [ + 859, + 489 + ], + [ + 863, + 484 + ], + [ + 868, + 477 + ], + [ + 871, + 469 + ], + [ + 870, + 458 + ], + [ + 868, + 454 + ], + [ + 868, + 445 + ], + [ + 867, + 436 + ], + [ + 863, + 431 + ], + [ + 850, + 422 + ], + [ + 846, + 419 + ], + [ + 838, + 418 + ], + [ + 835, + 426 + ], + [ + 837, + 493 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 832, + 417 + ], + [ + 809, + 416 + ], + [ + 776, + 417 + ], + [ + 752, + 419 + ], + [ + 727, + 424 + ], + [ + 715, + 431 + ], + [ + 731, + 500 + ], + [ + 747, + 499 + ], + [ + 751, + 495 + ], + [ + 754, + 493 + ], + [ + 767, + 491 + ], + [ + 776, + 492 + ], + [ + 780, + 496 + ], + [ + 784, + 499 + ], + [ + 791, + 499 + ], + [ + 799, + 499 + ], + [ + 807, + 496 + ], + [ + 813, + 490 + ], + [ + 824, + 488 + ], + [ + 826, + 492 + ], + [ + 
831, + 496 + ], + [ + 837, + 497 + ], + [ + 845, + 496 + ], + [ + 850, + 491 + ], + [ + 859, + 484 + ], + [ + 864, + 476 + ], + [ + 863, + 460 + ], + [ + 860, + 455 + ], + [ + 858, + 441 + ], + [ + 851, + 429 + ], + [ + 842, + 421 + ], + [ + 837, + 417 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 735, + 510 + ], + [ + 742, + 506 + ], + [ + 743, + 500 + ], + [ + 742, + 492 + ], + [ + 735, + 491 + ], + [ + 731, + 478 + ], + [ + 727, + 472 + ], + [ + 705, + 522 + ], + [ + 708, + 526 + ], + [ + 716, + 526 + ], + [ + 726, + 523 + ], + [ + 733, + 517 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 555, + 395 + ], + [ + 555, + 314 + ], + [ + 551, + 300 + ], + [ + 548, + 312 + ], + [ + 549, + 395 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 559, + 305 + ], + [ + 562, + 295 + ], + [ + 579, + 286 + ], + [ + 585, + 287 + ], + [ + 585, + 294 + ], + [ + 578, + 300 + ], + [ + 575, + 317 + ], + [ + 580, + 326 + ], + [ + 586, + 334 + ], + [ + 596, + 327 + ], + [ + 599, + 319 + ], + [ + 599, + 308 + ], + [ + 591, + 294 + ], + [ + 591, + 271 + ], + [ + 569, + 275 + ], + [ + 560, + 282 + ], + [ + 550, + 273 + ], + [ + 538, + 283 + ], + [ + 537, + 303 + ], + [ + 551, + 313 + ], + [ + 555, + 315 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 361, + 557 + ], + [ + 347, + 556 + ], + [ + 341, + 551 + ], + [ + 339, + 543 + ], + [ + 325, + 544 + ], + [ + 311, + 520 + ], + [ + 320, + 491 + ], + [ + 341, + 473 + ], + [ + 358, + 461 + ], + [ + 378, + 444 + ], + [ + 537, + 389 + ], + [ + 565, + 389 + ], + [ + 575, + 383 + ], + [ + 581, + 379 + ], + [ + 589, + 378 + ], + [ + 609, + 375 + ], + [ + 630, + 374 + ], + [ + 649, + 375 + ], + [ + 659, + 377 + ], + [ + 671, + 384 + ], + [ + 689, + 384 + ], + [ + 702, + 388 + ], + [ + 707, + 396 + ], + [ + 715, + 418 + ], + [ + 723, + 441 + ], + [ + 727, + 453 + ], + [ + 728, + 467 + ], + [ + 728, + 489 + ], + [ + 730, + 494 + ], + [ + 730, + 504 + ], + [ + 729, + 514 + ], + [ + 725, + 518 + ], + [ + 712, + 522 + ], + [ + 707, + 535 + ], + [ + 698, + 545 + ], + [ + 689, + 548 + ], + [ + 672, + 547 + ], + [ + 666, + 545 + ], + [ + 658, + 543 + ], + [ + 609, + 545 + ], + [ + 599, + 549 + ], + [ + 592, + 553 + ], + [ + 567, + 553 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 675, + 494 + ], + [ + 674, + 481 + ], + [ + 708, + 478 + ], + [ + 709, + 490 + ], + [ + 705, + 492 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 612, + 488 + ], + [ + 601, + 478 + ], + [ + 590, + 492 + ], + [ + 588, + 546 + ], + [ + 579, + 557 + ], + [ + 579, + 558 + ], + [ + 617, + 558 + ], + [ + 624, + 555 + ], + [ + 615, + 548 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 285, + 559 + ], + [ + 287, + 556 + ], + [ + 298, + 547 + ], + [ + 302, + 496 + ], + [ + 315, + 482 + ], + [ + 326, + 495 + ], + [ + 327, + 547 + ], + [ + 334, + 556 + ], + [ + 311, + 559 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 358, + 568 + ], + [ + 360, + 513 + ], + [ + 371, + 511 + ], + [ + 364, + 117 + ], + [ + 350, + 111 + ], + [ + 347, + 95 + ], + [ + 303, + 77 + ], + [ + 301, + 67 + ], + [ + 320, + 56 + ], + [ + 364, + 49 + ], + [ + 425, + 47 + ], + [ + 492, + 51 + ], + [ + 547, + 59 + ], + [ + 580, + 67 + ], + [ + 589, + 81 + ], + [ + 549, + 105 + ], + [ + 547, + 117 + ], + [ + 539, + 122 + ], + [ + 544, + 504 + ], + [ + 554, + 513 + ], + [ + 551, + 562 + ], + [ + 511, + 567 + ], + [ + 421, + 570 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 489, + 46 + ], + [ + 489, + 0 + ], + [ + 507, + 0 + ], + [ + 515, + 526 + ], 
+ [ + 518, + 609 + ], + [ + 501, + 592 + ], + [ + 497, + 500 + ], + [ + 495, + 375 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 529, + 605 + ], + [ + 517, + 596 + ], + [ + 511, + 512 + ], + [ + 499, + 492 + ], + [ + 494, + 492 + ], + [ + 482, + 515 + ], + [ + 477, + 595 + ], + [ + 460, + 616 + ], + [ + 519, + 613 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 558, + 489 + ], + [ + 545, + 509 + ], + [ + 541, + 579 + ], + [ + 523, + 595 + ], + [ + 574, + 594 + ], + [ + 586, + 590 + ], + [ + 586, + 582 + ], + [ + 573, + 573 + ], + [ + 571, + 505 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1355, + 412 + ], + [ + 1348, + 269 + ], + [ + 1344, + 272 + ], + [ + 1347, + 315 + ], + [ + 1350, + 377 + ], + [ + 1351, + 419 + ], + [ + 1355, + 414 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1345, + 373 + ], + [ + 1332, + 374 + ], + [ + 1332, + 366 + ], + [ + 1353, + 365 + ], + [ + 1353, + 373 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1332, + 310 + ], + [ + 1332, + 345 + ], + [ + 1357, + 345 + ], + [ + 1356, + 311 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1357, + 365 + ], + [ + 1356, + 345 + ], + [ + 1332, + 345 + ], + [ + 1331, + 366 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1410, + 409 + ], + [ + 1378, + 408 + ], + [ + 1349, + 408 + ], + [ + 1339, + 412 + ], + [ + 1330, + 421 + ], + [ + 1317, + 441 + ], + [ + 1311, + 436 + ], + [ + 1306, + 436 + ], + [ + 1305, + 445 + ], + [ + 1310, + 450 + ], + [ + 1297, + 461 + ], + [ + 1295, + 470 + ], + [ + 1293, + 499 + ], + [ + 1292, + 523 + ], + [ + 1296, + 527 + ], + [ + 1313, + 529 + ], + [ + 1316, + 533 + ], + [ + 1319, + 537 + ], + [ + 1340, + 539 + ], + [ + 1344, + 536 + ], + [ + 1346, + 521 + ], + [ + 1371, + 522 + ], + [ + 1422, + 526 + ], + [ + 1425, + 534 + ], + [ + 1430, + 538 + ], + [ + 1442, + 539 + ], + [ + 1448, + 533 + ], + [ + 1449, + 525 + ], + [ + 1459, + 527 + ], + [ + 1461, + 540 + ], + [ + 1465, + 545 + ], + [ + 1490, + 544 + ], + [ + 1493, + 540 + ], + [ + 1493, + 523 + ], + [ + 1495, + 500 + ], + [ + 1493, + 477 + ], + [ + 1491, + 459 + ], + [ + 1485, + 452 + ], + [ + 1472, + 434 + ], + [ + 1461, + 419 + ], + [ + 1454, + 415 + ], + [ + 1428, + 411 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1445, + 475 + ], + [ + 1445, + 463 + ], + [ + 1395, + 460 + ], + [ + 1395, + 472 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000065_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000065_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..ef66f5b7510a49be635f1c49097272be98a841be Binary files /dev/null and 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000065_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000065_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000065_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..76738a6d114e8b0c5710713d56299469ae661066 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000065_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..f66e5cc357a3340df68a4c7ad89493c797c20cf0 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ba1c4740197f889122ff5185d74a5dd4f112fafe Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..b0e83426b0d069334d9b70d2cb92d2e25fe10fd1 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..d315a0b60dbb97130fee07aea0184525e893243b --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000066_000019_gtFine_polygons.json @@ -0,0 +1,4597 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1086, + 0 + ], + [ + 1021, + 331 + ], + [ + 902, + 346 + ], + [ + 754, + 155 + ], + [ + 749, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 0, + 1024 + ], + [ + 1, + 355 + ], + [ + 2047, + 342 + ], + [ + 2048, + 1023 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 935, + 312 + ], + [ + 924, + 312 + ], + [ + 911, + 316 + ], + [ + 864, + 350 + ], + [ + 852, + 394 + ], + [ + 865, + 426 + ], + [ + 981, + 408 + ], + [ + 976, + 328 + ], + [ + 959, + 325 + ], + [ + 953, + 318 + ], + [ + 945, + 321 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1101, + 394 + ], + [ + 1121, + 241 + ], + [ + 1535, + 1 + ], + [ + 2048, + 2 + ], + [ + 2047, + 548 + ], + [ + 1203, + 486 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 939, + 404 + ], + [ + 939, + 374 + ], + [ + 922, + 375 + ], + [ + 920, + 365 + ], + [ + 870, + 364 + ], + [ + 838, + 383 + ], + [ + 833, + 422 + ], + [ + 962, + 440 + ], + [ + 982, + 434 + ], + [ + 989, + 434 + ], + [ + 1002, + 426 + ], + [ + 1010, + 403 + ], + [ + 979, + 394 + ], + [ + 968, + 394 + ], + [ + 964, + 392 + ], + [ + 957, + 385 + ], + [ + 953, + 393 + ], + [ + 954, + 403 + ], + [ + 945, + 404 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 882, + 397 + ], + [ + 888, + 390 + ], + [ + 900, + 391 + ], + [ + 911, + 383 + ], + [ 
+ 918, + 375 + ], + [ + 919, + 366 + ], + [ + 915, + 347 + ], + [ + 909, + 329 + ], + [ + 913, + 322 + ], + [ + 913, + 314 + ], + [ + 911, + 310 + ], + [ + 907, + 305 + ], + [ + 907, + 300 + ], + [ + 905, + 293 + ], + [ + 906, + 279 + ], + [ + 907, + 270 + ], + [ + 896, + 269 + ], + [ + 886, + 260 + ], + [ + 885, + 253 + ], + [ + 876, + 242 + ], + [ + 874, + 240 + ], + [ + 874, + 235 + ], + [ + 883, + 231 + ], + [ + 883, + 221 + ], + [ + 874, + 217 + ], + [ + 877, + 213 + ], + [ + 874, + 211 + ], + [ + 878, + 205 + ], + [ + 885, + 204 + ], + [ + 887, + 198 + ], + [ + 884, + 188 + ], + [ + 874, + 179 + ], + [ + 877, + 177 + ], + [ + 881, + 176 + ], + [ + 883, + 172 + ], + [ + 881, + 159 + ], + [ + 874, + 146 + ], + [ + 860, + 140 + ], + [ + 854, + 136 + ], + [ + 846, + 132 + ], + [ + 835, + 148 + ], + [ + 831, + 140 + ], + [ + 822, + 137 + ], + [ + 823, + 132 + ], + [ + 837, + 124 + ], + [ + 828, + 121 + ], + [ + 824, + 116 + ], + [ + 816, + 107 + ], + [ + 811, + 99 + ], + [ + 815, + 80 + ], + [ + 802, + 77 + ], + [ + 790, + 79 + ], + [ + 783, + 75 + ], + [ + 774, + 61 + ], + [ + 782, + 55 + ], + [ + 778, + 45 + ], + [ + 786, + 41 + ], + [ + 792, + 33 + ], + [ + 782, + 38 + ], + [ + 773, + 34 + ], + [ + 782, + 23 + ], + [ + 778, + 17 + ], + [ + 781, + 8 + ], + [ + 791, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 507 + ], + [ + 847, + 430 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 371, + 318 + ], + [ + 375, + 419 + ], + [ + 380, + 426 + ], + [ + 382, + 419 + ], + [ + 378, + 319 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 373, + 294 + ], + [ + 361, + 284 + ], + [ + 345, + 279 + ], + [ + 332, + 278 + ], + [ + 328, + 281 + ], + [ + 328, + 305 + ], + [ + 344, + 292 + ], + [ + 356, + 294 + ], + [ + 366, + 305 + ], + [ + 371, + 320 + ], + [ + 378, + 320 + ], + [ + 381, + 304 + ], + [ + 392, + 292 + ], + [ + 402, + 287 + ], + [ + 407, + 289 + ], + [ + 407, + 296 + ], + [ + 402, + 303 + ], + [ + 398, + 315 + ], + [ + 401, + 326 + ], + [ + 408, + 334 + ], + [ + 412, + 334 + ], + [ + 417, + 328 + ], + [ + 421, + 317 + ], + [ + 418, + 304 + ], + [ + 414, + 296 + ], + [ + 413, + 279 + ], + [ + 397, + 278 + ], + [ + 383, + 284 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 326, + 336 + ], + [ + 329, + 397 + ], + [ + 337, + 398 + ], + [ + 333, + 335 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 302, + 258 + ], + [ + 330, + 280 + ], + [ + 346, + 283 + ], + [ + 323, + 263 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 347, + 338 + ], + [ + 345, + 293 + ], + [ + 313, + 291 + ], + [ + 315, + 338 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 841, + 281 + ], + [ + 843, + 364 + ], + [ + 845, + 417 + ], + [ + 846, + 430 + ], + [ + 874, + 402 + ], + [ + 873, + 373 + ], + [ + 870, + 371 + ], + [ + 870, + 364 + ], + [ + 874, + 363 + ], + [ + 869, + 307 + ], + [ + 863, + 306 + ], + [ + 863, + 293 + ], + [ + 856, + 291 + ], + [ + 853, + 286 + ], + [ + 844, + 284 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 858, + 403 + ], + [ + 855, + 402 + ], + [ + 853, + 352 + ], + [ + 857, + 338 + ], + [ + 882, + 328 + ], + [ + 907, + 322 + ], + [ + 922, + 322 + ], + [ + 921, + 325 + ], + [ + 893, + 327 + ], + [ + 870, + 334 + ], + [ + 860, + 340 + ], + [ + 856, + 350 + ], + [ + 856, + 375 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 853, + 380 + ], + [ + 855, + 400 + ], + [ + 863, + 398 + ], + [ + 860, + 379 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 948, + 369 + ], + [ + 948, + 380 + ], + [ + 955, + 380 + 
], + [ + 955, + 369 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 841, + 447 + ], + [ + 830, + 445 + ], + [ + 804, + 413 + ], + [ + 815, + 412 + ], + [ + 836, + 412 + ], + [ + 847, + 419 + ], + [ + 848, + 424 + ], + [ + 848, + 438 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 819, + 415 + ], + [ + 803, + 413 + ], + [ + 790, + 413 + ], + [ + 799, + 452 + ], + [ + 809, + 453 + ], + [ + 815, + 449 + ], + [ + 826, + 446 + ], + [ + 831, + 445 + ], + [ + 832, + 438 + ], + [ + 828, + 425 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 804, + 422 + ], + [ + 798, + 417 + ], + [ + 790, + 413 + ], + [ + 784, + 453 + ], + [ + 793, + 453 + ], + [ + 799, + 453 + ], + [ + 804, + 448 + ], + [ + 806, + 439 + ], + [ + 807, + 432 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 768, + 398 + ], + [ + 752, + 398 + ], + [ + 747, + 401 + ], + [ + 744, + 403 + ], + [ + 724, + 403 + ], + [ + 735, + 469 + ], + [ + 749, + 469 + ], + [ + 761, + 466 + ], + [ + 773, + 466 + ], + [ + 785, + 462 + ], + [ + 789, + 455 + ], + [ + 796, + 448 + ], + [ + 798, + 436 + ], + [ + 795, + 422 + ], + [ + 787, + 410 + ], + [ + 778, + 403 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 966, + 379 + ], + [ + 959, + 358 + ], + [ + 967, + 349 + ], + [ + 966, + 324 + ], + [ + 962, + 322 + ], + [ + 959, + 318 + ], + [ + 952, + 319 + ], + [ + 942, + 315 + ], + [ + 939, + 312 + ], + [ + 937, + 304 + ], + [ + 935, + 298 + ], + [ + 923, + 292 + ], + [ + 938, + 283 + ], + [ + 929, + 281 + ], + [ + 913, + 262 + ], + [ + 922, + 256 + ], + [ + 940, + 246 + ], + [ + 921, + 238 + ], + [ + 928, + 236 + ], + [ + 940, + 234 + ], + [ + 938, + 231 + ], + [ + 928, + 227 + ], + [ + 927, + 222 + ], + [ + 937, + 222 + ], + [ + 932, + 215 + ], + [ + 920, + 209 + ], + [ + 911, + 192 + ], + [ + 920, + 189 + ], + [ + 931, + 191 + ], + [ + 940, + 187 + ], + [ + 956, + 199 + ], + [ + 943, + 178 + ], + [ + 936, + 160 + ], + [ + 925, + 168 + ], + [ + 915, + 163 + ], + [ + 919, + 159 + ], + [ + 927, + 159 + ], + [ + 933, + 155 + ], + [ + 923, + 148 + ], + [ + 923, + 131 + ], + [ + 933, + 123 + ], + [ + 950, + 123 + ], + [ + 950, + 113 + ], + [ + 960, + 105 + ], + [ + 943, + 105 + ], + [ + 923, + 98 + ], + [ + 911, + 94 + ], + [ + 906, + 85 + ], + [ + 928, + 73 + ], + [ + 915, + 64 + ], + [ + 902, + 54 + ], + [ + 916, + 46 + ], + [ + 943, + 51 + ], + [ + 931, + 43 + ], + [ + 925, + 32 + ], + [ + 934, + 19 + ], + [ + 942, + 23 + ], + [ + 942, + 33 + ], + [ + 953, + 27 + ], + [ + 953, + 19 + ], + [ + 951, + 10 + ], + [ + 949, + 1 + ], + [ + 990, + 1 + ], + [ + 1000, + 7 + ], + [ + 1004, + 14 + ], + [ + 1010, + 19 + ], + [ + 1012, + 13 + ], + [ + 1014, + 4 + ], + [ + 1011, + 1 + ], + [ + 1837, + 1 + ], + [ + 1831, + 7 + ], + [ + 1790, + 21 + ], + [ + 1778, + 35 + ], + [ + 1784, + 48 + ], + [ + 1763, + 58 + ], + [ + 1763, + 50 + ], + [ + 1767, + 31 + ], + [ + 1720, + 54 + ], + [ + 1729, + 79 + ], + [ + 1759, + 91 + ], + [ + 1778, + 107 + ], + [ + 1762, + 116 + ], + [ + 1661, + 120 + ], + [ + 1614, + 138 + ], + [ + 1584, + 143 + ], + [ + 1545, + 167 + ], + [ + 1526, + 165 + ], + [ + 1501, + 168 + ], + [ + 1511, + 186 + ], + [ + 1515, + 194 + ], + [ + 1489, + 208 + ], + [ + 1472, + 204 + ], + [ + 1446, + 215 + ], + [ + 1433, + 215 + ], + [ + 1444, + 257 + ], + [ + 1393, + 264 + ], + [ + 1391, + 240 + ], + [ + 1378, + 224 + ], + [ + 1387, + 207 + ], + [ + 1376, + 195 + ], + [ + 1361, + 194 + ], + [ + 1350, + 192 + ], + [ + 1342, + 202 + ], + [ + 1354, + 209 + ], + [ + 1357, + 217 + ], + [ + 1336, + 215 + ], + [ + 1333, + 225 
+ ], + [ + 1354, + 238 + ], + [ + 1333, + 252 + ], + [ + 1294, + 270 + ], + [ + 1280, + 273 + ], + [ + 1275, + 345 + ], + [ + 1280, + 417 + ], + [ + 1291, + 450 + ], + [ + 1254, + 448 + ], + [ + 1255, + 416 + ], + [ + 1253, + 336 + ], + [ + 1249, + 292 + ], + [ + 1232, + 294 + ], + [ + 1226, + 292 + ], + [ + 1210, + 304 + ], + [ + 1197, + 316 + ], + [ + 1193, + 336 + ], + [ + 1194, + 375 + ], + [ + 1197, + 412 + ], + [ + 1005, + 417 + ], + [ + 1005, + 404 + ], + [ + 992, + 400 + ], + [ + 982, + 399 + ], + [ + 976, + 394 + ], + [ + 973, + 389 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1006, + 374 + ], + [ + 1007, + 387 + ], + [ + 1038, + 387 + ], + [ + 1037, + 375 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1000, + 445 + ], + [ + 991, + 443 + ], + [ + 990, + 432 + ], + [ + 992, + 418 + ], + [ + 998, + 409 + ], + [ + 1003, + 406 + ], + [ + 1014, + 405 + ], + [ + 1025, + 404 + ], + [ + 1031, + 407 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1001, + 455 + ], + [ + 999, + 440 + ], + [ + 1000, + 425 + ], + [ + 1007, + 414 + ], + [ + 1014, + 409 + ], + [ + 1035, + 407 + ], + [ + 1038, + 434 + ], + [ + 1028, + 454 + ], + [ + 1013, + 453 + ], + [ + 1009, + 457 + ], + [ + 1003, + 456 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1060, + 400 + ], + [ + 1036, + 401 + ], + [ + 1030, + 422 + ], + [ + 1027, + 420 + ], + [ + 1022, + 420 + ], + [ + 1021, + 424 + ], + [ + 1022, + 427 + ], + [ + 1029, + 428 + ], + [ + 1027, + 434 + ], + [ + 1026, + 439 + ], + [ + 1027, + 456 + ], + [ + 1026, + 466 + ], + [ + 1031, + 471 + ], + [ + 1039, + 472 + ], + [ + 1042, + 465 + ], + [ + 1055, + 465 + ], + [ + 1064, + 410 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1176, + 400 + ], + [ + 1172, + 383 + ], + [ + 1167, + 373 + ], + [ + 1163, + 371 + ], + [ + 1117, + 370 + ], + [ + 1069, + 372 + ], + [ + 1064, + 377 + ], + [ + 1054, + 408 + ], + [ + 1051, + 410 + ], + [ + 1047, + 413 + ], + [ + 1047, + 418 + ], + [ + 1051, + 420 + ], + [ + 1050, + 442 + ], + [ + 1047, + 469 + ], + [ + 1048, + 484 + ], + [ + 1050, + 487 + ], + [ + 1053, + 489 + ], + [ + 1053, + 493 + ], + [ + 1061, + 496 + ], + [ + 1068, + 495 + ], + [ + 1070, + 483 + ], + [ + 1084, + 485 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1297, + 421 + ], + [ + 1296, + 404 + ], + [ + 1300, + 397 + ], + [ + 1299, + 392 + ], + [ + 1297, + 389 + ], + [ + 1297, + 382 + ], + [ + 1295, + 374 + ], + [ + 1289, + 372 + ], + [ + 1286, + 374 + ], + [ + 1286, + 383 + ], + [ + 1286, + 387 + ], + [ + 1278, + 388 + ], + [ + 1280, + 416 + ], + [ + 1282, + 425 + ], + [ + 1296, + 425 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1314, + 371 + ], + [ + 1295, + 376 + ], + [ + 1278, + 376 + ], + [ + 1278, + 370 + ], + [ + 1281, + 368 + ], + [ + 1296, + 364 + ], + [ + 1317, + 367 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1320, + 418 + ], + [ + 1247, + 420 + ], + [ + 1219, + 424 + ], + [ + 1218, + 436 + ], + [ + 1211, + 485 + ], + [ + 1209, + 521 + ], + [ + 1306, + 563 + ], + [ + 1331, + 466 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1313, + 255 + ], + [ + 1313, + 286 + ], + [ + 1312, + 299 + ], + [ + 1317, + 439 + ], + [ + 1479, + 436 + ], + [ + 1480, + 359 + ], + [ + 1473, + 251 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1208, + 418 + ], + [ + 1201, + 405 + ], + [ + 1195, + 401 + ], + [ + 1168, + 397 + ], + [ + 1121, + 397 + ], + [ + 1109, + 398 + ], + [ + 1106, + 401 + ], + [ + 1094, + 432 + ], + [ + 1091, + 432 + ], + [ + 1087, + 429 + ], + [ + 
1079, + 429 + ], + [ + 1074, + 431 + ], + [ + 1074, + 435 + ], + [ + 1076, + 439 + ], + [ + 1084, + 440 + ], + [ + 1083, + 450 + ], + [ + 1081, + 482 + ], + [ + 1081, + 497 + ], + [ + 1084, + 500 + ], + [ + 1085, + 507 + ], + [ + 1091, + 510 + ], + [ + 1106, + 510 + ], + [ + 1109, + 500 + ], + [ + 1169, + 500 + ], + [ + 1194, + 504 + ], + [ + 1226, + 470 + ], + [ + 1224, + 442 + ], + [ + 1216, + 429 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1136, + 443 + ], + [ + 1135, + 453 + ], + [ + 1179, + 454 + ], + [ + 1178, + 444 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 739, + 419 + ], + [ + 719, + 394 + ], + [ + 699, + 393 + ], + [ + 666, + 393 + ], + [ + 623, + 397 + ], + [ + 606, + 400 + ], + [ + 595, + 411 + ], + [ + 683, + 477 + ], + [ + 715, + 474 + ], + [ + 719, + 474 + ], + [ + 728, + 473 + ], + [ + 740, + 468 + ], + [ + 745, + 443 + ], + [ + 742, + 433 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 691, + 436 + ], + [ + 682, + 422 + ], + [ + 667, + 412 + ], + [ + 652, + 413 + ], + [ + 649, + 479 + ], + [ + 657, + 486 + ], + [ + 666, + 488 + ], + [ + 676, + 484 + ], + [ + 686, + 476 + ], + [ + 690, + 468 + ], + [ + 695, + 463 + ], + [ + 695, + 451 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 651, + 487 + ], + [ + 642, + 491 + ], + [ + 631, + 494 + ], + [ + 598, + 436 + ], + [ + 603, + 411 + ], + [ + 622, + 410 + ], + [ + 646, + 411 + ], + [ + 657, + 416 + ], + [ + 674, + 429 + ], + [ + 682, + 438 + ], + [ + 685, + 449 + ], + [ + 690, + 456 + ], + [ + 686, + 467 + ], + [ + 681, + 472 + ], + [ + 657, + 475 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 603, + 508 + ], + [ + 617, + 505 + ], + [ + 627, + 500 + ], + [ + 632, + 490 + ], + [ + 641, + 474 + ], + [ + 641, + 464 + ], + [ + 638, + 455 + ], + [ + 638, + 442 + ], + [ + 633, + 433 + ], + [ + 612, + 414 + ], + [ + 591, + 408 + ], + [ + 564, + 405 + ], + [ + 509, + 408 + ], + [ + 480, + 410 + ], + [ + 460, + 416 + ], + [ + 453, + 423 + ], + [ + 576, + 496 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 543, + 422 + ], + [ + 502, + 422 + ], + [ + 483, + 426 + ], + [ + 538, + 510 + ], + [ + 576, + 511 + ], + [ + 585, + 515 + ], + [ + 592, + 516 + ], + [ + 601, + 514 + ], + [ + 609, + 507 + ], + [ + 616, + 496 + ], + [ + 629, + 492 + ], + [ + 637, + 486 + ], + [ + 639, + 474 + ], + [ + 636, + 458 + ], + [ + 632, + 446 + ], + [ + 622, + 439 + ], + [ + 604, + 439 + ], + [ + 580, + 437 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 534, + 518 + ], + [ + 525, + 526 + ], + [ + 511, + 528 + ], + [ + 501, + 524 + ], + [ + 495, + 519 + ], + [ + 491, + 515 + ], + [ + 472, + 516 + ], + [ + 466, + 526 + ], + [ + 452, + 531 + ], + [ + 443, + 531 + ], + [ + 439, + 527 + ], + [ + 403, + 419 + ], + [ + 433, + 417 + ], + [ + 469, + 419 + ], + [ + 486, + 424 + ], + [ + 522, + 439 + ], + [ + 545, + 440 + ], + [ + 558, + 443 + ], + [ + 565, + 450 + ], + [ + 568, + 457 + ], + [ + 569, + 463 + ], + [ + 574, + 469 + ], + [ + 576, + 475 + ], + [ + 578, + 496 + ], + [ + 572, + 506 + ], + [ + 567, + 510 + ], + [ + 538, + 512 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 422, + 431 + ], + [ + 406, + 419 + ], + [ + 398, + 415 + ], + [ + 380, + 414 + ], + [ + 369, + 417 + ], + [ + 418, + 539 + ], + [ + 433, + 536 + ], + [ + 449, + 525 + ], + [ + 461, + 506 + ], + [ + 458, + 480 + ], + [ + 448, + 453 + ], + [ + 441, + 448 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 441, + 476 + ], + [ + 432, + 467 + ], + [ + 432, + 447 + ], + [ + 426, + 440 + ], + [ + 417, + 437 + ], 
+ [ + 393, + 431 + ], + [ + 377, + 550 + ], + [ + 384, + 553 + ], + [ + 410, + 548 + ], + [ + 421, + 534 + ], + [ + 430, + 528 + ], + [ + 440, + 515 + ], + [ + 444, + 496 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 259, + 390 + ], + [ + 207, + 389 + ], + [ + 153, + 391 + ], + [ + 120, + 395 + ], + [ + 106, + 401 + ], + [ + 91, + 413 + ], + [ + 311, + 551 + ], + [ + 328, + 559 + ], + [ + 341, + 567 + ], + [ + 362, + 566 + ], + [ + 377, + 558 + ], + [ + 392, + 540 + ], + [ + 408, + 531 + ], + [ + 422, + 516 + ], + [ + 423, + 504 + ], + [ + 421, + 486 + ], + [ + 413, + 477 + ], + [ + 412, + 458 + ], + [ + 408, + 442 + ], + [ + 400, + 435 + ], + [ + 395, + 431 + ], + [ + 367, + 405 + ], + [ + 366, + 400 + ], + [ + 347, + 395 + ], + [ + 303, + 390 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 363, + 462 + ], + [ + 367, + 475 + ], + [ + 398, + 471 + ], + [ + 393, + 459 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 155, + 406 + ], + [ + 89, + 408 + ], + [ + 51, + 412 + ], + [ + 25, + 417 + ], + [ + 207, + 568 + ], + [ + 237, + 570 + ], + [ + 249, + 580 + ], + [ + 267, + 581 + ], + [ + 281, + 577 + ], + [ + 292, + 568 + ], + [ + 305, + 554 + ], + [ + 311, + 539 + ], + [ + 316, + 522 + ], + [ + 311, + 496 + ], + [ + 307, + 486 + ], + [ + 299, + 454 + ], + [ + 279, + 437 + ], + [ + 259, + 419 + ], + [ + 246, + 413 + ], + [ + 214, + 409 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 259, + 510 + ], + [ + 260, + 525 + ], + [ + 298, + 521 + ], + [ + 297, + 506 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 128, + 590 + ], + [ + 139, + 604 + ], + [ + 148, + 612 + ], + [ + 162, + 613 + ], + [ + 178, + 609 + ], + [ + 195, + 597 + ], + [ + 206, + 582 + ], + [ + 211, + 571 + ], + [ + 234, + 567 + ], + [ + 245, + 554 + ], + [ + 250, + 533 + ], + [ + 249, + 503 + ], + [ + 240, + 492 + ], + [ + 237, + 475 + ], + [ + 229, + 465 + ], + [ + 218, + 457 + ], + [ + 165, + 456 + ], + [ + 145, + 445 + ], + [ + 74, + 421 + ], + [ + 33, + 416 + ], + [ + 1, + 416 + ], + [ + 0, + 544 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 170, + 543 + ], + [ + 167, + 525 + ], + [ + 222, + 519 + ], + [ + 223, + 537 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 32, + 465 + ], + [ + 0, + 447 + ], + [ + 0, + 649 + ], + [ + 33, + 638 + ], + [ + 49, + 628 + ], + [ + 57, + 615 + ], + [ + 59, + 610 + ], + [ + 94, + 602 + ], + [ + 127, + 591 + ], + [ + 138, + 570 + ], + [ + 143, + 546 + ], + [ + 139, + 535 + ], + [ + 132, + 518 + ], + [ + 132, + 504 + ], + [ + 127, + 488 + ], + [ + 115, + 478 + ], + [ + 104, + 471 + ], + [ + 62, + 468 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 91, + 524 + ], + [ + 82, + 503 + ], + [ + 6, + 510 + ], + [ + 12, + 533 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 836, + 507 + ], + [ + 837, + 480 + ], + [ + 834, + 468 + ], + [ + 834, + 456 + ], + [ + 837, + 439 + ], + [ + 832, + 436 + ], + [ + 829, + 432 + ], + [ + 831, + 427 + ], + [ + 843, + 426 + ], + [ + 849, + 414 + ], + [ + 858, + 403 + ], + [ + 863, + 400 + ], + [ + 871, + 396 + ], + [ + 922, + 394 + ], + [ + 938, + 396 + ], + [ + 944, + 403 + ], + [ + 954, + 424 + ], + [ + 966, + 424 + ], + [ + 970, + 426 + ], + [ + 971, + 436 + ], + [ + 968, + 437 + ], + [ + 961, + 439 + ], + [ + 964, + 449 + ], + [ + 966, + 460 + ], + [ + 966, + 473 + ], + [ + 965, + 479 + ], + [ + 965, + 502 + ], + [ + 964, + 507 + ], + [ + 950, + 508 + ], + [ + 949, + 504 + ], + [ + 949, + 495 + ], + [ + 935, + 493 + ], + [ + 922, + 492 + ], + [ + 890, 
+ 492 + ], + [ + 861, + 495 + ], + [ + 857, + 504 + ], + [ + 854, + 511 + ], + [ + 843, + 511 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 919, + 456 + ], + [ + 919, + 445 + ], + [ + 879, + 446 + ], + [ + 879, + 457 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1179, + 298 + ], + [ + 1177, + 524 + ], + [ + 1183, + 524 + ], + [ + 1183, + 297 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1150, + 523 + ], + [ + 1132, + 524 + ], + [ + 1123, + 527 + ], + [ + 1123, + 541 + ], + [ + 1162, + 570 + ], + [ + 1233, + 625 + ], + [ + 1381, + 727 + ], + [ + 1649, + 919 + ], + [ + 1750, + 990 + ], + [ + 1824, + 1024 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 834 + ], + [ + 1225, + 521 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1845, + 369 + ], + [ + 1837, + 364 + ], + [ + 1834, + 350 + ], + [ + 1830, + 347 + ], + [ + 1819, + 347 + ], + [ + 1812, + 355 + ], + [ + 1811, + 366 + ], + [ + 1813, + 370 + ], + [ + 1805, + 375 + ], + [ + 1802, + 383 + ], + [ + 1852, + 383 + ], + [ + 1849, + 370 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 917, + 318 + ], + [ + 917, + 337 + ], + [ + 926, + 337 + ], + [ + 926, + 318 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1850, + 347 + ], + [ + 1781, + 347 + ], + [ + 1783, + 341 + ], + [ + 1796, + 337 + ], + [ + 1816, + 330 + ], + [ + 1838, + 333 + ], + [ + 1851, + 338 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1772, + 377 + ], + [ + 1776, + 350 + ], + [ + 1763, + 339 + ], + [ + 1735, + 345 + ], + [ + 1710, + 362 + ], + [ + 1703, + 392 + ], + [ + 1772, + 387 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1748, + 355 + ], + [ + 1742, + 352 + ], + [ + 1736, + 353 + ], + [ + 1732, + 362 + ], + [ + 1731, + 367 + ], + [ + 1729, + 371 + ], + [ + 1721, + 373 + ], + [ + 1715, + 378 + ], + [ + 1711, + 388 + ], + [ + 1760, + 386 + ], + [ + 1753, + 375 + ], + [ + 1746, + 371 + ], + [ + 1748, + 365 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1830, + 408 + ], + [ + 1822, + 1 + ], + [ + 1805, + 1 + ], + [ + 1817, + 384 + ], + [ + 1819, + 403 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1930, + 105 + ], + [ + 1928, + 11 + ], + [ + 1822, + 9 + ], + [ + 1824, + 106 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1929, + 118 + ], + [ + 1824, + 116 + ], + [ + 1825, + 163 + ], + [ + 1929, + 165 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1605, + 342 + ], + [ + 1575, + 343 + ], + [ + 1553, + 345 + ], + [ + 1500, + 345 + ], + [ + 1477, + 306 + ], + [ + 1500, + 284 + ], + [ + 1522, + 270 + ], + [ + 1541, + 269 + ], + [ + 1569, + 264 + ], + [ + 1604, + 272 + ], + [ + 1633, + 286 + ], + [ + 1649, + 314 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1579, + 364 + ], + [ + 1560, + 355 + ], + [ + 1555, + 344 + ], + [ + 1555, + 330 + ], + [ + 1551, + 320 + ], + [ + 1544, + 316 + ], + [ + 1537, + 316 + ], + [ + 1528, + 319 + ], + [ + 1518, + 328 + ], + [ + 1514, + 358 + ], + [ + 1520, + 405 + ], + [ + 1601, + 395 + ], + [ + 1587, + 378 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1514, + 313 + ], + [ + 1504, + 315 + ], + [ + 1498, + 320 + ], + [ + 1492, + 333 + ], + [ + 1491, + 338 + ], + [ + 1498, + 343 + ], + [ + 1498, + 346 + ], + [ + 1490, + 348 + ], + [ + 1479, + 355 + ], + [ + 1474, + 361 + ], + [ + 1474, + 379 + ], + [ + 1467, + 395 + ], + [ + 1547, + 397 + ], + [ + 1546, + 377 + ], + [ + 1547, + 363 + ], + [ + 1544, + 360 + ], + [ + 1531, + 351 + ], + [ + 1532, + 338 + ], + [ + 1530, + 
328 + ], + [ + 1524, + 317 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 1374, + 398 + ], + [ + 1311, + 400 + ], + [ + 1312, + 434 + ], + [ + 1314, + 437 + ], + [ + 1318, + 462 + ], + [ + 1302, + 461 + ], + [ + 1302, + 465 + ], + [ + 1317, + 469 + ], + [ + 1320, + 481 + ], + [ + 1297, + 485 + ], + [ + 1303, + 573 + ], + [ + 1399, + 617 + ], + [ + 1505, + 668 + ], + [ + 2047, + 932 + ], + [ + 2047, + 369 + ], + [ + 1650, + 387 + ], + [ + 1485, + 393 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1195, + 456 + ], + [ + 1188, + 469 + ], + [ + 1188, + 520 + ], + [ + 1178, + 529 + ], + [ + 1190, + 535 + ], + [ + 1208, + 534 + ], + [ + 1205, + 466 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1197, + 539 + ], + [ + 1207, + 544 + ], + [ + 1240, + 543 + ], + [ + 1239, + 534 + ], + [ + 1232, + 528 + ], + [ + 1225, + 471 + ], + [ + 1215, + 455 + ], + [ + 1207, + 470 + ], + [ + 1207, + 533 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1243, + 443 + ], + [ + 1231, + 463 + ], + [ + 1236, + 543 + ], + [ + 1224, + 554 + ], + [ + 1232, + 558 + ], + [ + 1265, + 556 + ], + [ + 1273, + 552 + ], + [ + 1259, + 541 + ], + [ + 1255, + 460 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1287, + 461 + ], + [ + 1272, + 443 + ], + [ + 1261, + 463 + ], + [ + 1265, + 555 + ], + [ + 1252, + 568 + ], + [ + 1267, + 573 + ], + [ + 1304, + 568 + ], + [ + 1302, + 558 + ], + [ + 1296, + 552 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1163, + 280 + ], + [ + 1162, + 300 + ], + [ + 1202, + 300 + ], + [ + 1203, + 280 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1203, + 260 + ], + [ + 1162, + 260 + ], + [ + 1162, + 279 + ], + [ + 1203, + 279 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1203, + 241 + ], + [ + 1161, + 242 + ], + [ + 1162, + 261 + ], + [ + 1202, + 260 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1161, + 201 + ], + [ + 1161, + 242 + ], + [ + 1205, + 242 + ], + [ + 1204, + 202 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1249, + 267 + ], + [ + 1250, + 297 + ], + [ + 1279, + 295 + ], + [ + 1277, + 265 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1265, + 310 + ], + [ + 1250, + 311 + ], + [ + 1250, + 296 + ], + [ + 1279, + 295 + ], + [ + 1278, + 309 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1278, + 325 + ], + [ + 1278, + 309 + ], + [ + 1251, + 310 + ], + [ + 1251, + 327 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1279, + 340 + ], + [ + 1278, + 325 + ], + [ + 1252, + 327 + ], + [ + 1253, + 341 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1557, + 66 + ], + [ + 1556, + 80 + ], + [ + 1574, + 116 + ], + [ + 1576, + 137 + ], + [ + 1559, + 166 + ], + [ + 1549, + 167 + ], + [ + 1526, + 143 + ], + [ + 1526, + 118 + ], + [ + 1533, + 97 + ], + [ + 1544, + 81 + ], + [ + 1545, + 69 + ], + [ + 1533, + 62 + ], + [ + 1538, + 39 + ], + [ + 1572, + 41 + ], + [ + 1607, + 60 + ], + [ + 1628, + 82 + ], + [ + 1661, + 51 + ], + [ + 1685, + 41 + ], + [ + 1708, + 38 + ], + [ + 1718, + 40 + ], + [ + 1715, + 83 + ], + [ + 1725, + 100 + ], + [ + 1731, + 122 + ], + [ + 1732, + 150 + ], + [ + 1710, + 168 + ], + [ + 1695, + 170 + ], + [ + 1671, + 143 + ], + [ + 1680, + 121 + ], + [ + 1689, + 100 + ], + [ + 1703, + 81 + ], + [ + 1704, + 66 + ], + [ + 1680, + 71 + ], + [ + 1649, + 91 + ], + [ + 1638, + 113 + ], + [ + 1637, + 139 + ], + [ + 1639, + 388 + ], + [ + 1627, + 388 + ], + [ + 1623, + 114 + ], + [ + 1607, + 90 
+ ], + [ + 1585, + 74 + ], + [ + 1567, + 67 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 855, + 364 + ], + [ + 850, + 366 + ], + [ + 850, + 369 + ], + [ + 851, + 373 + ], + [ + 855, + 374 + ], + [ + 860, + 371 + ], + [ + 860, + 365 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000067_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000067_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..4e84ba1b0971b00228783314e82ea9c35e4f0ef5 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000067_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000068_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000068_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ff542026970c281a57366b088cf21ad11a7b9898 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000068_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000068_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000068_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..2fca96c3ceaad8ad29ba68d3e9607e139678210a --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000068_000019_gtFine_polygons.json @@ -0,0 +1,7535 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 0, + 485 + ], + [ + 181, + 339 + ], + [ + 171, + 1 + ], + [ + 0, + 0 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 56, + 495 + ], + [ + 0, + 506 + ], + [ + 0, + 410 + ], + [ + 10, + 403 + ], + [ + 15, + 394 + ], + [ + 2, + 387 + ], + [ + 13, + 381 + ], + [ + 13, + 377 + ], + [ + 24, + 367 + ], + [ + 18, + 358 + ], + [ + 19, + 357 + ], + [ + 15, + 335 + ], + [ + 26, + 357 + ], + [ + 30, + 355 + ], + [ + 35, + 346 + ], + [ + 33, + 337 + ], + [ + 45, + 330 + ], + [ + 64, + 334 + ], + [ + 68, + 335 + ], + [ + 68, + 339 + ], + [ + 75, + 340 + ], + [ + 83, + 330 + ], + [ + 93, + 330 + ], + [ + 101, + 322 + ], + [ + 123, + 325 + ], + [ + 80, + 482 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2049, + 445 + ], + [ + 1972, + 451 + ], + [ + 1536, + 481 + ], + [ + 901, + 520 + ], + [ + 385, + 576 + ], + [ + 0, + 669 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 535, + 589 + ], + [ + 379, + 630 + ], + [ + 207, + 677 + ], + [ + 52, + 719 + ], + [ + 0, + 733 + ], + [ + 0, + 540 + ], + [ + 241, + 463 + ], + [ + 1127, + 414 + ], + [ + 2048, + 373 + ], + [ + 2048, + 458 + ], + [ + 2009, + 462 + ], + 
[ + 1953, + 467 + ], + [ + 1833, + 477 + ], + [ + 1639, + 492 + ], + [ + 1461, + 505 + ], + [ + 1330, + 514 + ], + [ + 1142, + 528 + ], + [ + 1009, + 538 + ], + [ + 879, + 548 + ], + [ + 776, + 556 + ], + [ + 702, + 562 + ], + [ + 616, + 574 + ], + [ + 567, + 582 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1993, + 497 + ], + [ + 1946, + 503 + ], + [ + 1902, + 507 + ], + [ + 1845, + 511 + ], + [ + 1781, + 519 + ], + [ + 1744, + 523 + ], + [ + 1693, + 530 + ], + [ + 1668, + 536 + ], + [ + 1642, + 544 + ], + [ + 1626, + 548 + ], + [ + 1575, + 558 + ], + [ + 1539, + 571 + ], + [ + 1504, + 586 + ], + [ + 1488, + 598 + ], + [ + 1488, + 624 + ], + [ + 1496, + 640 + ], + [ + 1512, + 656 + ], + [ + 1571, + 692 + ], + [ + 1674, + 752 + ], + [ + 1822, + 837 + ], + [ + 2048, + 962 + ], + [ + 2048, + 489 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 79, + 60 + ], + [ + 94, + 336 + ], + [ + 143, + 503 + ], + [ + 268, + 509 + ], + [ + 450, + 502 + ], + [ + 681, + 496 + ], + [ + 823, + 497 + ], + [ + 871, + 488 + ], + [ + 1002, + 482 + ], + [ + 1050, + 479 + ], + [ + 1498, + 458 + ], + [ + 2048, + 429 + ], + [ + 2048, + 1 + ], + [ + 116, + 1 + ], + [ + 116, + 6 + ], + [ + 98, + 7 + ], + [ + 97, + 28 + ], + [ + 90, + 28 + ], + [ + 90, + 61 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 242, + 455 + ], + [ + 242, + 448 + ], + [ + 238, + 445 + ], + [ + 233, + 446 + ], + [ + 232, + 448 + ], + [ + 233, + 452 + ], + [ + 233, + 455 + ], + [ + 228, + 456 + ], + [ + 224, + 464 + ], + [ + 222, + 478 + ], + [ + 219, + 495 + ], + [ + 248, + 498 + ], + [ + 248, + 471 + ], + [ + 246, + 461 + ], + [ + 244, + 457 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 274, + 513 + ], + [ + 276, + 500 + ], + [ + 276, + 495 + ], + [ + 277, + 492 + ], + [ + 276, + 485 + ], + [ + 278, + 482 + ], + [ + 279, + 478 + ], + [ + 277, + 466 + ], + [ + 276, + 455 + ], + [ + 270, + 451 + ], + [ + 271, + 447 + ], + [ + 270, + 443 + ], + [ + 266, + 442 + ], + [ + 263, + 444 + ], + [ + 263, + 450 + ], + [ + 257, + 455 + ], + [ + 255, + 466 + ], + [ + 254, + 489 + ], + [ + 253, + 512 + ], + [ + 268, + 529 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 316, + 488 + ], + [ + 317, + 475 + ], + [ + 318, + 470 + ], + [ + 322, + 466 + ], + [ + 322, + 461 + ], + [ + 318, + 452 + ], + [ + 314, + 449 + ], + [ + 312, + 446 + ], + [ + 312, + 442 + ], + [ + 309, + 439 + ], + [ + 305, + 440 + ], + [ + 303, + 445 + ], + [ + 303, + 449 + ], + [ + 299, + 452 + ], + [ + 296, + 462 + ], + [ + 299, + 471 + ], + [ + 300, + 474 + ], + [ + 305, + 487 + ], + [ + 305, + 505 + ], + [ + 307, + 508 + ], + [ + 312, + 509 + ], + [ + 317, + 509 + ], + [ + 318, + 506 + ], + [ + 317, + 494 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 339, + 449 + ], + [ + 338, + 444 + ], + [ + 334, + 441 + ], + [ + 329, + 443 + ], + [ + 328, + 447 + ], + [ + 328, + 454 + ], + [ + 326, + 462 + ], + [ + 330, + 475 + ], + [ + 332, + 478 + ], + [ + 328, + 500 + ], + [ + 326, + 509 + ], + [ + 330, + 510 + ], + [ + 345, + 508 + ], + [ + 342, + 500 + ], + [ + 342, + 479 + ], + [ + 347, + 474 + ], + [ + 350, + 466 + ], + [ + 345, + 453 + ], + [ + 342, + 451 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 452, + 468 + ], + [ + 450, + 472 + ], + [ + 449, + 484 + ], + [ + 442, + 490 + ], + [ + 438, + 506 + ], + [ + 442, + 516 + ], + [ + 449, + 518 + ], + [ + 457, + 513 + ], + [ + 462, + 506 + ], + [ + 464, + 497 + ], + [ + 463, + 489 + ], + [ + 462, + 483 + ], + [ + 466, + 485 + ], + [ + 469, + 493 + ], + [ + 
472, + 498 + ], + [ + 480, + 500 + ], + [ + 478, + 470 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 521, + 504 + ], + [ + 514, + 511 + ], + [ + 507, + 512 + ], + [ + 500, + 504 + ], + [ + 495, + 480 + ], + [ + 499, + 469 + ], + [ + 519, + 471 + ], + [ + 527, + 479 + ], + [ + 526, + 497 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 446, + 402 + ], + [ + 457, + 395 + ], + [ + 483, + 390 + ], + [ + 525, + 397 + ], + [ + 527, + 400 + ], + [ + 523, + 402 + ], + [ + 527, + 478 + ], + [ + 518, + 483 + ], + [ + 504, + 484 + ], + [ + 506, + 516 + ], + [ + 495, + 516 + ], + [ + 494, + 513 + ], + [ + 481, + 515 + ], + [ + 480, + 517 + ], + [ + 477, + 517 + ], + [ + 475, + 485 + ], + [ + 454, + 485 + ], + [ + 449, + 406 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 33, + 611 + ], + [ + 0, + 611 + ], + [ + 0, + 515 + ], + [ + 235, + 519 + ], + [ + 234, + 600 + ], + [ + 99, + 609 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 149, + 335 + ], + [ + 129, + 334 + ], + [ + 123, + 325 + ], + [ + 101, + 321 + ], + [ + 92, + 329 + ], + [ + 82, + 330 + ], + [ + 74, + 340 + ], + [ + 68, + 340 + ], + [ + 68, + 335 + ], + [ + 45, + 329 + ], + [ + 32, + 337 + ], + [ + 34, + 346 + ], + [ + 28, + 357 + ], + [ + 18, + 341 + ], + [ + 19, + 336 + ], + [ + 15, + 336 + ], + [ + 13, + 341 + ], + [ + 17, + 344 + ], + [ + 18, + 357 + ], + [ + 15, + 359 + ], + [ + 25, + 363 + ], + [ + 22, + 368 + ], + [ + 11, + 377 + ], + [ + 9, + 381 + ], + [ + 0, + 387 + ], + [ + 7, + 392 + ], + [ + 6, + 399 + ], + [ + 8, + 403 + ], + [ + 0, + 408 + ], + [ + 0, + 545 + ], + [ + 23, + 550 + ], + [ + 32, + 547 + ], + [ + 47, + 536 + ], + [ + 61, + 533 + ], + [ + 80, + 534 + ], + [ + 89, + 538 + ], + [ + 74, + 550 + ], + [ + 73, + 565 + ], + [ + 101, + 584 + ], + [ + 118, + 572 + ], + [ + 145, + 580 + ], + [ + 159, + 580 + ], + [ + 178, + 586 + ], + [ + 183, + 599 + ], + [ + 195, + 607 + ], + [ + 221, + 612 + ], + [ + 238, + 609 + ], + [ + 238, + 594 + ], + [ + 242, + 588 + ], + [ + 257, + 593 + ], + [ + 272, + 593 + ], + [ + 292, + 581 + ], + [ + 290, + 568 + ], + [ + 294, + 556 + ], + [ + 286, + 553 + ], + [ + 296, + 535 + ], + [ + 306, + 522 + ], + [ + 296, + 513 + ], + [ + 283, + 504 + ], + [ + 273, + 495 + ], + [ + 266, + 474 + ], + [ + 258, + 470 + ], + [ + 245, + 470 + ], + [ + 240, + 476 + ], + [ + 233, + 479 + ], + [ + 214, + 476 + ], + [ + 206, + 472 + ], + [ + 197, + 472 + ], + [ + 196, + 464 + ], + [ + 206, + 457 + ], + [ + 214, + 452 + ], + [ + 205, + 446 + ], + [ + 212, + 435 + ], + [ + 227, + 435 + ], + [ + 236, + 428 + ], + [ + 227, + 405 + ], + [ + 224, + 388 + ], + [ + 236, + 390 + ], + [ + 244, + 384 + ], + [ + 218, + 376 + ], + [ + 223, + 366 + ], + [ + 224, + 351 + ], + [ + 216, + 342 + ], + [ + 203, + 344 + ], + [ + 191, + 337 + ], + [ + 179, + 333 + ], + [ + 177, + 324 + ], + [ + 163, + 320 + ], + [ + 155, + 324 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 35, + 670 + ], + [ + 30, + 656 + ], + [ + 28, + 651 + ], + [ + 25, + 497 + ], + [ + 27, + 488 + ], + [ + 24, + 479 + ], + [ + 6, + 480 + ], + [ + 2, + 486 + ], + [ + 3, + 492 + ], + [ + 7, + 498 + ], + [ + 9, + 654 + ], + [ + 0, + 666 + ], + [ + 0, + 673 + ], + [ + 22, + 674 + ], + [ + 35, + 673 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 129, + 646 + ], + [ + 131, + 643 + ], + [ + 137, + 638 + ], + [ + 139, + 625 + ], + [ + 109, + 498 + ], + [ + 106, + 489 + ], + [ + 107, + 484 + ], + [ + 111, + 481 + ], + [ + 118, + 480 + ], + [ + 123, + 485 + ], + [ + 156, + 624 + ], + [ + 161, + 627 + 
], + [ + 169, + 637 + ], + [ + 171, + 644 + ], + [ + 147, + 648 + ], + [ + 133, + 649 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 245, + 623 + ], + [ + 245, + 620 + ], + [ + 252, + 607 + ], + [ + 244, + 476 + ], + [ + 247, + 470 + ], + [ + 251, + 467 + ], + [ + 257, + 468 + ], + [ + 263, + 470 + ], + [ + 265, + 473 + ], + [ + 264, + 480 + ], + [ + 265, + 487 + ], + [ + 271, + 496 + ], + [ + 289, + 511 + ], + [ + 306, + 520 + ], + [ + 322, + 524 + ], + [ + 337, + 525 + ], + [ + 351, + 523 + ], + [ + 369, + 515 + ], + [ + 385, + 503 + ], + [ + 396, + 491 + ], + [ + 402, + 483 + ], + [ + 399, + 474 + ], + [ + 400, + 468 + ], + [ + 406, + 466 + ], + [ + 412, + 467 + ], + [ + 414, + 471 + ], + [ + 414, + 480 + ], + [ + 417, + 574 + ], + [ + 419, + 579 + ], + [ + 420, + 584 + ], + [ + 426, + 587 + ], + [ + 424, + 590 + ], + [ + 400, + 591 + ], + [ + 398, + 589 + ], + [ + 404, + 584 + ], + [ + 403, + 497 + ], + [ + 388, + 508 + ], + [ + 368, + 521 + ], + [ + 351, + 527 + ], + [ + 333, + 529 + ], + [ + 316, + 528 + ], + [ + 296, + 522 + ], + [ + 281, + 512 + ], + [ + 261, + 493 + ], + [ + 265, + 605 + ], + [ + 276, + 618 + ], + [ + 271, + 621 + ], + [ + 256, + 623 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 820, + 501 + ], + [ + 818, + 444 + ], + [ + 823, + 443 + ], + [ + 817, + 438 + ], + [ + 814, + 433 + ], + [ + 815, + 424 + ], + [ + 818, + 419 + ], + [ + 827, + 414 + ], + [ + 836, + 414 + ], + [ + 844, + 417 + ], + [ + 849, + 430 + ], + [ + 847, + 438 + ], + [ + 855, + 435 + ], + [ + 858, + 434 + ], + [ + 855, + 431 + ], + [ + 853, + 429 + ], + [ + 853, + 427 + ], + [ + 856, + 426 + ], + [ + 858, + 426 + ], + [ + 858, + 423 + ], + [ + 855, + 421 + ], + [ + 853, + 416 + ], + [ + 855, + 413 + ], + [ + 858, + 403 + ], + [ + 861, + 399 + ], + [ + 865, + 395 + ], + [ + 864, + 385 + ], + [ + 868, + 384 + ], + [ + 873, + 385 + ], + [ + 874, + 387 + ], + [ + 871, + 390 + ], + [ + 879, + 401 + ], + [ + 883, + 426 + ], + [ + 883, + 429 + ], + [ + 881, + 429 + ], + [ + 881, + 443 + ], + [ + 881, + 449 + ], + [ + 884, + 457 + ], + [ + 884, + 444 + ], + [ + 887, + 442 + ], + [ + 890, + 444 + ], + [ + 898, + 490 + ], + [ + 906, + 503 + ], + [ + 875, + 504 + ], + [ + 879, + 499 + ], + [ + 852, + 500 + ], + [ + 850, + 503 + ], + [ + 832, + 504 + ], + [ + 823, + 504 + ], + [ + 820, + 504 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 581, + 387 + ], + [ + 586, + 380 + ], + [ + 594, + 374 + ], + [ + 617, + 372 + ], + [ + 633, + 374 + ], + [ + 642, + 377 + ], + [ + 653, + 386 + ], + [ + 634, + 391 + ], + [ + 605, + 391 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 527, + 517 + ], + [ + 518, + 514 + ], + [ + 512, + 506 + ], + [ + 511, + 498 + ], + [ + 512, + 487 + ], + [ + 517, + 481 + ], + [ + 521, + 479 + ], + [ + 522, + 472 + ], + [ + 526, + 471 + ], + [ + 535, + 471 + ], + [ + 538, + 516 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 604, + 418 + ], + [ + 599, + 415 + ], + [ + 593, + 401 + ], + [ + 588, + 396 + ], + [ + 582, + 397 + ], + [ + 577, + 407 + ], + [ + 574, + 415 + ], + [ + 576, + 418 + ], + [ + 569, + 424 + ], + [ + 564, + 434 + ], + [ + 567, + 488 + ], + [ + 596, + 503 + ], + [ + 609, + 498 + ], + [ + 610, + 434 + ], + [ + 607, + 418 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 555, + 414 + ], + [ + 557, + 406 + ], + [ + 554, + 397 + ], + [ + 545, + 395 + ], + [ + 536, + 400 + ], + [ + 534, + 408 + ], + [ + 534, + 418 + ], + [ + 528, + 423 + ], + [ + 523, + 438 + ], + [ + 524, + 449 + ], + [ + 529, + 452 + ], + [ 
+ 533, + 453 + ], + [ + 533, + 468 + ], + [ + 532, + 497 + ], + [ + 530, + 528 + ], + [ + 525, + 532 + ], + [ + 525, + 538 + ], + [ + 531, + 540 + ], + [ + 540, + 541 + ], + [ + 544, + 540 + ], + [ + 543, + 532 + ], + [ + 548, + 499 + ], + [ + 573, + 478 + ], + [ + 575, + 467 + ], + [ + 573, + 455 + ], + [ + 576, + 449 + ], + [ + 577, + 441 + ], + [ + 567, + 420 + ], + [ + 560, + 417 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 653, + 405 + ], + [ + 648, + 401 + ], + [ + 640, + 402 + ], + [ + 637, + 408 + ], + [ + 635, + 415 + ], + [ + 637, + 420 + ], + [ + 630, + 421 + ], + [ + 623, + 432 + ], + [ + 621, + 490 + ], + [ + 625, + 512 + ], + [ + 637, + 513 + ], + [ + 637, + 521 + ], + [ + 635, + 526 + ], + [ + 634, + 528 + ], + [ + 636, + 529 + ], + [ + 643, + 530 + ], + [ + 651, + 529 + ], + [ + 656, + 527 + ], + [ + 656, + 520 + ], + [ + 656, + 503 + ], + [ + 654, + 497 + ], + [ + 654, + 483 + ], + [ + 659, + 470 + ], + [ + 660, + 459 + ], + [ + 659, + 441 + ], + [ + 661, + 434 + ], + [ + 661, + 428 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 622, + 425 + ], + [ + 618, + 415 + ], + [ + 610, + 409 + ], + [ + 605, + 408 + ], + [ + 601, + 411 + ], + [ + 597, + 414 + ], + [ + 597, + 423 + ], + [ + 598, + 429 + ], + [ + 596, + 437 + ], + [ + 594, + 455 + ], + [ + 591, + 467 + ], + [ + 590, + 479 + ], + [ + 608, + 541 + ], + [ + 626, + 543 + ], + [ + 631, + 542 + ], + [ + 633, + 539 + ], + [ + 631, + 530 + ], + [ + 632, + 520 + ], + [ + 632, + 512 + ], + [ + 629, + 504 + ], + [ + 629, + 488 + ], + [ + 635, + 484 + ], + [ + 639, + 470 + ], + [ + 639, + 452 + ], + [ + 639, + 441 + ], + [ + 635, + 437 + ], + [ + 629, + 433 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 547, + 549 + ], + [ + 548, + 490 + ], + [ + 542, + 480 + ], + [ + 566, + 476 + ], + [ + 577, + 476 + ], + [ + 590, + 477 + ], + [ + 607, + 481 + ], + [ + 611, + 483 + ], + [ + 610, + 557 + ], + [ + 569, + 561 + ], + [ + 544, + 559 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 709, + 421 + ], + [ + 698, + 411 + ], + [ + 682, + 409 + ], + [ + 680, + 420 + ], + [ + 678, + 424 + ], + [ + 678, + 429 + ], + [ + 674, + 439 + ], + [ + 674, + 446 + ], + [ + 676, + 454 + ], + [ + 681, + 454 + ], + [ + 682, + 454 + ], + [ + 678, + 463 + ], + [ + 675, + 480 + ], + [ + 671, + 497 + ], + [ + 675, + 499 + ], + [ + 677, + 506 + ], + [ + 680, + 514 + ], + [ + 678, + 521 + ], + [ + 677, + 526 + ], + [ + 672, + 530 + ], + [ + 672, + 533 + ], + [ + 675, + 534 + ], + [ + 683, + 533 + ], + [ + 691, + 531 + ], + [ + 692, + 527 + ], + [ + 689, + 520 + ], + [ + 689, + 507 + ], + [ + 689, + 499 + ], + [ + 689, + 494 + ], + [ + 697, + 494 + ], + [ + 701, + 499 + ], + [ + 701, + 506 + ], + [ + 703, + 516 + ], + [ + 701, + 528 + ], + [ + 700, + 534 + ], + [ + 703, + 537 + ], + [ + 708, + 536 + ], + [ + 711, + 532 + ], + [ + 711, + 525 + ], + [ + 709, + 517 + ], + [ + 711, + 507 + ], + [ + 711, + 499 + ], + [ + 711, + 492 + ], + [ + 717, + 489 + ], + [ + 721, + 484 + ], + [ + 721, + 454 + ], + [ + 715, + 439 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 764, + 428 + ], + [ + 755, + 419 + ], + [ + 752, + 410 + ], + [ + 748, + 401 + ], + [ + 741, + 401 + ], + [ + 733, + 409 + ], + [ + 732, + 417 + ], + [ + 731, + 420 + ], + [ + 727, + 428 + ], + [ + 723, + 442 + ], + [ + 720, + 465 + ], + [ + 719, + 479 + ], + [ + 722, + 484 + ], + [ + 726, + 484 + ], + [ + 729, + 478 + ], + [ + 731, + 468 + ], + [ + 736, + 483 + ], + [ + 739, + 499 + ], + [ + 741, + 508 + ], + [ + 739, + 526 + ], + [ + 733, + 533 + 
], + [ + 733, + 537 + ], + [ + 736, + 539 + ], + [ + 747, + 537 + ], + [ + 750, + 539 + ], + [ + 753, + 540 + ], + [ + 759, + 539 + ], + [ + 761, + 536 + ], + [ + 764, + 532 + ], + [ + 765, + 528 + ], + [ + 760, + 520 + ], + [ + 756, + 510 + ], + [ + 754, + 499 + ], + [ + 757, + 486 + ], + [ + 761, + 469 + ], + [ + 763, + 466 + ], + [ + 769, + 460 + ], + [ + 770, + 433 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 791, + 418 + ], + [ + 781, + 413 + ], + [ + 780, + 404 + ], + [ + 767, + 400 + ], + [ + 762, + 408 + ], + [ + 761, + 417 + ], + [ + 760, + 423 + ], + [ + 764, + 431 + ], + [ + 764, + 441 + ], + [ + 764, + 449 + ], + [ + 766, + 460 + ], + [ + 766, + 466 + ], + [ + 763, + 478 + ], + [ + 765, + 490 + ], + [ + 767, + 494 + ], + [ + 770, + 494 + ], + [ + 775, + 504 + ], + [ + 775, + 525 + ], + [ + 772, + 532 + ], + [ + 772, + 535 + ], + [ + 777, + 536 + ], + [ + 782, + 538 + ], + [ + 788, + 537 + ], + [ + 791, + 532 + ], + [ + 793, + 524 + ], + [ + 789, + 512 + ], + [ + 787, + 497 + ], + [ + 792, + 485 + ], + [ + 796, + 461 + ], + [ + 794, + 451 + ], + [ + 791, + 447 + ], + [ + 798, + 445 + ], + [ + 807, + 438 + ], + [ + 806, + 434 + ], + [ + 796, + 421 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 716, + 401 + ], + [ + 697, + 393 + ], + [ + 679, + 392 + ], + [ + 668, + 395 + ], + [ + 647, + 404 + ], + [ + 635, + 418 + ], + [ + 638, + 424 + ], + [ + 661, + 426 + ], + [ + 673, + 421 + ], + [ + 687, + 420 + ], + [ + 700, + 412 + ], + [ + 715, + 407 + ], + [ + 722, + 401 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 748, + 387 + ], + [ + 724, + 394 + ], + [ + 711, + 409 + ], + [ + 743, + 409 + ], + [ + 757, + 408 + ], + [ + 769, + 408 + ], + [ + 793, + 405 + ], + [ + 777, + 393 + ], + [ + 755, + 387 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1090, + 424 + ], + [ + 1038, + 425 + ], + [ + 1038, + 429 + ], + [ + 1036, + 452 + ], + [ + 1038, + 453 + ], + [ + 1037, + 461 + ], + [ + 1030, + 459 + ], + [ + 1030, + 470 + ], + [ + 1032, + 475 + ], + [ + 1037, + 478 + ], + [ + 1041, + 479 + ], + [ + 1081, + 481 + ], + [ + 1094, + 475 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1187, + 433 + ], + [ + 1187, + 489 + ], + [ + 1219, + 489 + ], + [ + 1218, + 431 + ], + [ + 1214, + 430 + ], + [ + 1218, + 418 + ], + [ + 1214, + 408 + ], + [ + 1205, + 402 + ], + [ + 1193, + 403 + ], + [ + 1186, + 411 + ], + [ + 1185, + 421 + ], + [ + 1187, + 425 + ], + [ + 1192, + 431 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 972, + 375 + ], + [ + 973, + 425 + ], + [ + 1006, + 425 + ], + [ + 1004, + 374 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1066, + 454 + ], + [ + 1069, + 445 + ], + [ + 1066, + 426 + ], + [ + 1059, + 414 + ], + [ + 1057, + 414 + ], + [ + 1056, + 410 + ], + [ + 1051, + 408 + ], + [ + 1048, + 411 + ], + [ + 1049, + 418 + ], + [ + 1049, + 423 + ], + [ + 1049, + 430 + ], + [ + 1041, + 430 + ], + [ + 1036, + 430 + ], + [ + 1032, + 433 + ], + [ + 1035, + 436 + ], + [ + 1039, + 435 + ], + [ + 1046, + 435 + ], + [ + 1051, + 438 + ], + [ + 1052, + 442 + ], + [ + 1049, + 446 + ], + [ + 1053, + 452 + ], + [ + 1055, + 469 + ], + [ + 1066, + 470 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1022, + 500 + ], + [ + 1019, + 439 + ], + [ + 1020, + 431 + ], + [ + 1017, + 420 + ], + [ + 1011, + 415 + ], + [ + 1002, + 414 + ], + [ + 996, + 415 + ], + [ + 988, + 423 + ], + [ + 987, + 430 + ], + [ + 989, + 438 + ], + [ + 987, + 439 + ], + [ + 989, + 501 + ], + [ + 1012, + 501 + ] + ] + }, + { + "label": 
"pole", + "polygon": [ + [ + 1086, + 319 + ], + [ + 1089, + 461 + ], + [ + 1096, + 461 + ], + [ + 1093, + 319 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1178, + 461 + ], + [ + 1182, + 460 + ], + [ + 1179, + 446 + ], + [ + 1176, + 440 + ], + [ + 1171, + 419 + ], + [ + 1168, + 412 + ], + [ + 1164, + 408 + ], + [ + 1160, + 399 + ], + [ + 1157, + 396 + ], + [ + 1154, + 396 + ], + [ + 1151, + 398 + ], + [ + 1150, + 403 + ], + [ + 1147, + 406 + ], + [ + 1140, + 415 + ], + [ + 1138, + 430 + ], + [ + 1139, + 441 + ], + [ + 1141, + 463 + ], + [ + 1147, + 463 + ], + [ + 1151, + 483 + ], + [ + 1175, + 471 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1259, + 429 + ], + [ + 1254, + 413 + ], + [ + 1251, + 406 + ], + [ + 1247, + 403 + ], + [ + 1245, + 398 + ], + [ + 1241, + 396 + ], + [ + 1237, + 398 + ], + [ + 1235, + 401 + ], + [ + 1230, + 404 + ], + [ + 1226, + 416 + ], + [ + 1225, + 428 + ], + [ + 1228, + 438 + ], + [ + 1233, + 441 + ], + [ + 1235, + 457 + ], + [ + 1237, + 470 + ], + [ + 1242, + 473 + ], + [ + 1248, + 472 + ], + [ + 1251, + 458 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1472, + 347 + ], + [ + 1473, + 397 + ], + [ + 1478, + 397 + ], + [ + 1477, + 346 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1577, + 346 + ], + [ + 1578, + 396 + ], + [ + 1584, + 397 + ], + [ + 1582, + 346 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1674, + 348 + ], + [ + 1676, + 398 + ], + [ + 1671, + 397 + ], + [ + 1670, + 345 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1266, + 465 + ], + [ + 1267, + 448 + ], + [ + 1267, + 438 + ], + [ + 1268, + 431 + ], + [ + 1274, + 433 + ], + [ + 1280, + 437 + ], + [ + 1287, + 437 + ], + [ + 1289, + 433 + ], + [ + 1293, + 433 + ], + [ + 1299, + 426 + ], + [ + 1304, + 419 + ], + [ + 1306, + 415 + ], + [ + 1312, + 411 + ], + [ + 1319, + 407 + ], + [ + 1325, + 398 + ], + [ + 1330, + 398 + ], + [ + 1336, + 411 + ], + [ + 1339, + 415 + ], + [ + 1344, + 410 + ], + [ + 1349, + 409 + ], + [ + 1354, + 397 + ], + [ + 1360, + 397 + ], + [ + 1367, + 398 + ], + [ + 1374, + 401 + ], + [ + 1374, + 407 + ], + [ + 1373, + 421 + ], + [ + 1383, + 422 + ], + [ + 1390, + 420 + ], + [ + 1400, + 422 + ], + [ + 1412, + 423 + ], + [ + 1422, + 417 + ], + [ + 1422, + 395 + ], + [ + 1804, + 385 + ], + [ + 1972, + 376 + ], + [ + 2018, + 399 + ], + [ + 1999, + 449 + ], + [ + 1785, + 466 + ], + [ + 1531, + 479 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 1529, + 491 + ], + [ + 1443, + 491 + ], + [ + 1446, + 468 + ], + [ + 1528, + 470 + ], + [ + 1999, + 443 + ], + [ + 1994, + 458 + ], + [ + 1982, + 459 + ], + [ + 1829, + 471 + ], + [ + 1743, + 477 + ], + [ + 1693, + 481 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1789, + 333 + ], + [ + 1680, + 311 + ], + [ + 1675, + 308 + ], + [ + 1667, + 308 + ], + [ + 1661, + 311 + ], + [ + 1637, + 314 + ], + [ + 1621, + 316 + ], + [ + 1591, + 309 + ], + [ + 1578, + 304 + ], + [ + 1568, + 308 + ], + [ + 1544, + 317 + ], + [ + 1484, + 305 + ], + [ + 1474, + 301 + ], + [ + 1463, + 306 + ], + [ + 1340, + 338 + ], + [ + 1340, + 349 + ], + [ + 1342, + 356 + ], + [ + 1424, + 354 + ], + [ + 1491, + 355 + ], + [ + 1601, + 355 + ], + [ + 1642, + 356 + ], + [ + 1715, + 353 + ], + [ + 1793, + 349 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1063, + 205 + ], + [ + 1110, + 208 + ], + [ + 1112, + 266 + ], + [ + 1065, + 264 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1114, + 291 + ], + [ + 1114, + 265 + ], + [ + 1066, + 263 + ], + [ + 
1067, + 288 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1115, + 324 + ], + [ + 1114, + 289 + ], + [ + 1067, + 287 + ], + [ + 1068, + 322 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1157, + 433 + ], + [ + 1158, + 429 + ], + [ + 1161, + 429 + ], + [ + 1165, + 429 + ], + [ + 1166, + 432 + ], + [ + 1167, + 478 + ], + [ + 1158, + 480 + ], + [ + 1157, + 463 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1212, + 506 + ], + [ + 1210, + 434 + ], + [ + 1211, + 430 + ], + [ + 1213, + 428 + ], + [ + 1215, + 428 + ], + [ + 1218, + 429 + ], + [ + 1220, + 431 + ], + [ + 1222, + 506 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1263, + 429 + ], + [ + 1260, + 427 + ], + [ + 1256, + 427 + ], + [ + 1254, + 429 + ], + [ + 1254, + 432 + ], + [ + 1257, + 500 + ], + [ + 1266, + 501 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1353, + 498 + ], + [ + 1350, + 428 + ], + [ + 1348, + 425 + ], + [ + 1343, + 425 + ], + [ + 1342, + 429 + ], + [ + 1342, + 443 + ], + [ + 1345, + 492 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1394, + 457 + ], + [ + 1393, + 426 + ], + [ + 1391, + 423 + ], + [ + 1386, + 423 + ], + [ + 1384, + 426 + ], + [ + 1384, + 455 + ], + [ + 1389, + 466 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1433, + 448 + ], + [ + 1432, + 425 + ], + [ + 1431, + 422 + ], + [ + 1425, + 422 + ], + [ + 1422, + 427 + ], + [ + 1423, + 451 + ], + [ + 1432, + 453 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1451, + 466 + ], + [ + 1445, + 271 + ], + [ + 1450, + 271 + ], + [ + 1456, + 464 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 1496, + 464 + ], + [ + 1485, + 454 + ], + [ + 1473, + 449 + ], + [ + 1468, + 451 + ], + [ + 1465, + 451 + ], + [ + 1458, + 449 + ], + [ + 1447, + 450 + ], + [ + 1443, + 446 + ], + [ + 1445, + 440 + ], + [ + 1461, + 439 + ], + [ + 1464, + 435 + ], + [ + 1464, + 431 + ], + [ + 1447, + 430 + ], + [ + 1437, + 434 + ], + [ + 1439, + 438 + ], + [ + 1438, + 444 + ], + [ + 1429, + 443 + ], + [ + 1425, + 434 + ], + [ + 1423, + 422 + ], + [ + 1432, + 422 + ], + [ + 1432, + 419 + ], + [ + 1421, + 419 + ], + [ + 1419, + 422 + ], + [ + 1418, + 427 + ], + [ + 1415, + 424 + ], + [ + 1409, + 424 + ], + [ + 1406, + 425 + ], + [ + 1398, + 423 + ], + [ + 1394, + 421 + ], + [ + 1394, + 424 + ], + [ + 1402, + 428 + ], + [ + 1404, + 432 + ], + [ + 1400, + 441 + ], + [ + 1396, + 448 + ], + [ + 1391, + 437 + ], + [ + 1390, + 426 + ], + [ + 1381, + 424 + ], + [ + 1372, + 425 + ], + [ + 1370, + 436 + ], + [ + 1360, + 439 + ], + [ + 1344, + 436 + ], + [ + 1345, + 425 + ], + [ + 1355, + 419 + ], + [ + 1352, + 417 + ], + [ + 1337, + 424 + ], + [ + 1328, + 430 + ], + [ + 1322, + 433 + ], + [ + 1324, + 438 + ], + [ + 1328, + 438 + ], + [ + 1327, + 449 + ], + [ + 1311, + 447 + ], + [ + 1304, + 432 + ], + [ + 1289, + 432 + ], + [ + 1287, + 436 + ], + [ + 1299, + 442 + ], + [ + 1298, + 450 + ], + [ + 1294, + 451 + ], + [ + 1291, + 447 + ], + [ + 1279, + 444 + ], + [ + 1267, + 440 + ], + [ + 1266, + 441 + ], + [ + 1270, + 447 + ], + [ + 1267, + 457 + ], + [ + 1263, + 461 + ], + [ + 1256, + 472 + ], + [ + 1251, + 471 + ], + [ + 1248, + 466 + ], + [ + 1245, + 462 + ], + [ + 1239, + 462 + ], + [ + 1236, + 445 + ], + [ + 1237, + 426 + ], + [ + 1244, + 422 + ], + [ + 1242, + 419 + ], + [ + 1233, + 421 + ], + [ + 1232, + 435 + ], + [ + 1227, + 435 + ], + [ + 1226, + 426 + ], + [ + 1217, + 427 + ], + [ + 1216, + 430 + ], + [ + 1223, + 431 + ], + [ + 1227, + 439 + ], + [ + 1231, + 460 + ], + [ + 1225, + 472 + ], + 
[ + 1221, + 476 + ], + [ + 1210, + 475 + ], + [ + 1199, + 466 + ], + [ + 1193, + 449 + ], + [ + 1203, + 442 + ], + [ + 1200, + 439 + ], + [ + 1187, + 442 + ], + [ + 1184, + 441 + ], + [ + 1179, + 440 + ], + [ + 1177, + 441 + ], + [ + 1175, + 444 + ], + [ + 1178, + 447 + ], + [ + 1180, + 453 + ], + [ + 1177, + 456 + ], + [ + 1174, + 458 + ], + [ + 1166, + 455 + ], + [ + 1159, + 458 + ], + [ + 1156, + 459 + ], + [ + 1154, + 444 + ], + [ + 1159, + 441 + ], + [ + 1160, + 437 + ], + [ + 1147, + 435 + ], + [ + 1145, + 441 + ], + [ + 1148, + 446 + ], + [ + 1147, + 457 + ], + [ + 1146, + 473 + ], + [ + 1139, + 477 + ], + [ + 1119, + 449 + ], + [ + 1121, + 437 + ], + [ + 1126, + 432 + ], + [ + 1139, + 430 + ], + [ + 1140, + 428 + ], + [ + 1125, + 428 + ], + [ + 1119, + 431 + ], + [ + 1114, + 433 + ], + [ + 1107, + 432 + ], + [ + 1104, + 436 + ], + [ + 1115, + 437 + ], + [ + 1115, + 443 + ], + [ + 1105, + 446 + ], + [ + 1100, + 452 + ], + [ + 1087, + 455 + ], + [ + 1087, + 449 + ], + [ + 1092, + 446 + ], + [ + 1092, + 442 + ], + [ + 1090, + 441 + ], + [ + 1077, + 443 + ], + [ + 1077, + 447 + ], + [ + 1082, + 448 + ], + [ + 1081, + 455 + ], + [ + 1081, + 463 + ], + [ + 1081, + 467 + ], + [ + 1070, + 467 + ], + [ + 1064, + 448 + ], + [ + 1062, + 444 + ], + [ + 1059, + 437 + ], + [ + 1066, + 435 + ], + [ + 1071, + 431 + ], + [ + 1087, + 433 + ], + [ + 1089, + 429 + ], + [ + 1080, + 427 + ], + [ + 1078, + 425 + ], + [ + 1074, + 424 + ], + [ + 1073, + 427 + ], + [ + 1070, + 427 + ], + [ + 1064, + 432 + ], + [ + 1054, + 435 + ], + [ + 1044, + 432 + ], + [ + 1036, + 431 + ], + [ + 1035, + 434 + ], + [ + 1046, + 436 + ], + [ + 1052, + 439 + ], + [ + 1052, + 444 + ], + [ + 1050, + 446 + ], + [ + 1050, + 449 + ], + [ + 1050, + 451 + ], + [ + 1051, + 454 + ], + [ + 1051, + 458 + ], + [ + 1051, + 460 + ], + [ + 1046, + 462 + ], + [ + 1042, + 469 + ], + [ + 1039, + 479 + ], + [ + 1040, + 492 + ], + [ + 1045, + 501 + ], + [ + 1052, + 510 + ], + [ + 1058, + 513 + ], + [ + 1068, + 512 + ], + [ + 1076, + 509 + ], + [ + 1079, + 506 + ], + [ + 1083, + 498 + ], + [ + 1086, + 499 + ], + [ + 1090, + 506 + ], + [ + 1094, + 512 + ], + [ + 1099, + 513 + ], + [ + 1105, + 515 + ], + [ + 1110, + 513 + ], + [ + 1112, + 511 + ], + [ + 1118, + 510 + ], + [ + 1123, + 510 + ], + [ + 1129, + 504 + ], + [ + 1131, + 499 + ], + [ + 1146, + 492 + ], + [ + 1148, + 494 + ], + [ + 1138, + 512 + ], + [ + 1141, + 512 + ], + [ + 1150, + 497 + ], + [ + 1154, + 495 + ], + [ + 1159, + 502 + ], + [ + 1156, + 513 + ], + [ + 1168, + 512 + ], + [ + 1181, + 511 + ], + [ + 1187, + 507 + ], + [ + 1194, + 504 + ], + [ + 1199, + 500 + ], + [ + 1200, + 492 + ], + [ + 1206, + 498 + ], + [ + 1209, + 503 + ], + [ + 1215, + 497 + ], + [ + 1220, + 489 + ], + [ + 1221, + 482 + ], + [ + 1234, + 469 + ], + [ + 1236, + 472 + ], + [ + 1231, + 480 + ], + [ + 1231, + 488 + ], + [ + 1235, + 499 + ], + [ + 1241, + 506 + ], + [ + 1250, + 507 + ], + [ + 1260, + 504 + ], + [ + 1264, + 499 + ], + [ + 1273, + 502 + ], + [ + 1282, + 500 + ], + [ + 1292, + 494 + ], + [ + 1295, + 490 + ], + [ + 1303, + 489 + ], + [ + 1310, + 495 + ], + [ + 1318, + 499 + ], + [ + 1326, + 498 + ], + [ + 1335, + 494 + ], + [ + 1337, + 491 + ], + [ + 1344, + 498 + ], + [ + 1351, + 501 + ], + [ + 1360, + 499 + ], + [ + 1364, + 493 + ], + [ + 1372, + 495 + ], + [ + 1382, + 493 + ], + [ + 1388, + 498 + ], + [ + 1398, + 498 + ], + [ + 1407, + 494 + ], + [ + 1420, + 492 + ], + [ + 1424, + 492 + ], + [ + 1432, + 496 + ], + [ + 1444, + 495 + ], + [ + 1454, + 494 + ], + [ + 1467, + 487 + ], + [ + 1479, + 
493 + ], + [ + 1492, + 492 + ], + [ + 1497, + 488 + ], + [ + 1501, + 479 + ], + [ + 1500, + 470 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1311, + 499 + ], + [ + 1309, + 429 + ], + [ + 1307, + 427 + ], + [ + 1303, + 426 + ], + [ + 1300, + 427 + ], + [ + 1300, + 430 + ], + [ + 1302, + 499 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1395, + 289 + ], + [ + 1392, + 291 + ], + [ + 1392, + 305 + ], + [ + 1393, + 307 + ], + [ + 1396, + 307 + ], + [ + 1441, + 307 + ], + [ + 1447, + 304 + ], + [ + 1447, + 289 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1482, + 269 + ], + [ + 1452, + 265 + ], + [ + 1451, + 266 + ], + [ + 1450, + 280 + ], + [ + 1450, + 283 + ], + [ + 1452, + 284 + ], + [ + 1482, + 287 + ], + [ + 1484, + 286 + ], + [ + 1485, + 284 + ], + [ + 1485, + 271 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1453, + 299 + ], + [ + 1448, + 297 + ], + [ + 1444, + 298 + ], + [ + 1440, + 300 + ], + [ + 1435, + 306 + ], + [ + 1434, + 316 + ], + [ + 1437, + 323 + ], + [ + 1440, + 327 + ], + [ + 1447, + 328 + ], + [ + 1454, + 326 + ], + [ + 1459, + 321 + ], + [ + 1461, + 313 + ], + [ + 1460, + 306 + ], + [ + 1458, + 302 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1455, + 340 + ], + [ + 1455, + 327 + ], + [ + 1441, + 327 + ], + [ + 1441, + 340 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 625, + 558 + ], + [ + 622, + 463 + ], + [ + 619, + 453 + ], + [ + 612, + 305 + ], + [ + 610, + 196 + ], + [ + 605, + 188 + ], + [ + 599, + 0 + ], + [ + 592, + 0 + ], + [ + 598, + 188 + ], + [ + 595, + 198 + ], + [ + 606, + 453 + ], + [ + 603, + 464 + ], + [ + 606, + 559 + ], + [ + 614, + 560 + ], + [ + 619, + 560 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 610, + 127 + ], + [ + 594, + 127 + ], + [ + 586, + 132 + ], + [ + 576, + 144 + ], + [ + 573, + 159 + ], + [ + 575, + 174 + ], + [ + 580, + 182 + ], + [ + 588, + 188 + ], + [ + 598, + 190 + ], + [ + 608, + 189 + ], + [ + 618, + 181 + ], + [ + 625, + 164 + ], + [ + 624, + 145 + ], + [ + 619, + 134 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 551, + 207 + ], + [ + 539, + 210 + ], + [ + 530, + 219 + ], + [ + 525, + 231 + ], + [ + 524, + 246 + ], + [ + 529, + 261 + ], + [ + 537, + 266 + ], + [ + 547, + 270 + ], + [ + 558, + 270 + ], + [ + 572, + 264 + ], + [ + 578, + 255 + ], + [ + 581, + 239 + ], + [ + 577, + 220 + ], + [ + 568, + 212 + ], + [ + 559, + 209 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 616, + 227 + ], + [ + 615, + 222 + ], + [ + 604, + 221 + ], + [ + 606, + 304 + ], + [ + 611, + 305 + ], + [ + 612, + 309 + ], + [ + 622, + 309 + ], + [ + 622, + 304 + ], + [ + 638, + 299 + ], + [ + 640, + 297 + ], + [ + 639, + 291 + ], + [ + 636, + 287 + ], + [ + 632, + 286 + ], + [ + 628, + 286 + ], + [ + 623, + 284 + ], + [ + 624, + 276 + ], + [ + 639, + 272 + ], + [ + 641, + 268 + ], + [ + 640, + 262 + ], + [ + 635, + 259 + ], + [ + 629, + 259 + ], + [ + 624, + 257 + ], + [ + 623, + 250 + ], + [ + 635, + 246 + ], + [ + 639, + 245 + ], + [ + 642, + 242 + ], + [ + 642, + 238 + ], + [ + 638, + 232 + ], + [ + 634, + 231 + ], + [ + 625, + 228 + ], + [ + 625, + 228 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 583, + 306 + ], + [ + 582, + 297 + ], + [ + 581, + 292 + ], + [ + 581, + 285 + ], + [ + 581, + 269 + ], + [ + 579, + 265 + ], + [ + 579, + 259 + ], + [ + 583, + 252 + ], + [ + 582, + 246 + ], + [ + 578, + 240 + ], + [ + 575, + 233 + ], + [ + 577, + 226 + ], + [ + 581, + 222 + ], + [ + 586, + 218 + 
], + [ + 597, + 216 + ], + [ + 605, + 220 + ], + [ + 612, + 231 + ], + [ + 614, + 309 + ], + [ + 614, + 315 + ], + [ + 608, + 316 + ], + [ + 607, + 321 + ], + [ + 600, + 320 + ], + [ + 599, + 312 + ], + [ + 591, + 311 + ], + [ + 591, + 306 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 512, + 92 + ], + [ + 514, + 88 + ], + [ + 520, + 85 + ], + [ + 586, + 83 + ], + [ + 592, + 84 + ], + [ + 594, + 88 + ], + [ + 595, + 110 + ], + [ + 592, + 111 + ], + [ + 572, + 112 + ], + [ + 572, + 121 + ], + [ + 571, + 124 + ], + [ + 566, + 125 + ], + [ + 540, + 126 + ], + [ + 538, + 125 + ], + [ + 536, + 114 + ], + [ + 520, + 114 + ], + [ + 515, + 112 + ], + [ + 513, + 107 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1778, + 418 + ], + [ + 1779, + 511 + ], + [ + 1773, + 522 + ], + [ + 1791, + 522 + ], + [ + 1788, + 511 + ], + [ + 1784, + 418 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1984, + 492 + ], + [ + 1983, + 419 + ], + [ + 1990, + 419 + ], + [ + 1993, + 494 + ], + [ + 1996, + 502 + ], + [ + 1981, + 503 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1757, + 86 + ], + [ + 1733, + 84 + ], + [ + 1708, + 87 + ], + [ + 1693, + 90 + ], + [ + 1684, + 87 + ], + [ + 1664, + 87 + ], + [ + 1667, + 122 + ], + [ + 1680, + 121 + ], + [ + 1695, + 109 + ], + [ + 1699, + 109 + ], + [ + 1711, + 137 + ], + [ + 1719, + 135 + ], + [ + 1712, + 107 + ], + [ + 1731, + 111 + ], + [ + 1735, + 125 + ], + [ + 1742, + 129 + ], + [ + 1738, + 155 + ], + [ + 1735, + 151 + ], + [ + 1711, + 156 + ], + [ + 1715, + 162 + ], + [ + 1725, + 163 + ], + [ + 1732, + 169 + ], + [ + 1732, + 187 + ], + [ + 1740, + 191 + ], + [ + 1749, + 201 + ], + [ + 1754, + 196 + ], + [ + 1755, + 179 + ], + [ + 1762, + 186 + ], + [ + 1762, + 206 + ], + [ + 1765, + 213 + ], + [ + 1771, + 213 + ], + [ + 1771, + 195 + ], + [ + 1776, + 187 + ], + [ + 1782, + 190 + ], + [ + 1786, + 196 + ], + [ + 1796, + 200 + ], + [ + 1797, + 192 + ], + [ + 1801, + 193 + ], + [ + 1805, + 200 + ], + [ + 1802, + 211 + ], + [ + 1805, + 215 + ], + [ + 1772, + 221 + ], + [ + 1740, + 237 + ], + [ + 1733, + 248 + ], + [ + 1737, + 256 + ], + [ + 1744, + 257 + ], + [ + 1735, + 267 + ], + [ + 1732, + 276 + ], + [ + 1744, + 272 + ], + [ + 1745, + 280 + ], + [ + 1741, + 286 + ], + [ + 1746, + 302 + ], + [ + 1764, + 316 + ], + [ + 1767, + 297 + ], + [ + 1774, + 310 + ], + [ + 1786, + 305 + ], + [ + 1797, + 296 + ], + [ + 1795, + 283 + ], + [ + 1802, + 281 + ], + [ + 1803, + 281 + ], + [ + 1799, + 319 + ], + [ + 1801, + 347 + ], + [ + 1805, + 380 + ], + [ + 1830, + 338 + ], + [ + 1828, + 286 + ], + [ + 1831, + 281 + ], + [ + 1837, + 291 + ], + [ + 1843, + 298 + ], + [ + 1881, + 302 + ], + [ + 1886, + 293 + ], + [ + 1879, + 279 + ], + [ + 1871, + 275 + ], + [ + 1869, + 272 + ], + [ + 1875, + 271 + ], + [ + 1881, + 262 + ], + [ + 1913, + 216 + ], + [ + 1921, + 196 + ], + [ + 1917, + 193 + ], + [ + 1913, + 196 + ], + [ + 1907, + 190 + ], + [ + 1908, + 183 + ], + [ + 1924, + 183 + ], + [ + 1927, + 177 + ], + [ + 1925, + 168 + ], + [ + 1934, + 168 + ], + [ + 1946, + 175 + ], + [ + 1953, + 170 + ], + [ + 1966, + 162 + ], + [ + 1968, + 156 + ], + [ + 1961, + 145 + ], + [ + 1981, + 141 + ], + [ + 1979, + 128 + ], + [ + 1921, + 1 + ], + [ + 1782, + 1 + ], + [ + 1769, + 24 + ], + [ + 1755, + 14 + ], + [ + 1752, + 26 + ], + [ + 1757, + 34 + ], + [ + 1756, + 43 + ], + [ + 1760, + 52 + ], + [ + 1764, + 71 + ], + [ + 1765, + 77 + ], + [ + 1760, + 88 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1830, + 59 + ], + [ + 1831, + 40 + ], + [ 
+ 1839, + 26 + ], + [ + 1862, + 1 + ], + [ + 2047, + 1 + ], + [ + 2047, + 202 + ], + [ + 2029, + 198 + ], + [ + 2013, + 190 + ], + [ + 2006, + 181 + ], + [ + 2001, + 132 + ], + [ + 1941, + 122 + ], + [ + 1888, + 109 + ], + [ + 1847, + 90 + ], + [ + 1836, + 77 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1629, + 64 + ], + [ + 1612, + 73 + ], + [ + 1603, + 92 + ], + [ + 1604, + 109 + ], + [ + 1611, + 129 + ], + [ + 1621, + 136 + ], + [ + 1638, + 141 + ], + [ + 1664, + 131 + ], + [ + 1674, + 117 + ], + [ + 1676, + 102 + ], + [ + 1670, + 83 + ], + [ + 1656, + 71 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1674, + 567 + ], + [ + 1657, + 1 + ], + [ + 1625, + 1 + ], + [ + 1644, + 569 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1617, + 147 + ], + [ + 1608, + 157 + ], + [ + 1606, + 165 + ], + [ + 1608, + 173 + ], + [ + 1612, + 180 + ], + [ + 1621, + 181 + ], + [ + 1612, + 188 + ], + [ + 1610, + 198 + ], + [ + 1610, + 206 + ], + [ + 1614, + 213 + ], + [ + 1621, + 215 + ], + [ + 1620, + 219 + ], + [ + 1612, + 224 + ], + [ + 1610, + 237 + ], + [ + 1614, + 245 + ], + [ + 1620, + 249 + ], + [ + 1621, + 251 + ], + [ + 1633, + 252 + ], + [ + 1633, + 258 + ], + [ + 1643, + 262 + ], + [ + 1652, + 254 + ], + [ + 1655, + 247 + ], + [ + 1651, + 143 + ], + [ + 1631, + 144 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1652, + 277 + ], + [ + 1647, + 264 + ], + [ + 1641, + 258 + ], + [ + 1633, + 258 + ], + [ + 1624, + 265 + ], + [ + 1621, + 278 + ], + [ + 1621, + 295 + ], + [ + 1625, + 308 + ], + [ + 1631, + 312 + ], + [ + 1641, + 311 + ], + [ + 1649, + 302 + ], + [ + 1652, + 289 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1754, + 564 + ], + [ + 1746, + 464 + ], + [ + 1732, + 459 + ], + [ + 1698, + 461 + ], + [ + 1687, + 467 + ], + [ + 1691, + 565 + ], + [ + 1696, + 568 + ], + [ + 1751, + 566 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1941, + 330 + ], + [ + 1938, + 330 + ], + [ + 1935, + 332 + ], + [ + 1930, + 341 + ], + [ + 1946, + 377 + ], + [ + 1979, + 405 + ], + [ + 1988, + 408 + ], + [ + 1993, + 457 + ], + [ + 1991, + 475 + ], + [ + 1994, + 503 + ], + [ + 1998, + 529 + ], + [ + 1994, + 552 + ], + [ + 1991, + 585 + ], + [ + 1992, + 607 + ], + [ + 1991, + 615 + ], + [ + 1991, + 632 + ], + [ + 1974, + 636 + ], + [ + 1968, + 640 + ], + [ + 1968, + 646 + ], + [ + 1975, + 654 + ], + [ + 1996, + 658 + ], + [ + 2018, + 661 + ], + [ + 2027, + 660 + ], + [ + 2029, + 649 + ], + [ + 2029, + 625 + ], + [ + 2026, + 580 + ], + [ + 2027, + 547 + ], + [ + 2030, + 535 + ], + [ + 2032, + 509 + ], + [ + 2038, + 494 + ], + [ + 2044, + 504 + ], + [ + 2047, + 530 + ], + [ + 2047, + 235 + ], + [ + 2036, + 226 + ], + [ + 2011, + 223 + ], + [ + 1969, + 211 + ], + [ + 1951, + 209 + ], + [ + 1873, + 224 + ], + [ + 1833, + 257 + ], + [ + 1855, + 260 + ], + [ + 1869, + 268 + ], + [ + 1883, + 266 + ], + [ + 1915, + 268 + ], + [ + 1942, + 267 + ], + [ + 1942, + 330 + ], + [ + 1948, + 332 + ], + [ + 1951, + 268 + ], + [ + 1965, + 272 + ], + [ + 1993, + 269 + ], + [ + 1993, + 278 + ], + [ + 1991, + 289 + ], + [ + 1999, + 290 + ], + [ + 1999, + 297 + ], + [ + 2001, + 304 + ], + [ + 2009, + 306 + ], + [ + 2011, + 314 + ], + [ + 1998, + 316 + ], + [ + 1990, + 321 + ], + [ + 1981, + 333 + ], + [ + 1971, + 359 + ], + [ + 1957, + 344 + ], + [ + 1947, + 331 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1918, + 677 + ], + [ + 1891, + 679 + ], + [ + 1870, + 672 + ], + [ + 1856, + 666 + ], + [ + 1853, + 652 + ], + [ + 1858, + 648 + ], + [ + 
1871, + 641 + ], + [ + 1873, + 634 + ], + [ + 1871, + 621 + ], + [ + 1868, + 600 + ], + [ + 1856, + 554 + ], + [ + 1845, + 512 + ], + [ + 1836, + 492 + ], + [ + 1836, + 477 + ], + [ + 1828, + 476 + ], + [ + 1824, + 465 + ], + [ + 1820, + 439 + ], + [ + 1811, + 458 + ], + [ + 1807, + 480 + ], + [ + 1804, + 490 + ], + [ + 1802, + 498 + ], + [ + 1792, + 497 + ], + [ + 1784, + 489 + ], + [ + 1783, + 480 + ], + [ + 1786, + 474 + ], + [ + 1782, + 470 + ], + [ + 1780, + 463 + ], + [ + 1784, + 441 + ], + [ + 1789, + 428 + ], + [ + 1791, + 408 + ], + [ + 1795, + 392 + ], + [ + 1802, + 375 + ], + [ + 1809, + 358 + ], + [ + 1816, + 344 + ], + [ + 1822, + 335 + ], + [ + 1832, + 329 + ], + [ + 1836, + 327 + ], + [ + 1841, + 322 + ], + [ + 1846, + 314 + ], + [ + 1845, + 305 + ], + [ + 1845, + 286 + ], + [ + 1852, + 279 + ], + [ + 1861, + 274 + ], + [ + 1871, + 274 + ], + [ + 1879, + 278 + ], + [ + 1885, + 285 + ], + [ + 1888, + 298 + ], + [ + 1896, + 302 + ], + [ + 1907, + 307 + ], + [ + 1913, + 321 + ], + [ + 1914, + 327 + ], + [ + 1924, + 336 + ], + [ + 1936, + 346 + ], + [ + 1949, + 372 + ], + [ + 1958, + 387 + ], + [ + 1958, + 435 + ], + [ + 1958, + 469 + ], + [ + 1949, + 494 + ], + [ + 1945, + 504 + ], + [ + 1943, + 513 + ], + [ + 1944, + 521 + ], + [ + 1928, + 518 + ], + [ + 1918, + 509 + ], + [ + 1919, + 502 + ], + [ + 1918, + 488 + ], + [ + 1915, + 484 + ], + [ + 1911, + 514 + ], + [ + 1905, + 536 + ], + [ + 1907, + 555 + ], + [ + 1908, + 577 + ], + [ + 1905, + 602 + ], + [ + 1910, + 631 + ], + [ + 1915, + 656 + ], + [ + 1918, + 672 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 716, + 401 + ], + [ + 697, + 393 + ], + [ + 679, + 392 + ], + [ + 668, + 395 + ], + [ + 647, + 404 + ], + [ + 635, + 418 + ], + [ + 638, + 424 + ], + [ + 661, + 426 + ], + [ + 673, + 421 + ], + [ + 680, + 421 + ], + [ + 678, + 424 + ], + [ + 678, + 429 + ], + [ + 674, + 439 + ], + [ + 674, + 446 + ], + [ + 676, + 454 + ], + [ + 682, + 454 + ], + [ + 678, + 463 + ], + [ + 675, + 480 + ], + [ + 671, + 497 + ], + [ + 675, + 499 + ], + [ + 677, + 506 + ], + [ + 680, + 514 + ], + [ + 678, + 521 + ], + [ + 677, + 526 + ], + [ + 672, + 530 + ], + [ + 672, + 533 + ], + [ + 675, + 534 + ], + [ + 683, + 533 + ], + [ + 691, + 531 + ], + [ + 692, + 527 + ], + [ + 689, + 520 + ], + [ + 689, + 494 + ], + [ + 697, + 494 + ], + [ + 701, + 499 + ], + [ + 701, + 506 + ], + [ + 703, + 516 + ], + [ + 700, + 534 + ], + [ + 703, + 537 + ], + [ + 708, + 536 + ], + [ + 711, + 532 + ], + [ + 711, + 525 + ], + [ + 709, + 517 + ], + [ + 711, + 507 + ], + [ + 711, + 492 + ], + [ + 717, + 489 + ], + [ + 721, + 484 + ], + [ + 721, + 454 + ], + [ + 715, + 439 + ], + [ + 709, + 421 + ], + [ + 699, + 412 + ], + [ + 700, + 412 + ], + [ + 715, + 407 + ], + [ + 722, + 401 + ], + [ + 716, + 401 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} 
\ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000069_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000069_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..7d53d50e965cb829586b439b3b9362823c37b1a7 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000069_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000070_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000070_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..4b7baba448e1eb261e2709bf543db6763996fffe Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000070_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000070_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000070_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..52edc79d9ce4f8be37866e5466a59756aaf3d7f4 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000070_000019_gtFine_polygons.json @@ -0,0 +1,4493 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1300, + 0 + ], + [ + 763, + 0 + ], + [ + 846, + 41 + ], + [ + 1183, + 65 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 2047, + 573 + ], + [ + 0, + 651 + ], + [ + 0, + 1 + ], + [ + 832, + 1 + ], + [ + 1029, + 25 + ], + [ + 1039, + 28 + ], + [ + 1046, + 36 + ], + [ + 1056, + 32 + ], + [ + 1073, + 33 + ], + [ + 1080, + 30 + ], + [ + 1086, + 30 + ], + [ + 1093, + 33 + ], + [ + 1104, + 31 + ], + [ + 1116, + 31 + ], + [ + 1124, + 31 + ], + [ + 1124, + 8 + ], + [ + 1147, + 1 + ], + [ + 2048, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1059, + 451 + ], + [ + 965, + 452 + ], + [ + 717, + 493 + ], + [ + 0, + 578 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2047, + 698 + ], + [ + 1165, + 453 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1194, + 447 + ], + [ + 1131, + 448 + ], + [ + 1062, + 450 + ], + [ + 1061, + 456 + ], + [ + 1074, + 458 + ], + [ + 1112, + 457 + ], + [ + 1140, + 457 + ], + [ + 1160, + 458 + ], + [ + 1169, + 463 + ], + [ + 1200, + 480 + ], + [ + 1301, + 532 + ], + [ + 1380, + 579 + ], + [ + 1490, + 647 + ], + [ + 1549, + 682 + ], + [ + 2048, + 960 + ], + [ + 2048, + 476 + ], + [ + 1295, + 460 + ], + [ + 1276, + 457 + ], + [ + 1260, + 457 + ], + [ + 1224, + 450 + ], + [ + 1214, + 448 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 929, + 427 + ], + [ + 927, + 387 + ], + [ + 930, + 379 + ], + [ + 932, + 368 + ], + [ + 941, + 369 + ], + [ + 949, + 374 + ], + [ + 958, + 378 + ], + [ + 959, + 369 + ], + [ + 962, + 364 + ], + [ + 973, + 365 + ], + [ + 970, + 370 + ], + [ + 970, + 372 + ], + [ + 973, + 374 + ], + [ + 981, + 373 + ], + [ + 981, + 367 + ], + [ + 985, + 367 + ], + [ + 990, + 363 + ], + [ + 991, + 357 + ], + [ + 979, + 354 + ], + [ + 976, + 349 + ], + [ + 982, + 344 + ], + [ + 987, + 350 + ], + [ + 999, + 349 + ], + [ + 994, + 342 + ], + [ + 1002, + 343 + ], + [ + 1013, + 338 + ], + [ + 1006, + 334 + ], + [ + 1015, + 330 + ], + [ + 1028, + 330 + ], + [ + 1038, + 328 + ], + [ + 1041, + 321 + ], + [ + 1037, + 317 + ], + [ + 1034, + 311 + ], + [ + 1029, + 309 + ], + [ + 1031, + 303 + ], + [ + 1028, + 299 + ], + [ + 1019, + 296 + ], + [ + 1006, + 274 + ], + [ + 992, + 281 + ], + [ + 
989, + 254 + ], + [ + 980, + 248 + ], + [ + 970, + 251 + ], + [ + 961, + 248 + ], + [ + 950, + 251 + ], + [ + 941, + 252 + ], + [ + 937, + 250 + ], + [ + 947, + 244 + ], + [ + 949, + 226 + ], + [ + 945, + 213 + ], + [ + 937, + 209 + ], + [ + 929, + 193 + ], + [ + 920, + 192 + ], + [ + 915, + 186 + ], + [ + 904, + 186 + ], + [ + 899, + 195 + ], + [ + 895, + 201 + ], + [ + 889, + 208 + ], + [ + 885, + 224 + ], + [ + 879, + 225 + ], + [ + 869, + 222 + ], + [ + 868, + 238 + ], + [ + 865, + 245 + ], + [ + 860, + 238 + ], + [ + 857, + 243 + ], + [ + 851, + 243 + ], + [ + 849, + 251 + ], + [ + 846, + 239 + ], + [ + 842, + 240 + ], + [ + 844, + 336 + ], + [ + 856, + 353 + ], + [ + 869, + 353 + ], + [ + 882, + 347 + ], + [ + 886, + 348 + ], + [ + 887, + 359 + ], + [ + 900, + 366 + ], + [ + 909, + 366 + ], + [ + 918, + 376 + ], + [ + 921, + 389 + ], + [ + 918, + 399 + ], + [ + 917, + 433 + ], + [ + 930, + 436 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 922, + 439 + ], + [ + 922, + 387 + ], + [ + 920, + 383 + ], + [ + 907, + 385 + ], + [ + 905, + 388 + ], + [ + 897, + 388 + ], + [ + 893, + 360 + ], + [ + 888, + 358 + ], + [ + 886, + 345 + ], + [ + 882, + 344 + ], + [ + 879, + 348 + ], + [ + 877, + 348 + ], + [ + 871, + 354 + ], + [ + 867, + 353 + ], + [ + 868, + 403 + ], + [ + 874, + 413 + ], + [ + 873, + 439 + ], + [ + 913, + 448 + ], + [ + 922, + 441 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 946, + 376 + ], + [ + 931, + 376 + ], + [ + 932, + 406 + ], + [ + 946, + 406 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 941, + 430 + ], + [ + 940, + 376 + ], + [ + 938, + 376 + ], + [ + 939, + 429 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 856, + 357 + ], + [ + 855, + 335 + ], + [ + 826, + 335 + ], + [ + 826, + 357 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 826, + 325 + ], + [ + 794, + 325 + ], + [ + 795, + 351 + ], + [ + 827, + 351 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 967, + 431 + ], + [ + 940, + 433 + ], + [ + 947, + 468 + ], + [ + 953, + 476 + ], + [ + 970, + 476 + ], + [ + 980, + 455 + ], + [ + 976, + 431 + ], + [ + 968, + 432 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 932, + 424 + ], + [ + 911, + 425 + ], + [ + 903, + 426 + ], + [ + 900, + 430 + ], + [ + 898, + 437 + ], + [ + 940, + 484 + ], + [ + 947, + 482 + ], + [ + 952, + 480 + ], + [ + 955, + 471 + ], + [ + 952, + 461 + ], + [ + 948, + 452 + ], + [ + 949, + 445 + ], + [ + 952, + 445 + ], + [ + 954, + 442 + ], + [ + 953, + 439 + ], + [ + 948, + 439 + ], + [ + 945, + 441 + ], + [ + 942, + 431 + ], + [ + 940, + 427 + ], + [ + 936, + 425 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 930, + 481 + ], + [ + 919, + 481 + ], + [ + 903, + 482 + ], + [ + 887, + 438 + ], + [ + 891, + 435 + ], + [ + 906, + 433 + ], + [ + 922, + 433 + ], + [ + 929, + 435 + ], + [ + 934, + 440 + ], + [ + 937, + 444 + ], + [ + 940, + 444 + ], + [ + 944, + 447 + ], + [ + 943, + 449 + ], + [ + 939, + 452 + ], + [ + 942, + 456 + ], + [ + 946, + 477 + ], + [ + 944, + 483 + ], + [ + 937, + 487 + ], + [ + 931, + 486 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1127, + 277 + ], + [ + 1118, + 289 + ], + [ + 1123, + 291 + ], + [ + 1117, + 296 + ], + [ + 1110, + 298 + ], + [ + 1110, + 305 + ], + [ + 1107, + 312 + ], + [ + 1104, + 314 + ], + [ + 1104, + 321 + ], + [ + 1109, + 326 + ], + [ + 1105, + 332 + ], + [ + 1099, + 341 + ], + [ + 1103, + 352 + ], + [ + 1115, + 365 + ], + [ + 1125, + 380 + ], + [ + 1128, + 454 + ], + [ + 1131, + 454 + ], + [ + 1130, + 
375 + ], + [ + 1136, + 356 + ], + [ + 1153, + 348 + ], + [ + 1161, + 334 + ], + [ + 1160, + 325 + ], + [ + 1156, + 307 + ], + [ + 1149, + 299 + ], + [ + 1138, + 296 + ], + [ + 1132, + 284 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1132, + 405 + ], + [ + 1141, + 387 + ], + [ + 1123, + 388 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1068, + 323 + ], + [ + 1108, + 334 + ], + [ + 1113, + 336 + ], + [ + 1116, + 339 + ], + [ + 1117, + 344 + ], + [ + 1118, + 353 + ], + [ + 1119, + 456 + ], + [ + 1123, + 457 + ], + [ + 1120, + 347 + ], + [ + 1119, + 340 + ], + [ + 1116, + 335 + ], + [ + 1110, + 332 + ], + [ + 1068, + 321 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1058, + 315 + ], + [ + 1059, + 341 + ], + [ + 1070, + 341 + ], + [ + 1069, + 317 + ], + [ + 1069, + 315 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1125, + 407 + ], + [ + 1116, + 407 + ], + [ + 1116, + 385 + ], + [ + 1126, + 385 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1141, + 445 + ], + [ + 1143, + 437 + ], + [ + 1146, + 430 + ], + [ + 1153, + 428 + ], + [ + 1158, + 428 + ], + [ + 1158, + 432 + ], + [ + 1162, + 437 + ], + [ + 1164, + 440 + ], + [ + 1161, + 443 + ], + [ + 1159, + 445 + ], + [ + 1159, + 454 + ], + [ + 1155, + 454 + ], + [ + 1155, + 450 + ], + [ + 1151, + 450 + ], + [ + 1147, + 452 + ], + [ + 1146, + 456 + ], + [ + 1141, + 455 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1171, + 359 + ], + [ + 1171, + 352 + ], + [ + 1184, + 348 + ], + [ + 1192, + 354 + ], + [ + 1191, + 360 + ], + [ + 1185, + 364 + ], + [ + 1187, + 369 + ], + [ + 1183, + 369 + ], + [ + 1180, + 367 + ], + [ + 1179, + 391 + ], + [ + 1187, + 391 + ], + [ + 1187, + 385 + ], + [ + 1189, + 382 + ], + [ + 1194, + 381 + ], + [ + 1197, + 384 + ], + [ + 1194, + 389 + ], + [ + 1196, + 395 + ], + [ + 1195, + 405 + ], + [ + 1201, + 406 + ], + [ + 1205, + 405 + ], + [ + 1205, + 402 + ], + [ + 1201, + 396 + ], + [ + 1203, + 390 + ], + [ + 1205, + 388 + ], + [ + 1204, + 343 + ], + [ + 1200, + 339 + ], + [ + 1199, + 330 + ], + [ + 1193, + 327 + ], + [ + 1201, + 324 + ], + [ + 1208, + 323 + ], + [ + 1215, + 327 + ], + [ + 1209, + 329 + ], + [ + 1209, + 339 + ], + [ + 1207, + 343 + ], + [ + 1207, + 387 + ], + [ + 1210, + 391 + ], + [ + 1210, + 396 + ], + [ + 1209, + 400 + ], + [ + 1208, + 402 + ], + [ + 1209, + 425 + ], + [ + 1213, + 424 + ], + [ + 1220, + 424 + ], + [ + 1222, + 428 + ], + [ + 1226, + 434 + ], + [ + 1226, + 451 + ], + [ + 1216, + 463 + ], + [ + 1200, + 465 + ], + [ + 1180, + 458 + ], + [ + 1174, + 457 + ], + [ + 1166, + 454 + ], + [ + 1168, + 432 + ], + [ + 1175, + 428 + ], + [ + 1176, + 396 + ], + [ + 1178, + 376 + ], + [ + 1177, + 367 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1254, + 459 + ], + [ + 1248, + 346 + ], + [ + 1246, + 289 + ], + [ + 1244, + 287 + ], + [ + 1245, + 344 + ], + [ + 1248, + 458 + ], + [ + 1250, + 463 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 800, + 357 + ], + [ + 797, + 364 + ], + [ + 798, + 369 + ], + [ + 798, + 374 + ], + [ + 797, + 379 + ], + [ + 798, + 394 + ], + [ + 802, + 395 + ], + [ + 802, + 403 + ], + [ + 807, + 404 + ], + [ + 807, + 395 + ], + [ + 813, + 393 + ], + [ + 812, + 377 + ], + [ + 812, + 373 + ], + [ + 809, + 361 + ], + [ + 806, + 356 + ], + [ + 803, + 356 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 900, + 455 + ], + [ + 900, + 452 + ], + [ + 897, + 449 + ], + [ + 895, + 447 + ], + [ + 894, + 443 + ], + [ + 889, + 435 + ], + [ + 884, + 430 + ], + [ + 875, + 428 + 
], + [ + 866, + 428 + ], + [ + 867, + 489 + ], + [ + 870, + 503 + ], + [ + 886, + 501 + ], + [ + 894, + 499 + ], + [ + 903, + 497 + ], + [ + 906, + 495 + ], + [ + 907, + 487 + ], + [ + 906, + 476 + ], + [ + 904, + 467 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 853, + 406 + ], + [ + 816, + 407 + ], + [ + 824, + 481 + ], + [ + 831, + 504 + ], + [ + 851, + 503 + ], + [ + 852, + 507 + ], + [ + 854, + 510 + ], + [ + 860, + 510 + ], + [ + 869, + 509 + ], + [ + 875, + 506 + ], + [ + 884, + 504 + ], + [ + 887, + 496 + ], + [ + 885, + 474 + ], + [ + 883, + 461 + ], + [ + 881, + 452 + ], + [ + 876, + 437 + ], + [ + 872, + 427 + ], + [ + 867, + 414 + ], + [ + 862, + 406 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 810, + 400 + ], + [ + 775, + 400 + ], + [ + 735, + 404 + ], + [ + 724, + 408 + ], + [ + 714, + 417 + ], + [ + 702, + 443 + ], + [ + 736, + 528 + ], + [ + 740, + 524 + ], + [ + 742, + 516 + ], + [ + 743, + 513 + ], + [ + 746, + 513 + ], + [ + 758, + 515 + ], + [ + 791, + 513 + ], + [ + 793, + 514 + ], + [ + 794, + 520 + ], + [ + 796, + 523 + ], + [ + 797, + 524 + ], + [ + 808, + 523 + ], + [ + 809, + 517 + ], + [ + 810, + 504 + ], + [ + 819, + 501 + ], + [ + 819, + 511 + ], + [ + 820, + 516 + ], + [ + 822, + 518 + ], + [ + 831, + 517 + ], + [ + 834, + 514 + ], + [ + 834, + 499 + ], + [ + 835, + 482 + ], + [ + 834, + 472 + ], + [ + 831, + 446 + ], + [ + 826, + 424 + ], + [ + 820, + 406 + ], + [ + 816, + 402 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 763, + 499 + ], + [ + 762, + 490 + ], + [ + 735, + 491 + ], + [ + 734, + 499 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 659, + 416 + ], + [ + 662, + 374 + ], + [ + 674, + 360 + ], + [ + 699, + 347 + ], + [ + 715, + 355 + ], + [ + 726, + 375 + ], + [ + 767, + 388 + ], + [ + 791, + 365 + ], + [ + 785, + 327 + ], + [ + 770, + 315 + ], + [ + 755, + 287 + ], + [ + 733, + 250 + ], + [ + 721, + 235 + ], + [ + 701, + 210 + ], + [ + 683, + 208 + ], + [ + 680, + 172 + ], + [ + 638, + 146 + ], + [ + 607, + 164 + ], + [ + 606, + 180 + ], + [ + 586, + 184 + ], + [ + 544, + 217 + ], + [ + 541, + 244 + ], + [ + 519, + 260 + ], + [ + 513, + 294 + ], + [ + 483, + 304 + ], + [ + 487, + 342 + ], + [ + 489, + 372 + ], + [ + 522, + 393 + ], + [ + 534, + 378 + ], + [ + 533, + 364 + ], + [ + 565, + 384 + ], + [ + 574, + 401 + ], + [ + 602, + 402 + ], + [ + 626, + 420 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 614, + 368 + ], + [ + 610, + 319 + ], + [ + 597, + 320 + ], + [ + 598, + 370 + ], + [ + 614, + 369 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 611, + 416 + ], + [ + 604, + 316 + ], + [ + 599, + 316 + ], + [ + 605, + 418 + ], + [ + 607, + 423 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 638, + 413 + ], + [ + 604, + 415 + ], + [ + 576, + 419 + ], + [ + 558, + 423 + ], + [ + 540, + 434 + ], + [ + 611, + 555 + ], + [ + 649, + 554 + ], + [ + 654, + 562 + ], + [ + 657, + 566 + ], + [ + 667, + 566 + ], + [ + 678, + 564 + ], + [ + 684, + 557 + ], + [ + 685, + 543 + ], + [ + 707, + 535 + ], + [ + 708, + 543 + ], + [ + 710, + 550 + ], + [ + 713, + 553 + ], + [ + 723, + 552 + ], + [ + 733, + 549 + ], + [ + 736, + 543 + ], + [ + 739, + 522 + ], + [ + 739, + 509 + ], + [ + 738, + 492 + ], + [ + 732, + 473 + ], + [ + 724, + 464 + ], + [ + 716, + 460 + ], + [ + 714, + 459 + ], + [ + 704, + 439 + ], + [ + 692, + 424 + ], + [ + 682, + 417 + ], + [ + 660, + 414 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 520, + 416 + ], + [ + 469, + 415 + ], + [ + 425, + 418 + ], + 
[ + 405, + 423 + ], + [ + 444, + 599 + ], + [ + 460, + 605 + ], + [ + 493, + 602 + ], + [ + 500, + 608 + ], + [ + 516, + 611 + ], + [ + 533, + 609 + ], + [ + 548, + 601 + ], + [ + 553, + 587 + ], + [ + 554, + 577 + ], + [ + 581, + 569 + ], + [ + 583, + 579 + ], + [ + 588, + 588 + ], + [ + 599, + 589 + ], + [ + 612, + 585 + ], + [ + 616, + 576 + ], + [ + 619, + 556 + ], + [ + 617, + 529 + ], + [ + 619, + 509 + ], + [ + 617, + 494 + ], + [ + 611, + 477 + ], + [ + 606, + 464 + ], + [ + 592, + 457 + ], + [ + 579, + 452 + ], + [ + 561, + 432 + ], + [ + 547, + 421 + ], + [ + 538, + 417 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 274, + 409 + ], + [ + 235, + 411 + ], + [ + 179, + 416 + ], + [ + 150, + 427 + ], + [ + 122, + 445 + ], + [ + 80, + 481 + ], + [ + 57, + 509 + ], + [ + 48, + 645 + ], + [ + 64, + 663 + ], + [ + 200, + 659 + ], + [ + 291, + 655 + ], + [ + 295, + 668 + ], + [ + 302, + 674 + ], + [ + 317, + 672 + ], + [ + 330, + 671 + ], + [ + 341, + 665 + ], + [ + 345, + 631 + ], + [ + 414, + 606 + ], + [ + 419, + 605 + ], + [ + 420, + 619 + ], + [ + 423, + 631 + ], + [ + 426, + 635 + ], + [ + 436, + 636 + ], + [ + 448, + 633 + ], + [ + 456, + 628 + ], + [ + 458, + 595 + ], + [ + 458, + 555 + ], + [ + 457, + 531 + ], + [ + 446, + 491 + ], + [ + 434, + 457 + ], + [ + 417, + 427 + ], + [ + 407, + 419 + ], + [ + 398, + 416 + ], + [ + 351, + 410 + ], + [ + 322, + 409 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 193, + 600 + ], + [ + 86, + 606 + ], + [ + 88, + 628 + ], + [ + 196, + 620 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 34, + 411 + ], + [ + 15, + 385 + ], + [ + 0, + 369 + ], + [ + 0, + 774 + ], + [ + 5, + 775 + ], + [ + 24, + 771 + ], + [ + 43, + 759 + ], + [ + 57, + 739 + ], + [ + 68, + 711 + ], + [ + 70, + 672 + ], + [ + 68, + 648 + ], + [ + 65, + 629 + ], + [ + 71, + 599 + ], + [ + 71, + 563 + ], + [ + 66, + 527 + ], + [ + 68, + 502 + ], + [ + 63, + 463 + ], + [ + 50, + 435 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 1554, + 364 + ], + [ + 1328, + 372 + ], + [ + 1317, + 375 + ], + [ + 1305, + 378 + ], + [ + 1305, + 417 + ], + [ + 1566, + 432 + ], + [ + 1564, + 369 + ], + [ + 1556, + 364 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1476, + 444 + ], + [ + 1482, + 401 + ], + [ + 1480, + 368 + ], + [ + 1470, + 358 + ], + [ + 1470, + 349 + ], + [ + 1467, + 330 + ], + [ + 1464, + 327 + ], + [ + 1452, + 294 + ], + [ + 1459, + 292 + ], + [ + 1459, + 277 + ], + [ + 1465, + 267 + ], + [ + 1454, + 259 + ], + [ + 1449, + 277 + ], + [ + 1442, + 295 + ], + [ + 1442, + 308 + ], + [ + 1436, + 325 + ], + [ + 1424, + 335 + ], + [ + 1406, + 340 + ], + [ + 1398, + 346 + ], + [ + 1389, + 344 + ], + [ + 1370, + 348 + ], + [ + 1359, + 345 + ], + [ + 1360, + 328 + ], + [ + 1357, + 319 + ], + [ + 1339, + 332 + ], + [ + 1326, + 334 + ], + [ + 1317, + 331 + ], + [ + 1308, + 340 + ], + [ + 1309, + 346 + ], + [ + 1317, + 354 + ], + [ + 1305, + 366 + ], + [ + 1297, + 359 + ], + [ + 1285, + 351 + ], + [ + 1269, + 357 + ], + [ + 1251, + 349 + ], + [ + 1245, + 347 + ], + [ + 1245, + 343 + ], + [ + 1240, + 342 + ], + [ + 1236, + 330 + ], + [ + 1234, + 317 + ], + [ + 1217, + 297 + ], + [ + 1208, + 283 + ], + [ + 1230, + 273 + ], + [ + 1214, + 273 + ], + [ + 1212, + 263 + ], + [ + 1177, + 239 + ], + [ + 1215, + 217 + ], + [ + 1245, + 217 + ], + [ + 1245, + 212 + ], + [ + 1238, + 199 + ], + [ + 1237, + 183 + ], + [ + 1245, + 176 + ], + [ + 1234, + 171 + ], + [ + 1220, + 171 + ], + [ + 1217, + 178 + ], + [ + 1210, + 188 + ], + [ + 1201, + 189 + 
], + [ + 1198, + 180 + ], + [ + 1183, + 192 + ], + [ + 1179, + 190 + ], + [ + 1183, + 177 + ], + [ + 1181, + 172 + ], + [ + 1194, + 154 + ], + [ + 1192, + 148 + ], + [ + 1198, + 137 + ], + [ + 1190, + 131 + ], + [ + 1186, + 124 + ], + [ + 1174, + 126 + ], + [ + 1158, + 123 + ], + [ + 1173, + 109 + ], + [ + 1180, + 109 + ], + [ + 1187, + 105 + ], + [ + 1195, + 96 + ], + [ + 1193, + 93 + ], + [ + 1192, + 85 + ], + [ + 1187, + 82 + ], + [ + 1183, + 70 + ], + [ + 1183, + 57 + ], + [ + 1190, + 63 + ], + [ + 1187, + 52 + ], + [ + 1166, + 47 + ], + [ + 1152, + 49 + ], + [ + 1140, + 37 + ], + [ + 1156, + 34 + ], + [ + 1172, + 35 + ], + [ + 1173, + 20 + ], + [ + 1181, + 14 + ], + [ + 1195, + 13 + ], + [ + 1210, + 22 + ], + [ + 1214, + 24 + ], + [ + 1220, + 21 + ], + [ + 1240, + 33 + ], + [ + 1235, + 17 + ], + [ + 1242, + 14 + ], + [ + 1244, + 2 + ], + [ + 1961, + 2 + ], + [ + 1970, + 4 + ], + [ + 1956, + 17 + ], + [ + 1969, + 21 + ], + [ + 2009, + 6 + ], + [ + 2018, + 8 + ], + [ + 2018, + 16 + ], + [ + 2009, + 30 + ], + [ + 2004, + 45 + ], + [ + 1975, + 71 + ], + [ + 1956, + 93 + ], + [ + 1943, + 89 + ], + [ + 1923, + 84 + ], + [ + 1923, + 96 + ], + [ + 1928, + 110 + ], + [ + 1978, + 103 + ], + [ + 1977, + 115 + ], + [ + 1948, + 137 + ], + [ + 1937, + 151 + ], + [ + 1922, + 154 + ], + [ + 1888, + 138 + ], + [ + 1876, + 129 + ], + [ + 1848, + 133 + ], + [ + 1835, + 141 + ], + [ + 1836, + 147 + ], + [ + 1800, + 147 + ], + [ + 1802, + 160 + ], + [ + 1814, + 159 + ], + [ + 1817, + 167 + ], + [ + 1823, + 196 + ], + [ + 1835, + 197 + ], + [ + 1840, + 206 + ], + [ + 1831, + 212 + ], + [ + 1831, + 225 + ], + [ + 1843, + 224 + ], + [ + 1854, + 229 + ], + [ + 1862, + 225 + ], + [ + 1857, + 216 + ], + [ + 1861, + 209 + ], + [ + 1873, + 215 + ], + [ + 1881, + 231 + ], + [ + 1888, + 244 + ], + [ + 1906, + 253 + ], + [ + 1909, + 219 + ], + [ + 1916, + 212 + ], + [ + 1922, + 221 + ], + [ + 1921, + 234 + ], + [ + 1921, + 259 + ], + [ + 1930, + 259 + ], + [ + 1936, + 265 + ], + [ + 1965, + 264 + ], + [ + 1994, + 263 + ], + [ + 1994, + 267 + ], + [ + 1981, + 274 + ], + [ + 1989, + 282 + ], + [ + 1985, + 284 + ], + [ + 1975, + 286 + ], + [ + 1963, + 291 + ], + [ + 1954, + 294 + ], + [ + 1946, + 304 + ], + [ + 1943, + 321 + ], + [ + 1980, + 316 + ], + [ + 1991, + 314 + ], + [ + 1978, + 327 + ], + [ + 1980, + 332 + ], + [ + 2005, + 331 + ], + [ + 2021, + 325 + ], + [ + 2035, + 320 + ], + [ + 2047, + 319 + ], + [ + 2048, + 319 + ], + [ + 2048, + 457 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 1596, + 414 + ], + [ + 1473, + 415 + ], + [ + 1324, + 415 + ], + [ + 1292, + 418 + ], + [ + 1293, + 463 + ], + [ + 1408, + 486 + ], + [ + 1597, + 514 + ], + [ + 1773, + 552 + ], + [ + 2048, + 541 + ], + [ + 2048, + 404 + ], + [ + 1806, + 414 + ], + [ + 1769, + 411 + ], + [ + 1684, + 412 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 2024, + 620 + ], + [ + 1992, + 611 + ], + [ + 1929, + 613 + ], + [ + 1860, + 620 + ], + [ + 1848, + 637 + ], + [ + 1856, + 723 + ], + [ + 1882, + 738 + ], + [ + 1918, + 740 + ], + [ + 2028, + 729 + ], + [ + 2035, + 712 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1336, + 211 + ], + [ + 1342, + 430 + ], + [ + 1340, + 435 + ], + [ + 1342, + 480 + ], + [ + 1351, + 483 + ], + [ + 1350, + 435 + ], + [ + 1348, + 430 + ], + [ + 1341, + 210 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1309, + 179 + ], + [ + 1311, + 177 + ], + [ + 1328, + 173 + ], + [ + 1330, + 166 + ], + [ + 1345, + 166 + ], + [ + 1347, + 172 + ], + [ + 1363, + 174 + ], + [ + 1365, + 177 + ], + 
[ + 1365, + 179 + ], + [ + 1350, + 181 + ], + [ + 1350, + 206 + ], + [ + 1341, + 215 + ], + [ + 1335, + 216 + ], + [ + 1327, + 207 + ], + [ + 1327, + 183 + ], + [ + 1311, + 181 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1334, + 320 + ], + [ + 1330, + 328 + ], + [ + 1329, + 336 + ], + [ + 1330, + 345 + ], + [ + 1335, + 349 + ], + [ + 1340, + 351 + ], + [ + 1345, + 349 + ], + [ + 1349, + 343 + ], + [ + 1350, + 334 + ], + [ + 1348, + 326 + ], + [ + 1345, + 321 + ], + [ + 1341, + 318 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1590, + 376 + ], + [ + 1595, + 522 + ], + [ + 1642, + 538 + ], + [ + 1689, + 533 + ], + [ + 1683, + 358 + ], + [ + 1668, + 354 + ], + [ + 1629, + 358 + ], + [ + 1610, + 363 + ], + [ + 1609, + 371 + ], + [ + 1595, + 373 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1512, + 513 + ], + [ + 1509, + 503 + ], + [ + 1503, + 499 + ], + [ + 1473, + 499 + ], + [ + 1470, + 492 + ], + [ + 1430, + 494 + ], + [ + 1430, + 490 + ], + [ + 1415, + 487 + ], + [ + 1392, + 483 + ], + [ + 1356, + 476 + ], + [ + 1339, + 476 + ], + [ + 1337, + 472 + ], + [ + 1294, + 464 + ], + [ + 1285, + 462 + ], + [ + 1270, + 459 + ], + [ + 1237, + 452 + ], + [ + 1216, + 454 + ], + [ + 1215, + 470 + ], + [ + 1228, + 473 + ], + [ + 1237, + 477 + ], + [ + 1263, + 484 + ], + [ + 1273, + 490 + ], + [ + 1279, + 493 + ], + [ + 1291, + 493 + ], + [ + 1298, + 497 + ], + [ + 1304, + 501 + ], + [ + 1320, + 498 + ], + [ + 1322, + 503 + ], + [ + 1326, + 504 + ], + [ + 1326, + 508 + ], + [ + 1339, + 510 + ], + [ + 1347, + 514 + ], + [ + 1357, + 515 + ], + [ + 1363, + 517 + ], + [ + 1364, + 521 + ], + [ + 1372, + 521 + ], + [ + 1377, + 526 + ], + [ + 1377, + 530 + ], + [ + 1408, + 530 + ], + [ + 1412, + 538 + ], + [ + 1437, + 539 + ], + [ + 1448, + 545 + ], + [ + 1448, + 551 + ], + [ + 1505, + 549 + ], + [ + 1505, + 542 + ], + [ + 1509, + 554 + ], + [ + 1510, + 559 + ], + [ + 1513, + 562 + ], + [ + 1523, + 567 + ], + [ + 1538, + 572 + ], + [ + 1582, + 567 + ], + [ + 1580, + 507 + ], + [ + 1536, + 512 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1529, + 598 + ], + [ + 1524, + 458 + ], + [ + 1511, + 458 + ], + [ + 1517, + 598 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1573, + 457 + ], + [ + 1579, + 621 + ], + [ + 1594, + 621 + ], + [ + 1587, + 457 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1661, + 650 + ], + [ + 1648, + 469 + ], + [ + 1629, + 470 + ], + [ + 1644, + 650 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1738, + 680 + ], + [ + 1729, + 468 + ], + [ + 1756, + 468 + ], + [ + 1761, + 678 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1003, + 401 + ], + [ + 1000, + 395 + ], + [ + 997, + 386 + ], + [ + 994, + 381 + ], + [ + 989, + 380 + ], + [ + 983, + 382 + ], + [ + 979, + 386 + ], + [ + 978, + 392 + ], + [ + 977, + 399 + ], + [ + 970, + 404 + ], + [ + 968, + 408 + ], + [ + 964, + 423 + ], + [ + 963, + 434 + ], + [ + 958, + 446 + ], + [ + 956, + 457 + ], + [ + 952, + 464 + ], + [ + 951, + 473 + ], + [ + 953, + 478 + ], + [ + 950, + 484 + ], + [ + 939, + 492 + ], + [ + 942, + 498 + ], + [ + 942, + 502 + ], + [ + 941, + 512 + ], + [ + 940, + 520 + ], + [ + 944, + 527 + ], + [ + 955, + 527 + ], + [ + 963, + 526 + ], + [ + 960, + 539 + ], + [ + 954, + 539 + ], + [ + 952, + 543 + ], + [ + 953, + 545 + ], + [ + 961, + 549 + ], + [ + 969, + 551 + ], + [ + 973, + 550 + ], + [ + 976, + 532 + ], + [ + 978, + 517 + ], + [ + 982, + 496 + ], + [ + 988, + 481 + ], + [ + 992, + 503 + ], + [ + 997, + 525 + ], + [ + 995, + 
546 + ], + [ + 989, + 552 + ], + [ + 991, + 555 + ], + [ + 1002, + 554 + ], + [ + 1008, + 553 + ], + [ + 1009, + 535 + ], + [ + 1009, + 512 + ], + [ + 1004, + 499 + ], + [ + 1007, + 482 + ], + [ + 1015, + 464 + ], + [ + 1014, + 443 + ], + [ + 1014, + 442 + ], + [ + 1016, + 440 + ], + [ + 1022, + 445 + ], + [ + 1045, + 447 + ], + [ + 1056, + 433 + ], + [ + 1034, + 422 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1015, + 392 + ], + [ + 1011, + 384 + ], + [ + 1011, + 374 + ], + [ + 1018, + 369 + ], + [ + 1026, + 370 + ], + [ + 1030, + 374 + ], + [ + 1030, + 383 + ], + [ + 1030, + 391 + ], + [ + 1034, + 390 + ], + [ + 1043, + 393 + ], + [ + 1049, + 403 + ], + [ + 1054, + 410 + ], + [ + 1058, + 410 + ], + [ + 1066, + 419 + ], + [ + 1071, + 430 + ], + [ + 1071, + 447 + ], + [ + 1066, + 459 + ], + [ + 1061, + 462 + ], + [ + 1058, + 467 + ], + [ + 1052, + 466 + ], + [ + 1050, + 465 + ], + [ + 1048, + 484 + ], + [ + 1052, + 502 + ], + [ + 1056, + 526 + ], + [ + 1057, + 540 + ], + [ + 1055, + 547 + ], + [ + 1054, + 554 + ], + [ + 1039, + 552 + ], + [ + 1038, + 550 + ], + [ + 1041, + 547 + ], + [ + 1036, + 514 + ], + [ + 1029, + 489 + ], + [ + 1024, + 507 + ], + [ + 1018, + 517 + ], + [ + 1010, + 544 + ], + [ + 1008, + 548 + ], + [ + 1008, + 513 + ], + [ + 1005, + 506 + ], + [ + 1008, + 495 + ], + [ + 1008, + 482 + ], + [ + 1011, + 464 + ], + [ + 1012, + 449 + ], + [ + 1013, + 440 + ], + [ + 1015, + 435 + ], + [ + 1022, + 444 + ], + [ + 1038, + 444 + ], + [ + 1045, + 441 + ], + [ + 1046, + 436 + ], + [ + 1040, + 433 + ], + [ + 1038, + 432 + ], + [ + 1026, + 433 + ], + [ + 1015, + 412 + ], + [ + 1004, + 415 + ], + [ + 984, + 417 + ], + [ + 975, + 417 + ], + [ + 962, + 421 + ], + [ + 960, + 416 + ], + [ + 961, + 413 + ], + [ + 968, + 409 + ], + [ + 971, + 410 + ], + [ + 990, + 406 + ], + [ + 998, + 405 + ], + [ + 1004, + 396 + ], + [ + 1010, + 394 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000071_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000071_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..a0893822d71bdebfdd664d4e90666abf5bc0b0a9 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000071_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000071_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000071_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..85013961f3a84c34e3295a6345f2c17e9bed57a4 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000071_000019_gtFine_labelTrainIds.png differ diff --git 
a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000072_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000072_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..0364b672202184530d6afec9b213e0aadd553168 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000072_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000072_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000072_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..b36f987c870d91623bed86e63fa12fd42a0ee478 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000072_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000072_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000072_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..242f082cf89447e218377274063f4e64a577ccf1 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000072_000019_gtFine_polygons.json @@ -0,0 +1,7667 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1635, + 0 + ], + [ + 1594, + 56 + ], + [ + 1313, + 273 + ], + [ + 1089, + 295 + ], + [ + 747, + 159 + ], + [ + 659, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 0, + 537 + ], + [ + 1024, + 404 + ], + [ + 1406, + 388 + ], + [ + 2047, + 496 + ], + [ + 2047, + 1023 + ], + [ + 0, + 1023 + ], + [ + 0, + 532 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 2048, + 523 + ], + [ + 1386, + 430 + ], + [ + 990, + 438 + ], + [ + 1, + 592 + ], + [ + 0, + 1 + ], + [ + 752, + 1 + ], + [ + 753, + 81 + ], + [ + 867, + 87 + ], + [ + 876, + 49 + ], + [ + 884, + 25 + ], + [ + 895, + 12 + ], + [ + 900, + 10 + ], + [ + 907, + 1 + ], + [ + 953, + 1 + ], + [ + 955, + 11 + ], + [ + 967, + 18 + ], + [ + 975, + 27 + ], + [ + 985, + 51 + ], + [ + 989, + 49 + ], + [ + 990, + 54 + ], + [ + 991, + 77 + ], + [ + 999, + 81 + ], + [ + 996, + 90 + ], + [ + 997, + 96 + ], + [ + 994, + 101 + ], + [ + 996, + 119 + ], + [ + 1000, + 119 + ], + [ + 1020, + 146 + ], + [ + 1048, + 193 + ], + [ + 1065, + 203 + ], + [ + 1072, + 202 + ], + [ + 1077, + 206 + ], + [ + 1088, + 207 + ], + [ + 1101, + 232 + ], + [ + 1097, + 234 + ], + [ + 1096, + 255 + ], + [ + 1298, + 238 + ], + [ + 1297, + 225 + ], + [ + 1302, + 208 + ], + [ + 1308, + 206 + ], + [ + 1314, + 190 + ], + [ + 1327, + 188 + ], + [ + 1331, + 176 + ], + [ + 1348, + 144 + ], + [ + 1345, + 112 + ], + [ + 1363, + 81 + ], + [ + 1405, + 79 + ], + [ + 1430, + 39 + ], + [ + 1516, + 36 + ], + [ + 1562, + 1 + ], + [ + 2048, + 1 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1532, + 463 + ], + [ + 1474, + 483 + ], + [ + 1480, + 499 + ], + [ + 1625, + 556 + ], + [ + 1808, + 602 + ], + [ + 1980, + 649 + ], + [ + 1990, + 650 + ], + [ + 1991, + 629 + ], + [ + 1995, + 625 + ], + [ + 2047, + 615 + ], + [ + 2047, + 519 + ], + [ + 1552, + 470 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1188, + 432 + ], + [ + 1187, + 422 + ], + [ + 1190, + 412 + ], + [ + 1187, + 405 + ], + [ + 1184, + 405 + ], + [ + 1181, + 404 + ], + [ + 1177, + 399 + ], + [ + 1168, + 397 + ], + [ + 1162, + 397 + ], + [ + 1162, + 397 + ], + [ + 1158, + 398 + ], + [ + 1158, + 398 + ], + [ + 1153, + 398 + ], + [ + 1148, + 393 + ], + [ + 1141, + 390 + ], + [ + 1130, + 392 + ], + [ + 1125, + 
395 + ], + [ + 1122, + 397 + ], + [ + 1121, + 398 + ], + [ + 1113, + 393 + ], + [ + 1108, + 392 + ], + [ + 1090, + 393 + ], + [ + 1090, + 390 + ], + [ + 1089, + 376 + ], + [ + 1089, + 369 + ], + [ + 1086, + 367 + ], + [ + 1082, + 366 + ], + [ + 1075, + 363 + ], + [ + 1073, + 358 + ], + [ + 1071, + 351 + ], + [ + 1074, + 344 + ], + [ + 1075, + 342 + ], + [ + 1075, + 341 + ], + [ + 1075, + 329 + ], + [ + 1073, + 329 + ], + [ + 1070, + 318 + ], + [ + 1072, + 312 + ], + [ + 1072, + 310 + ], + [ + 1072, + 304 + ], + [ + 1065, + 301 + ], + [ + 1064, + 296 + ], + [ + 1065, + 292 + ], + [ + 1066, + 288 + ], + [ + 1066, + 284 + ], + [ + 1061, + 281 + ], + [ + 1061, + 278 + ], + [ + 1061, + 276 + ], + [ + 1064, + 267 + ], + [ + 1064, + 267 + ], + [ + 1066, + 254 + ], + [ + 1069, + 249 + ], + [ + 1075, + 248 + ], + [ + 1083, + 247 + ], + [ + 1089, + 245 + ], + [ + 1095, + 244 + ], + [ + 1100, + 235 + ], + [ + 1109, + 220 + ], + [ + 1109, + 215 + ], + [ + 1121, + 215 + ], + [ + 1121, + 215 + ], + [ + 1132, + 211 + ], + [ + 1138, + 209 + ], + [ + 1148, + 207 + ], + [ + 1155, + 202 + ], + [ + 1165, + 198 + ], + [ + 1170, + 200 + ], + [ + 1178, + 205 + ], + [ + 1192, + 207 + ], + [ + 1199, + 210 + ], + [ + 1203, + 212 + ], + [ + 1203, + 212 + ], + [ + 1212, + 209 + ], + [ + 1216, + 205 + ], + [ + 1221, + 197 + ], + [ + 1227, + 191 + ], + [ + 1230, + 187 + ], + [ + 1240, + 185 + ], + [ + 1241, + 190 + ], + [ + 1243, + 199 + ], + [ + 1245, + 206 + ], + [ + 1245, + 212 + ], + [ + 1251, + 215 + ], + [ + 1255, + 215 + ], + [ + 1265, + 219 + ], + [ + 1275, + 223 + ], + [ + 1285, + 225 + ], + [ + 1294, + 231 + ], + [ + 1301, + 244 + ], + [ + 1303, + 251 + ], + [ + 1319, + 257 + ], + [ + 1325, + 259 + ], + [ + 1334, + 269 + ], + [ + 1339, + 285 + ], + [ + 1339, + 308 + ], + [ + 1336, + 316 + ], + [ + 1328, + 317 + ], + [ + 1327, + 319 + ], + [ + 1324, + 332 + ], + [ + 1321, + 344 + ], + [ + 1313, + 349 + ], + [ + 1302, + 356 + ], + [ + 1265, + 369 + ], + [ + 1266, + 374 + ], + [ + 1247, + 379 + ], + [ + 1239, + 382 + ], + [ + 1229, + 386 + ], + [ + 1220, + 388 + ], + [ + 1210, + 399 + ], + [ + 1209, + 406 + ], + [ + 1206, + 423 + ], + [ + 1204, + 435 + ] + ] + }, + { + "label": "rider", + "polygon": [ + [ + 1271, + 454 + ], + [ + 1266, + 454 + ], + [ + 1265, + 451 + ], + [ + 1267, + 450 + ], + [ + 1268, + 446 + ], + [ + 1269, + 444 + ], + [ + 1270, + 439 + ], + [ + 1271, + 438 + ], + [ + 1272, + 436 + ], + [ + 1272, + 434 + ], + [ + 1272, + 432 + ], + [ + 1272, + 428 + ], + [ + 1270, + 427 + ], + [ + 1270, + 420 + ], + [ + 1270, + 420 + ], + [ + 1270, + 418 + ], + [ + 1270, + 418 + ], + [ + 1270, + 415 + ], + [ + 1273, + 414 + ], + [ + 1273, + 414 + ], + [ + 1273, + 410 + ], + [ + 1275, + 410 + ], + [ + 1277, + 406 + ], + [ + 1277, + 406 + ], + [ + 1280, + 406 + ], + [ + 1282, + 406 + ], + [ + 1282, + 404 + ], + [ + 1280, + 404 + ], + [ + 1280, + 401 + ], + [ + 1279, + 400 + ], + [ + 1279, + 399 + ], + [ + 1279, + 395 + ], + [ + 1281, + 393 + ], + [ + 1287, + 393 + ], + [ + 1289, + 399 + ], + [ + 1288, + 405 + ], + [ + 1293, + 406 + ], + [ + 1297, + 412 + ], + [ + 1299, + 416 + ], + [ + 1299, + 419 + ], + [ + 1299, + 426 + ], + [ + 1297, + 431 + ], + [ + 1297, + 431 + ], + [ + 1297, + 431 + ], + [ + 1295, + 432 + ], + [ + 1281, + 427 + ], + [ + 1277, + 443 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 1287, + 454 + ], + [ + 1281, + 454 + ], + [ + 1281, + 450 + ], + [ + 1281, + 448 + ], + [ + 1279, + 447 + ], + [ + 1277, + 447 + ], + [ + 1276, + 446 + ], + [ + 1276, + 440 + ], + [ + 1275, + 440 + 
], + [ + 1275, + 440 + ], + [ + 1272, + 438 + ], + [ + 1272, + 436 + ], + [ + 1272, + 436 + ], + [ + 1272, + 431 + ], + [ + 1272, + 430 + ], + [ + 1270, + 430 + ], + [ + 1270, + 427 + ], + [ + 1270, + 425 + ], + [ + 1270, + 424 + ], + [ + 1273, + 423 + ], + [ + 1275, + 423 + ], + [ + 1278, + 422 + ], + [ + 1281, + 423 + ], + [ + 1285, + 425 + ], + [ + 1289, + 425 + ], + [ + 1293, + 422 + ], + [ + 1295, + 422 + ], + [ + 1297, + 422 + ], + [ + 1299, + 424 + ], + [ + 1299, + 429 + ], + [ + 1299, + 432 + ], + [ + 1299, + 435 + ], + [ + 1297, + 436 + ], + [ + 1297, + 438 + ], + [ + 1297, + 439 + ], + [ + 1295, + 442 + ], + [ + 1293, + 443 + ], + [ + 1293, + 443 + ], + [ + 1290, + 447 + ], + [ + 1290, + 447 + ], + [ + 1289, + 450 + ], + [ + 1289, + 452 + ], + [ + 1289, + 454 + ], + [ + 1289, + 454 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1048, + 298 + ], + [ + 1048, + 294 + ], + [ + 1056, + 294 + ], + [ + 1060, + 294 + ], + [ + 1060, + 306 + ], + [ + 1060, + 319 + ], + [ + 1048, + 318 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 954, + 420 + ], + [ + 951, + 419 + ], + [ + 950, + 398 + ], + [ + 951, + 367 + ], + [ + 951, + 333 + ], + [ + 955, + 320 + ], + [ + 965, + 314 + ], + [ + 1025, + 300 + ], + [ + 1055, + 298 + ], + [ + 1055, + 303 + ], + [ + 1041, + 302 + ], + [ + 1019, + 304 + ], + [ + 998, + 308 + ], + [ + 966, + 317 + ], + [ + 961, + 320 + ], + [ + 953, + 334 + ], + [ + 953, + 370 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1368, + 426 + ], + [ + 1373, + 427 + ], + [ + 1369, + 369 + ], + [ + 1365, + 342 + ], + [ + 1342, + 333 + ], + [ + 1300, + 332 + ], + [ + 1295, + 332 + ], + [ + 1295, + 337 + ], + [ + 1319, + 336 + ], + [ + 1344, + 338 + ], + [ + 1362, + 346 + ], + [ + 1367, + 367 + ], + [ + 1368, + 405 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1358, + 448 + ], + [ + 1360, + 439 + ], + [ + 1360, + 436 + ], + [ + 1361, + 428 + ], + [ + 1360, + 425 + ], + [ + 1356, + 425 + ], + [ + 1355, + 422 + ], + [ + 1355, + 416 + ], + [ + 1356, + 414 + ], + [ + 1357, + 410 + ], + [ + 1356, + 403 + ], + [ + 1357, + 402 + ], + [ + 1358, + 401 + ], + [ + 1360, + 401 + ], + [ + 1362, + 397 + ], + [ + 1367, + 392 + ], + [ + 1368, + 392 + ], + [ + 1372, + 393 + ], + [ + 1373, + 397 + ], + [ + 1373, + 397 + ], + [ + 1373, + 397 + ], + [ + 1373, + 400 + ], + [ + 1370, + 400 + ], + [ + 1370, + 403 + ], + [ + 1370, + 406 + ], + [ + 1370, + 406 + ], + [ + 1369, + 414 + ], + [ + 1370, + 414 + ], + [ + 1370, + 416 + ], + [ + 1370, + 417 + ], + [ + 1369, + 422 + ], + [ + 1369, + 422 + ], + [ + 1369, + 423 + ], + [ + 1369, + 423 + ], + [ + 1368, + 427 + ], + [ + 1369, + 427 + ], + [ + 1369, + 430 + ], + [ + 1369, + 434 + ], + [ + 1369, + 436 + ], + [ + 1368, + 437 + ], + [ + 1368, + 437 + ], + [ + 1366, + 439 + ], + [ + 1366, + 440 + ], + [ + 1365, + 444 + ], + [ + 1366, + 446 + ], + [ + 1368, + 446 + ], + [ + 1368, + 446 + ], + [ + 1368, + 448 + ], + [ + 1367, + 448 + ], + [ + 1363, + 448 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1287, + 327 + ], + [ + 1285, + 348 + ], + [ + 1296, + 346 + ], + [ + 1296, + 326 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1406, + 417 + ], + [ + 1409, + 378 + ], + [ + 1410, + 313 + ], + [ + 1407, + 289 + ], + [ + 1394, + 272 + ], + [ + 1326, + 258 + ], + [ + 1273, + 256 + ], + [ + 1273, + 252 + ], + [ + 1297, + 253 + ], + [ + 1327, + 255 + ], + [ + 1367, + 261 + ], + [ + 1402, + 272 + ], + [ + 1411, + 280 + ], + [ + 1412, + 296 + ], + [ + 1412, + 340 + ], + [ + 1411, + 420 + ], + [ + 
1407, + 420 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1392, + 442 + ], + [ + 1377, + 448 + ], + [ + 1386, + 456 + ], + [ + 1465, + 488 + ], + [ + 1509, + 476 + ], + [ + 1446, + 423 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 894, + 412 + ], + [ + 893, + 390 + ], + [ + 913, + 388 + ], + [ + 912, + 431 + ], + [ + 897, + 430 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1278, + 243 + ], + [ + 1260, + 244 + ], + [ + 1260, + 278 + ], + [ + 1278, + 277 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1416, + 344 + ], + [ + 1415, + 376 + ], + [ + 1403, + 376 + ], + [ + 1402, + 374 + ], + [ + 1403, + 364 + ], + [ + 1400, + 363 + ], + [ + 1393, + 358 + ], + [ + 1393, + 357 + ], + [ + 1400, + 356 + ], + [ + 1400, + 352 + ], + [ + 1391, + 352 + ], + [ + 1394, + 348 + ], + [ + 1401, + 346 + ], + [ + 1400, + 344 + ], + [ + 1407, + 344 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1393, + 443 + ], + [ + 1394, + 424 + ], + [ + 1394, + 419 + ], + [ + 1400, + 417 + ], + [ + 1405, + 417 + ], + [ + 1411, + 415 + ], + [ + 1418, + 415 + ], + [ + 1419, + 415 + ], + [ + 1418, + 440 + ], + [ + 1409, + 443 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1416, + 442 + ], + [ + 1414, + 339 + ], + [ + 1413, + 327 + ], + [ + 1414, + 321 + ], + [ + 1421, + 321 + ], + [ + 1417, + 336 + ], + [ + 1422, + 444 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1421, + 330 + ], + [ + 1428, + 326 + ], + [ + 1433, + 327 + ], + [ + 1436, + 332 + ], + [ + 1436, + 340 + ], + [ + 1429, + 343 + ], + [ + 1442, + 343 + ], + [ + 1427, + 365 + ], + [ + 1417, + 346 + ], + [ + 1424, + 343 + ], + [ + 1416, + 338 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1453, + 304 + ], + [ + 1454, + 277 + ], + [ + 1455, + 269 + ], + [ + 1459, + 267 + ], + [ + 1461, + 265 + ], + [ + 1463, + 264 + ], + [ + 1473, + 263 + ], + [ + 1479, + 263 + ], + [ + 1490, + 272 + ], + [ + 1496, + 269 + ], + [ + 1497, + 268 + ], + [ + 1501, + 268 + ], + [ + 1500, + 259 + ], + [ + 1500, + 256 + ], + [ + 1507, + 252 + ], + [ + 1505, + 241 + ], + [ + 1503, + 234 + ], + [ + 1501, + 230 + ], + [ + 1496, + 228 + ], + [ + 1492, + 220 + ], + [ + 1490, + 212 + ], + [ + 1492, + 211 + ], + [ + 1504, + 210 + ], + [ + 1505, + 210 + ], + [ + 1520, + 208 + ], + [ + 1510, + 205 + ], + [ + 1510, + 199 + ], + [ + 1508, + 196 + ], + [ + 1508, + 192 + ], + [ + 1499, + 197 + ], + [ + 1496, + 198 + ], + [ + 1489, + 197 + ], + [ + 1488, + 191 + ], + [ + 1487, + 186 + ], + [ + 1492, + 186 + ], + [ + 1499, + 186 + ], + [ + 1500, + 183 + ], + [ + 1499, + 180 + ], + [ + 1500, + 166 + ], + [ + 1498, + 163 + ], + [ + 1498, + 156 + ], + [ + 1492, + 156 + ], + [ + 1490, + 154 + ], + [ + 1483, + 151 + ], + [ + 1482, + 157 + ], + [ + 1478, + 161 + ], + [ + 1470, + 158 + ], + [ + 1466, + 156 + ], + [ + 1464, + 154 + ], + [ + 1461, + 149 + ], + [ + 1460, + 145 + ], + [ + 1459, + 145 + ], + [ + 1453, + 144 + ], + [ + 1447, + 139 + ], + [ + 1438, + 130 + ], + [ + 1436, + 135 + ], + [ + 1437, + 150 + ], + [ + 1437, + 161 + ], + [ + 1437, + 167 + ], + [ + 1433, + 170 + ], + [ + 1425, + 170 + ], + [ + 1414, + 175 + ], + [ + 1418, + 178 + ], + [ + 1413, + 185 + ], + [ + 1407, + 185 + ], + [ + 1406, + 187 + ], + [ + 1406, + 195 + ], + [ + 1413, + 195 + ], + [ + 1409, + 208 + ], + [ + 1411, + 210 + ], + [ + 1418, + 211 + ], + [ + 1418, + 211 + ], + [ + 1413, + 217 + ], + [ + 1400, + 217 + ], + [ + 1400, + 212 + ], + [ + 1399, + 212 + ], + [ + 1404, + 229 + ], + [ + 1413, + 228 + ], + [ + 
1416, + 225 + ], + [ + 1419, + 225 + ], + [ + 1419, + 229 + ], + [ + 1419, + 229 + ], + [ + 1414, + 234 + ], + [ + 1416, + 235 + ], + [ + 1422, + 243 + ], + [ + 1422, + 243 + ], + [ + 1425, + 245 + ], + [ + 1417, + 249 + ], + [ + 1421, + 260 + ], + [ + 1425, + 263 + ], + [ + 1434, + 267 + ], + [ + 1440, + 268 + ], + [ + 1442, + 268 + ], + [ + 1446, + 269 + ], + [ + 1449, + 284 + ], + [ + 1447, + 305 + ], + [ + 1447, + 307 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1496, + 339 + ], + [ + 1492, + 330 + ], + [ + 1496, + 321 + ], + [ + 1497, + 307 + ], + [ + 1497, + 302 + ], + [ + 1497, + 291 + ], + [ + 1497, + 281 + ], + [ + 1492, + 276 + ], + [ + 1489, + 269 + ], + [ + 1489, + 266 + ], + [ + 1489, + 259 + ], + [ + 1492, + 243 + ], + [ + 1495, + 223 + ], + [ + 1494, + 209 + ], + [ + 1492, + 197 + ], + [ + 1496, + 196 + ], + [ + 1504, + 196 + ], + [ + 1510, + 197 + ], + [ + 1511, + 207 + ], + [ + 1510, + 211 + ], + [ + 1510, + 215 + ], + [ + 1511, + 228 + ], + [ + 1503, + 248 + ], + [ + 1503, + 259 + ], + [ + 1505, + 280 + ], + [ + 1505, + 289 + ], + [ + 1508, + 288 + ], + [ + 1509, + 285 + ], + [ + 1511, + 265 + ], + [ + 1512, + 248 + ], + [ + 1520, + 246 + ], + [ + 1514, + 242 + ], + [ + 1511, + 242 + ], + [ + 1514, + 230 + ], + [ + 1514, + 225 + ], + [ + 1520, + 223 + ], + [ + 1526, + 224 + ], + [ + 1526, + 224 + ], + [ + 1520, + 217 + ], + [ + 1515, + 211 + ], + [ + 1514, + 209 + ], + [ + 1515, + 203 + ], + [ + 1525, + 203 + ], + [ + 1526, + 203 + ], + [ + 1534, + 211 + ], + [ + 1539, + 212 + ], + [ + 1539, + 212 + ], + [ + 1531, + 198 + ], + [ + 1531, + 196 + ], + [ + 1531, + 190 + ], + [ + 1531, + 188 + ], + [ + 1537, + 187 + ], + [ + 1536, + 182 + ], + [ + 1535, + 175 + ], + [ + 1537, + 172 + ], + [ + 1543, + 170 + ], + [ + 1551, + 170 + ], + [ + 1552, + 170 + ], + [ + 1548, + 167 + ], + [ + 1546, + 159 + ], + [ + 1546, + 154 + ], + [ + 1548, + 153 + ], + [ + 1552, + 150 + ], + [ + 1563, + 149 + ], + [ + 1571, + 149 + ], + [ + 1573, + 143 + ], + [ + 1571, + 138 + ], + [ + 1569, + 130 + ], + [ + 1572, + 126 + ], + [ + 1586, + 130 + ], + [ + 1654, + 134 + ], + [ + 1656, + 148 + ], + [ + 1649, + 158 + ], + [ + 1647, + 156 + ], + [ + 1639, + 158 + ], + [ + 1644, + 168 + ], + [ + 1631, + 185 + ], + [ + 1624, + 207 + ], + [ + 1621, + 210 + ], + [ + 1612, + 221 + ], + [ + 1601, + 228 + ], + [ + 1593, + 221 + ], + [ + 1586, + 221 + ], + [ + 1577, + 221 + ], + [ + 1570, + 225 + ], + [ + 1572, + 235 + ], + [ + 1572, + 235 + ], + [ + 1571, + 251 + ], + [ + 1561, + 251 + ], + [ + 1556, + 245 + ], + [ + 1549, + 242 + ], + [ + 1537, + 246 + ], + [ + 1533, + 255 + ], + [ + 1531, + 263 + ], + [ + 1523, + 269 + ], + [ + 1516, + 276 + ], + [ + 1515, + 282 + ], + [ + 1513, + 295 + ], + [ + 1508, + 306 + ], + [ + 1505, + 315 + ], + [ + 1502, + 324 + ], + [ + 1501, + 329 + ], + [ + 1500, + 337 + ], + [ + 1500, + 341 + ] + ] + }, + { + "label": "persongroup", + "polygon": [ + [ + 1599, + 410 + ], + [ + 1599, + 401 + ], + [ + 1611, + 392 + ], + [ + 1610, + 388 + ], + [ + 1611, + 382 + ], + [ + 1618, + 381 + ], + [ + 1622, + 392 + ], + [ + 1624, + 393 + ], + [ + 1623, + 388 + ], + [ + 1625, + 383 + ], + [ + 1629, + 381 + ], + [ + 1635, + 381 + ], + [ + 1639, + 381 + ], + [ + 1643, + 388 + ], + [ + 1645, + 388 + ], + [ + 1646, + 397 + ], + [ + 1648, + 401 + ], + [ + 1648, + 414 + ], + [ + 1647, + 419 + ], + [ + 1647, + 422 + ], + [ + 1647, + 427 + ], + [ + 1646, + 431 + ], + [ + 1645, + 437 + ], + [ + 1645, + 437 + ], + [ + 1606, + 437 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + 
[ + 1423, + 449 + ], + [ + 1424, + 430 + ], + [ + 1421, + 431 + ], + [ + 1422, + 427 + ], + [ + 1437, + 427 + ], + [ + 1437, + 451 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1397, + 449 + ], + [ + 1400, + 423 + ], + [ + 1398, + 420 + ], + [ + 1404, + 419 + ], + [ + 1404, + 424 + ], + [ + 1404, + 438 + ], + [ + 1404, + 447 + ], + [ + 1404, + 451 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1433, + 456 + ], + [ + 1434, + 369 + ], + [ + 1438, + 369 + ], + [ + 1437, + 459 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1439, + 376 + ], + [ + 1436, + 376 + ], + [ + 1433, + 305 + ], + [ + 1468, + 305 + ], + [ + 1468, + 363 + ], + [ + 1439, + 362 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1428, + 376 + ], + [ + 1447, + 376 + ], + [ + 1449, + 416 + ], + [ + 1428, + 415 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1471, + 439 + ], + [ + 1472, + 341 + ], + [ + 1476, + 340 + ], + [ + 1476, + 443 + ], + [ + 1470, + 443 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1623, + 509 + ], + [ + 1557, + 513 + ], + [ + 1549, + 515 + ], + [ + 1549, + 517 + ], + [ + 1555, + 525 + ], + [ + 1575, + 533 + ], + [ + 1686, + 536 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1471, + 340 + ], + [ + 1465, + 337 + ], + [ + 1463, + 332 + ], + [ + 1464, + 325 + ], + [ + 1471, + 321 + ], + [ + 1478, + 321 + ], + [ + 1484, + 329 + ], + [ + 1482, + 336 + ], + [ + 1478, + 339 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1452, + 460 + ], + [ + 1453, + 427 + ], + [ + 1451, + 425 + ], + [ + 1450, + 424 + ], + [ + 1453, + 423 + ], + [ + 1458, + 423 + ], + [ + 1458, + 426 + ], + [ + 1455, + 428 + ], + [ + 1456, + 462 + ] + ] + }, + { + "label": "persongroup", + "polygon": [ + [ + 763, + 437 + ], + [ + 760, + 426 + ], + [ + 756, + 426 + ], + [ + 752, + 423 + ], + [ + 752, + 415 + ], + [ + 756, + 410 + ], + [ + 764, + 409 + ], + [ + 766, + 407 + ], + [ + 773, + 409 + ], + [ + 781, + 407 + ], + [ + 782, + 403 + ], + [ + 789, + 404 + ], + [ + 792, + 412 + ], + [ + 802, + 409 + ], + [ + 803, + 409 + ], + [ + 807, + 409 + ], + [ + 815, + 406 + ], + [ + 821, + 403 + ], + [ + 825, + 405 + ], + [ + 827, + 407 + ], + [ + 827, + 416 + ], + [ + 826, + 424 + ], + [ + 824, + 436 + ], + [ + 824, + 437 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 894, + 412 + ], + [ + 893, + 390 + ], + [ + 913, + 388 + ], + [ + 912, + 431 + ], + [ + 897, + 430 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1459, + 462 + ], + [ + 1460, + 427 + ], + [ + 1458, + 427 + ], + [ + 1458, + 425 + ], + [ + 1462, + 423 + ], + [ + 1464, + 425 + ], + [ + 1462, + 426 + ], + [ + 1463, + 463 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1461, + 489 + ], + [ + 1461, + 464 + ], + [ + 1462, + 458 + ], + [ + 1462, + 455 + ], + [ + 1463, + 453 + ], + [ + 1464, + 451 + ], + [ + 1465, + 450 + ], + [ + 1466, + 449 + ], + [ + 1472, + 447 + ], + [ + 1468, + 444 + ], + [ + 1465, + 444 + ], + [ + 1461, + 444 + ], + [ + 1460, + 442 + ], + [ + 1460, + 438 + ], + [ + 1463, + 437 + ], + [ + 1467, + 437 + ], + [ + 1472, + 437 + ], + [ + 1473, + 439 + ], + [ + 1477, + 439 + ], + [ + 1483, + 427 + ], + [ + 1489, + 420 + ], + [ + 1499, + 418 + ], + [ + 1509, + 417 + ], + [ + 1516, + 417 + ], + [ + 1519, + 417 + ], + [ + 1555, + 415 + ], + [ + 1499, + 488 + ], + [ + 1490, + 487 + ], + [ + 1490, + 491 + ], + [ + 1490, + 492 + ], + [ + 1490, + 495 + ], + [ + 1490, + 495 + ], + [ + 1490, + 497 + ], + [ + 1484, + 499 + ], + [ + 1478, + 499 + ], + [ + 
1477, + 499 + ], + [ + 1477, + 499 + ], + [ + 1473, + 495 + ], + [ + 1473, + 495 + ], + [ + 1473, + 492 + ], + [ + 1472, + 493 + ], + [ + 1472, + 493 + ], + [ + 1468, + 493 + ], + [ + 1465, + 493 + ], + [ + 1465, + 493 + ], + [ + 1463, + 493 + ], + [ + 1463, + 493 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1624, + 501 + ], + [ + 1609, + 501 + ], + [ + 1563, + 503 + ], + [ + 1535, + 503 + ], + [ + 1536, + 512 + ], + [ + 1532, + 514 + ], + [ + 1522, + 515 + ], + [ + 1521, + 515 + ], + [ + 1517, + 516 + ], + [ + 1516, + 513 + ], + [ + 1516, + 511 + ], + [ + 1515, + 511 + ], + [ + 1514, + 508 + ], + [ + 1514, + 505 + ], + [ + 1514, + 501 + ], + [ + 1514, + 504 + ], + [ + 1511, + 508 + ], + [ + 1510, + 508 + ], + [ + 1510, + 508 + ], + [ + 1508, + 508 + ], + [ + 1503, + 508 + ], + [ + 1502, + 508 + ], + [ + 1500, + 508 + ], + [ + 1500, + 507 + ], + [ + 1499, + 500 + ], + [ + 1501, + 490 + ], + [ + 1499, + 489 + ], + [ + 1499, + 486 + ], + [ + 1500, + 481 + ], + [ + 1499, + 478 + ], + [ + 1498, + 471 + ], + [ + 1497, + 467 + ], + [ + 1497, + 464 + ], + [ + 1497, + 462 + ], + [ + 1497, + 460 + ], + [ + 1499, + 458 + ], + [ + 1503, + 453 + ], + [ + 1509, + 446 + ], + [ + 1497, + 448 + ], + [ + 1496, + 446 + ], + [ + 1496, + 444 + ], + [ + 1495, + 442 + ], + [ + 1494, + 441 + ], + [ + 1494, + 440 + ], + [ + 1495, + 439 + ], + [ + 1499, + 437 + ], + [ + 1505, + 435 + ], + [ + 1509, + 439 + ], + [ + 1511, + 441 + ], + [ + 1513, + 436 + ], + [ + 1517, + 426 + ], + [ + 1523, + 423 + ], + [ + 1526, + 418 + ], + [ + 1527, + 415 + ], + [ + 1532, + 414 + ], + [ + 1536, + 414 + ], + [ + 1536, + 414 + ], + [ + 1544, + 413 + ], + [ + 1608, + 413 + ], + [ + 1617, + 415 + ], + [ + 1620, + 416 + ], + [ + 1621, + 419 + ], + [ + 1623, + 426 + ], + [ + 1623, + 427 + ], + [ + 1624, + 432 + ], + [ + 1628, + 439 + ], + [ + 1628, + 440 + ], + [ + 1628, + 461 + ], + [ + 1628, + 462 + ], + [ + 1626, + 468 + ], + [ + 1628, + 473 + ], + [ + 1628, + 486 + ], + [ + 1629, + 492 + ], + [ + 1629, + 500 + ], + [ + 1629, + 500 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1563, + 514 + ], + [ + 1564, + 497 + ], + [ + 1581, + 496 + ], + [ + 1580, + 405 + ], + [ + 1586, + 405 + ], + [ + 1586, + 449 + ], + [ + 1589, + 496 + ], + [ + 1604, + 496 + ], + [ + 1610, + 499 + ], + [ + 1610, + 516 + ], + [ + 1580, + 517 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1552, + 271 + ], + [ + 1600, + 271 + ], + [ + 1604, + 340 + ], + [ + 1607, + 403 + ], + [ + 1565, + 407 + ], + [ + 1562, + 407 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 912, + 414 + ], + [ + 912, + 436 + ], + [ + 928, + 435 + ], + [ + 954, + 429 + ], + [ + 951, + 412 + ], + [ + 925, + 414 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1624, + 512 + ], + [ + 1620, + 358 + ], + [ + 1616, + 289 + ], + [ + 1616, + 289 + ], + [ + 1617, + 191 + ], + [ + 1617, + 190 + ], + [ + 1609, + 169 + ], + [ + 1607, + 167 + ], + [ + 1596, + 154 + ], + [ + 1574, + 144 + ], + [ + 1563, + 131 + ], + [ + 1562, + 126 + ], + [ + 1562, + 125 + ], + [ + 1541, + 121 + ], + [ + 1537, + 127 + ], + [ + 1538, + 111 + ], + [ + 1539, + 111 + ], + [ + 1529, + 110 + ], + [ + 1520, + 112 + ], + [ + 1487, + 118 + ], + [ + 1471, + 120 + ], + [ + 1460, + 113 + ], + [ + 1427, + 109 + ], + [ + 1406, + 112 + ], + [ + 1414, + 87 + ], + [ + 1425, + 77 + ], + [ + 1455, + 86 + ], + [ + 1465, + 85 + ], + [ + 1467, + 78 + ], + [ + 1464, + 73 + ], + [ + 1453, + 75 + ], + [ + 1447, + 71 + ], + [ + 1443, + 61 + ], + [ + 1443, + 49 + ], + [ + 1452, + 46 
+ ], + [ + 1465, + 38 + ], + [ + 1465, + 27 + ], + [ + 1477, + 5 + ], + [ + 1494, + 5 + ], + [ + 1500, + 0 + ], + [ + 1775, + 0 + ], + [ + 1768, + 0 + ], + [ + 1766, + 10 + ], + [ + 1783, + 23 + ], + [ + 1797, + 44 + ], + [ + 1778, + 51 + ], + [ + 1794, + 53 + ], + [ + 1800, + 57 + ], + [ + 1804, + 69 + ], + [ + 1754, + 71 + ], + [ + 1753, + 80 + ], + [ + 1765, + 94 + ], + [ + 1755, + 101 + ], + [ + 1748, + 100 + ], + [ + 1758, + 112 + ], + [ + 1776, + 119 + ], + [ + 1787, + 126 + ], + [ + 1793, + 133 + ], + [ + 1793, + 157 + ], + [ + 1777, + 161 + ], + [ + 1780, + 182 + ], + [ + 1771, + 188 + ], + [ + 1757, + 192 + ], + [ + 1733, + 186 + ], + [ + 1686, + 188 + ], + [ + 1657, + 175 + ], + [ + 1651, + 174 + ], + [ + 1642, + 190 + ], + [ + 1631, + 207 + ], + [ + 1633, + 209 + ], + [ + 1641, + 428 + ], + [ + 1638, + 511 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1633, + 493 + ], + [ + 1633, + 486 + ], + [ + 1638, + 481 + ], + [ + 1641, + 460 + ], + [ + 1649, + 452 + ], + [ + 1649, + 450 + ], + [ + 1643, + 450 + ], + [ + 1636, + 451 + ], + [ + 1632, + 447 + ], + [ + 1630, + 439 + ], + [ + 1635, + 436 + ], + [ + 1648, + 437 + ], + [ + 1657, + 439 + ], + [ + 1659, + 425 + ], + [ + 1672, + 401 + ], + [ + 1705, + 392 + ], + [ + 1793, + 393 + ], + [ + 1775, + 571 + ], + [ + 1708, + 540 + ], + [ + 1699, + 540 + ], + [ + 1699, + 547 + ], + [ + 1698, + 552 + ], + [ + 1692, + 558 + ], + [ + 1686, + 559 + ], + [ + 1679, + 556 + ], + [ + 1672, + 553 + ], + [ + 1670, + 551 + ], + [ + 1667, + 545 + ], + [ + 1667, + 539 + ], + [ + 1667, + 536 + ], + [ + 1661, + 536 + ], + [ + 1659, + 545 + ], + [ + 1656, + 547 + ], + [ + 1645, + 549 + ], + [ + 1642, + 547 + ], + [ + 1641, + 544 + ], + [ + 1636, + 533 + ], + [ + 1635, + 525 + ], + [ + 1635, + 520 + ], + [ + 1636, + 515 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1711, + 513 + ], + [ + 1716, + 493 + ], + [ + 1724, + 476 + ], + [ + 1740, + 464 + ], + [ + 1729, + 464 + ], + [ + 1722, + 464 + ], + [ + 1712, + 459 + ], + [ + 1715, + 452 + ], + [ + 1726, + 444 + ], + [ + 1738, + 447 + ], + [ + 1740, + 454 + ], + [ + 1743, + 454 + ], + [ + 1746, + 447 + ], + [ + 1758, + 416 + ], + [ + 1779, + 395 + ], + [ + 1791, + 390 + ], + [ + 1791, + 383 + ], + [ + 1799, + 376 + ], + [ + 1809, + 374 + ], + [ + 1842, + 369 + ], + [ + 1845, + 375 + ], + [ + 1972, + 367 + ], + [ + 2021, + 380 + ], + [ + 2015, + 389 + ], + [ + 2047, + 461 + ], + [ + 2047, + 597 + ], + [ + 2047, + 600 + ], + [ + 2027, + 600 + ], + [ + 2015, + 598 + ], + [ + 2013, + 587 + ], + [ + 2010, + 581 + ], + [ + 1996, + 571 + ], + [ + 1845, + 581 + ], + [ + 1843, + 596 + ], + [ + 1841, + 602 + ], + [ + 1815, + 607 + ], + [ + 1805, + 607 + ], + [ + 1794, + 593 + ], + [ + 1792, + 572 + ], + [ + 1766, + 566 + ], + [ + 1759, + 565 + ], + [ + 1753, + 582 + ], + [ + 1740, + 585 + ], + [ + 1726, + 584 + ], + [ + 1717, + 575 + ], + [ + 1706, + 561 + ], + [ + 1704, + 554 + ], + [ + 1703, + 546 + ], + [ + 1702, + 539 + ], + [ + 1702, + 532 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 768, + 435 + ], + [ + 767, + 411 + ], + [ + 866, + 412 + ], + [ + 907, + 415 + ], + [ + 910, + 432 + ], + [ + 848, + 432 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1905, + 477 + ], + [ + 1906, + 497 + ], + [ + 1995, + 495 + ], + [ + 1994, + 474 + ] + ] + }, + { + "label": "persongroup", + "polygon": [ + [ + 1096, + 442 + ], + [ + 1106, + 441 + ], + [ + 1105, + 432 + ], + [ + 1100, + 424 + ], + [ + 1099, + 418 + ], + [ + 1099, + 413 + ], + [ + 1099, + 410 + ], + [ + 1097, + 404 + 
], + [ + 1096, + 401 + ], + [ + 1095, + 401 + ], + [ + 1092, + 401 + ], + [ + 1088, + 402 + ], + [ + 1088, + 410 + ], + [ + 1088, + 411 + ], + [ + 1087, + 407 + ], + [ + 1088, + 402 + ], + [ + 1086, + 399 + ], + [ + 1082, + 400 + ], + [ + 1081, + 400 + ], + [ + 1077, + 395 + ], + [ + 1071, + 397 + ], + [ + 1069, + 400 + ], + [ + 1069, + 401 + ], + [ + 1068, + 407 + ], + [ + 1069, + 407 + ], + [ + 1071, + 409 + ], + [ + 1071, + 409 + ], + [ + 1072, + 414 + ], + [ + 1065, + 417 + ], + [ + 1061, + 417 + ], + [ + 1063, + 423 + ], + [ + 1069, + 424 + ], + [ + 1069, + 432 + ], + [ + 1069, + 440 + ], + [ + 1066, + 446 + ], + [ + 1060, + 451 + ], + [ + 1066, + 452 + ], + [ + 1073, + 452 + ], + [ + 1075, + 441 + ], + [ + 1075, + 436 + ], + [ + 1076, + 435 + ], + [ + 1077, + 435 + ], + [ + 1078, + 437 + ], + [ + 1078, + 437 + ], + [ + 1078, + 443 + ], + [ + 1078, + 444 + ], + [ + 1078, + 451 + ], + [ + 1082, + 452 + ], + [ + 1083, + 452 + ], + [ + 1092, + 451 + ], + [ + 1092, + 451 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 0, + 547 + ], + [ + 24, + 538 + ], + [ + 80, + 533 + ], + [ + 94, + 539 + ], + [ + 129, + 536 + ], + [ + 138, + 531 + ], + [ + 229, + 513 + ], + [ + 261, + 525 + ], + [ + 289, + 516 + ], + [ + 298, + 515 + ], + [ + 310, + 513 + ], + [ + 321, + 508 + ], + [ + 365, + 509 + ], + [ + 381, + 503 + ], + [ + 419, + 502 + ], + [ + 459, + 493 + ], + [ + 489, + 488 + ], + [ + 512, + 490 + ], + [ + 546, + 483 + ], + [ + 566, + 489 + ], + [ + 573, + 486 + ], + [ + 581, + 479 + ], + [ + 615, + 470 + ], + [ + 622, + 472 + ], + [ + 625, + 472 + ], + [ + 629, + 463 + ], + [ + 657, + 460 + ], + [ + 702, + 460 + ], + [ + 737, + 456 + ], + [ + 767, + 450 + ], + [ + 932, + 450 + ], + [ + 934, + 452 + ], + [ + 829, + 486 + ], + [ + 690, + 517 + ], + [ + 1, + 626 + ], + [ + 1, + 626 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1062, + 420 + ], + [ + 1052, + 420 + ], + [ + 1047, + 423 + ], + [ + 1045, + 428 + ], + [ + 1044, + 430 + ], + [ + 1044, + 435 + ], + [ + 1040, + 441 + ], + [ + 1046, + 444 + ], + [ + 1044, + 447 + ], + [ + 1041, + 450 + ], + [ + 1048, + 450 + ], + [ + 1053, + 450 + ], + [ + 1055, + 449 + ], + [ + 1057, + 444 + ], + [ + 1059, + 447 + ], + [ + 1058, + 451 + ], + [ + 1059, + 452 + ], + [ + 1062, + 452 + ], + [ + 1063, + 452 + ], + [ + 1065, + 447 + ], + [ + 1066, + 441 + ], + [ + 1066, + 438 + ], + [ + 1066, + 436 + ], + [ + 1069, + 435 + ], + [ + 1070, + 427 + ], + [ + 1071, + 420 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 763, + 466 + ], + [ + 763, + 436 + ], + [ + 864, + 432 + ], + [ + 915, + 431 + ], + [ + 916, + 455 + ], + [ + 866, + 466 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 952, + 440 + ], + [ + 932, + 452 + ], + [ + 916, + 451 + ], + [ + 915, + 435 + ], + [ + 928, + 435 + ], + [ + 951, + 428 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 917, + 448 + ], + [ + 917, + 388 + ], + [ + 921, + 388 + ], + [ + 921, + 452 + ], + [ + 916, + 452 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 919, + 389 + ], + [ + 915, + 389 + ], + [ + 915, + 362 + ], + [ + 924, + 361 + ], + [ + 924, + 389 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 730, + 462 + ], + [ + 724, + 462 + ], + [ + 719, + 461 + ], + [ + 724, + 458 + ], + [ + 726, + 456 + ], + [ + 724, + 453 + ], + [ + 728, + 444 + ], + [ + 729, + 438 + ], + [ + 730, + 430 + ], + [ + 731, + 426 + ], + [ + 732, + 423 + ], + [ + 726, + 419 + ], + [ + 726, + 415 + ], + [ + 731, + 412 + ], + [ + 732, + 406 + ], + [ + 737, + 403 + ], + [ + 
744, + 410 + ], + [ + 745, + 412 + ], + [ + 748, + 416 + ], + [ + 752, + 423 + ], + [ + 749, + 429 + ], + [ + 748, + 430 + ], + [ + 751, + 436 + ], + [ + 748, + 441 + ], + [ + 747, + 448 + ], + [ + 748, + 452 + ], + [ + 748, + 455 + ], + [ + 746, + 458 + ], + [ + 744, + 460 + ], + [ + 740, + 456 + ], + [ + 740, + 452 + ], + [ + 740, + 447 + ], + [ + 740, + 443 + ], + [ + 737, + 442 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 797, + 454 + ], + [ + 801, + 337 + ], + [ + 794, + 316 + ], + [ + 790, + 332 + ], + [ + 784, + 333 + ], + [ + 755, + 333 + ], + [ + 747, + 331 + ], + [ + 729, + 331 + ], + [ + 724, + 331 + ], + [ + 721, + 319 + ], + [ + 719, + 314 + ], + [ + 712, + 317 + ], + [ + 703, + 320 + ], + [ + 699, + 321 + ], + [ + 694, + 308 + ], + [ + 691, + 303 + ], + [ + 681, + 298 + ], + [ + 680, + 287 + ], + [ + 685, + 279 + ], + [ + 683, + 275 + ], + [ + 669, + 259 + ], + [ + 675, + 248 + ], + [ + 675, + 248 + ], + [ + 690, + 242 + ], + [ + 682, + 169 + ], + [ + 687, + 166 + ], + [ + 705, + 158 + ], + [ + 717, + 126 + ], + [ + 730, + 119 + ], + [ + 749, + 106 + ], + [ + 759, + 75 + ], + [ + 768, + 69 + ], + [ + 790, + 62 + ], + [ + 805, + 41 + ], + [ + 814, + 20 + ], + [ + 827, + 5 + ], + [ + 829, + 4 + ], + [ + 829, + 14 + ], + [ + 836, + 19 + ], + [ + 879, + 4 + ], + [ + 879, + 27 + ], + [ + 878, + 59 + ], + [ + 886, + 69 + ], + [ + 900, + 73 + ], + [ + 917, + 93 + ], + [ + 924, + 94 + ], + [ + 943, + 113 + ], + [ + 917, + 145 + ], + [ + 917, + 147 + ], + [ + 932, + 159 + ], + [ + 924, + 195 + ], + [ + 909, + 207 + ], + [ + 921, + 215 + ], + [ + 937, + 224 + ], + [ + 946, + 228 + ], + [ + 939, + 256 + ], + [ + 918, + 273 + ], + [ + 892, + 279 + ], + [ + 880, + 307 + ], + [ + 850, + 339 + ], + [ + 830, + 343 + ], + [ + 807, + 349 + ], + [ + 806, + 378 + ], + [ + 808, + 459 + ], + [ + 800, + 459 + ], + [ + 800, + 450 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 783, + 468 + ], + [ + 771, + 477 + ], + [ + 757, + 475 + ], + [ + 755, + 470 + ], + [ + 754, + 449 + ], + [ + 769, + 446 + ], + [ + 771, + 440 + ], + [ + 784, + 439 + ], + [ + 789, + 439 + ], + [ + 785, + 448 + ], + [ + 790, + 452 + ], + [ + 799, + 449 + ], + [ + 803, + 438 + ], + [ + 795, + 432 + ], + [ + 806, + 425 + ], + [ + 815, + 428 + ], + [ + 819, + 434 + ], + [ + 818, + 439 + ], + [ + 814, + 442 + ], + [ + 814, + 446 + ], + [ + 816, + 446 + ], + [ + 820, + 441 + ], + [ + 822, + 437 + ], + [ + 824, + 428 + ], + [ + 829, + 427 + ], + [ + 832, + 427 + ], + [ + 831, + 437 + ], + [ + 831, + 439 + ], + [ + 833, + 443 + ], + [ + 837, + 448 + ], + [ + 840, + 453 + ], + [ + 840, + 454 + ], + [ + 842, + 464 + ], + [ + 843, + 468 + ], + [ + 843, + 470 + ], + [ + 842, + 473 + ], + [ + 840, + 473 + ], + [ + 827, + 471 + ], + [ + 821, + 475 + ], + [ + 816, + 477 + ], + [ + 813, + 477 + ], + [ + 805, + 472 + ], + [ + 801, + 467 + ], + [ + 801, + 465 + ], + [ + 799, + 465 + ], + [ + 795, + 465 + ], + [ + 794, + 464 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 659, + 465 + ], + [ + 653, + 465 + ], + [ + 644, + 474 + ], + [ + 642, + 489 + ], + [ + 649, + 501 + ], + [ + 668, + 503 + ], + [ + 679, + 490 + ], + [ + 682, + 489 + ], + [ + 686, + 497 + ], + [ + 695, + 507 + ], + [ + 710, + 500 + ], + [ + 708, + 478 + ], + [ + 698, + 468 + ], + [ + 688, + 464 + ], + [ + 682, + 464 + ], + [ + 681, + 458 + ], + [ + 686, + 455 + ], + [ + 684, + 452 + ], + [ + 678, + 450 + ], + [ + 675, + 451 + ], + [ + 661, + 442 + ], + [ + 665, + 437 + ], + [ + 659, + 431 + ], + [ + 653, + 432 + ], + [ + 653, + 
435 + ], + [ + 659, + 436 + ], + [ + 660, + 441 + ], + [ + 656, + 446 + ], + [ + 653, + 447 + ], + [ + 653, + 442 + ], + [ + 647, + 442 + ], + [ + 644, + 448 + ], + [ + 649, + 452 + ], + [ + 653, + 453 + ], + [ + 655, + 456 + ], + [ + 663, + 460 + ], + [ + 663, + 465 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 666, + 503 + ], + [ + 665, + 271 + ], + [ + 668, + 271 + ], + [ + 672, + 451 + ], + [ + 673, + 503 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 668, + 271 + ], + [ + 625, + 273 + ], + [ + 625, + 315 + ], + [ + 672, + 315 + ], + [ + 672, + 271 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 686, + 453 + ], + [ + 690, + 275 + ], + [ + 686, + 275 + ], + [ + 685, + 275 + ], + [ + 678, + 280 + ], + [ + 668, + 279 + ], + [ + 663, + 277 + ], + [ + 660, + 269 + ], + [ + 662, + 260 + ], + [ + 665, + 253 + ], + [ + 649, + 257 + ], + [ + 615, + 261 + ], + [ + 614, + 261 + ], + [ + 609, + 260 + ], + [ + 584, + 254 + ], + [ + 573, + 247 + ], + [ + 588, + 229 + ], + [ + 599, + 223 + ], + [ + 589, + 200 + ], + [ + 571, + 195 + ], + [ + 559, + 190 + ], + [ + 563, + 184 + ], + [ + 568, + 174 + ], + [ + 564, + 139 + ], + [ + 590, + 135 + ], + [ + 592, + 134 + ], + [ + 583, + 119 + ], + [ + 574, + 106 + ], + [ + 584, + 99 + ], + [ + 593, + 98 + ], + [ + 611, + 94 + ], + [ + 655, + 95 + ], + [ + 617, + 84 + ], + [ + 604, + 80 + ], + [ + 596, + 70 + ], + [ + 597, + 61 + ], + [ + 610, + 62 + ], + [ + 626, + 63 + ], + [ + 627, + 61 + ], + [ + 621, + 50 + ], + [ + 619, + 36 + ], + [ + 619, + 24 + ], + [ + 625, + 21 + ], + [ + 633, + 3 + ], + [ + 641, + 13 + ], + [ + 653, + 0 + ], + [ + 764, + 3 + ], + [ + 760, + 0 + ], + [ + 768, + 22 + ], + [ + 789, + 20 + ], + [ + 791, + 37 + ], + [ + 789, + 41 + ], + [ + 782, + 47 + ], + [ + 787, + 51 + ], + [ + 800, + 74 + ], + [ + 817, + 97 + ], + [ + 821, + 108 + ], + [ + 831, + 127 + ], + [ + 833, + 143 + ], + [ + 824, + 158 + ], + [ + 824, + 161 + ], + [ + 825, + 179 + ], + [ + 825, + 195 + ], + [ + 810, + 217 + ], + [ + 800, + 233 + ], + [ + 794, + 254 + ], + [ + 760, + 260 + ], + [ + 749, + 270 + ], + [ + 728, + 281 + ], + [ + 714, + 287 + ], + [ + 706, + 287 + ], + [ + 703, + 287 + ], + [ + 699, + 287 + ], + [ + 695, + 462 + ], + [ + 688, + 461 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 449, + 499 + ], + [ + 447, + 289 + ], + [ + 447, + 290 + ], + [ + 438, + 295 + ], + [ + 424, + 304 + ], + [ + 420, + 314 + ], + [ + 415, + 307 + ], + [ + 404, + 292 + ], + [ + 401, + 279 + ], + [ + 377, + 278 + ], + [ + 362, + 275 + ], + [ + 336, + 284 + ], + [ + 307, + 279 + ], + [ + 305, + 265 + ], + [ + 327, + 248 + ], + [ + 340, + 243 + ], + [ + 331, + 241 + ], + [ + 306, + 257 + ], + [ + 270, + 254 + ], + [ + 258, + 248 + ], + [ + 273, + 232 + ], + [ + 284, + 210 + ], + [ + 269, + 211 + ], + [ + 244, + 205 + ], + [ + 265, + 195 + ], + [ + 285, + 190 + ], + [ + 301, + 180 + ], + [ + 278, + 179 + ], + [ + 240, + 181 + ], + [ + 252, + 166 + ], + [ + 330, + 150 + ], + [ + 339, + 149 + ], + [ + 350, + 126 + ], + [ + 326, + 120 + ], + [ + 279, + 132 + ], + [ + 259, + 130 + ], + [ + 306, + 88 + ], + [ + 327, + 84 + ], + [ + 332, + 72 + ], + [ + 334, + 63 + ], + [ + 336, + 48 + ], + [ + 332, + 31 + ], + [ + 277, + 39 + ], + [ + 246, + 42 + ], + [ + 290, + 16 + ], + [ + 304, + 5 + ], + [ + 359, + 11 + ], + [ + 379, + 0 + ], + [ + 532, + 0 + ], + [ + 571, + 2 + ], + [ + 547, + 13 + ], + [ + 574, + 21 + ], + [ + 578, + 29 + ], + [ + 575, + 66 + ], + [ + 571, + 69 + ], + [ + 574, + 92 + ], + [ + 585, + 122 + ], + [ + 573, + 
138 + ], + [ + 600, + 138 + ], + [ + 604, + 160 + ], + [ + 564, + 191 + ], + [ + 565, + 211 + ], + [ + 572, + 245 + ], + [ + 564, + 248 + ], + [ + 552, + 265 + ], + [ + 517, + 264 + ], + [ + 511, + 272 + ], + [ + 491, + 268 + ], + [ + 456, + 275 + ], + [ + 456, + 283 + ], + [ + 460, + 501 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 45, + 533 + ], + [ + 47, + 175 + ], + [ + 47, + 175 + ], + [ + 36, + 179 + ], + [ + 36, + 187 + ], + [ + 36, + 192 + ], + [ + 37, + 208 + ], + [ + 32, + 230 + ], + [ + 27, + 230 + ], + [ + 15, + 233 + ], + [ + 5, + 232 + ], + [ + 3, + 0 + ], + [ + 281, + 0 + ], + [ + 280, + 10 + ], + [ + 275, + 14 + ], + [ + 283, + 31 + ], + [ + 287, + 54 + ], + [ + 278, + 63 + ], + [ + 282, + 99 + ], + [ + 264, + 100 + ], + [ + 263, + 129 + ], + [ + 233, + 135 + ], + [ + 229, + 144 + ], + [ + 212, + 161 + ], + [ + 179, + 154 + ], + [ + 171, + 151 + ], + [ + 172, + 168 + ], + [ + 141, + 174 + ], + [ + 117, + 181 + ], + [ + 102, + 162 + ], + [ + 97, + 170 + ], + [ + 97, + 208 + ], + [ + 85, + 202 + ], + [ + 72, + 179 + ], + [ + 69, + 192 + ], + [ + 58, + 531 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 673, + 508 + ], + [ + 675, + 464 + ], + [ + 676, + 461 + ], + [ + 672, + 454 + ], + [ + 675, + 452 + ], + [ + 679, + 452 + ], + [ + 684, + 453 + ], + [ + 682, + 460 + ], + [ + 681, + 508 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 609, + 515 + ], + [ + 609, + 463 + ], + [ + 607, + 461 + ], + [ + 608, + 458 + ], + [ + 613, + 455 + ], + [ + 619, + 458 + ], + [ + 615, + 464 + ], + [ + 617, + 514 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 570, + 524 + ], + [ + 571, + 497 + ], + [ + 569, + 468 + ], + [ + 565, + 465 + ], + [ + 566, + 464 + ], + [ + 568, + 463 + ], + [ + 569, + 462 + ], + [ + 573, + 461 + ], + [ + 577, + 460 + ], + [ + 580, + 462 + ], + [ + 580, + 465 + ], + [ + 578, + 466 + ], + [ + 576, + 466 + ], + [ + 577, + 525 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 456, + 502 + ], + [ + 454, + 519 + ], + [ + 463, + 533 + ], + [ + 475, + 538 + ], + [ + 500, + 537 + ], + [ + 511, + 514 + ], + [ + 503, + 496 + ], + [ + 490, + 487 + ], + [ + 483, + 484 + ], + [ + 476, + 481 + ], + [ + 476, + 474 + ], + [ + 475, + 468 + ], + [ + 477, + 467 + ], + [ + 484, + 460 + ], + [ + 480, + 452 + ], + [ + 479, + 452 + ], + [ + 474, + 456 + ], + [ + 475, + 458 + ], + [ + 468, + 460 + ], + [ + 465, + 461 + ], + [ + 464, + 462 + ], + [ + 463, + 471 + ], + [ + 447, + 478 + ], + [ + 427, + 474 + ], + [ + 426, + 464 + ], + [ + 432, + 461 + ], + [ + 427, + 456 + ], + [ + 404, + 459 + ], + [ + 406, + 463 + ], + [ + 415, + 468 + ], + [ + 414, + 478 + ], + [ + 414, + 483 + ], + [ + 399, + 485 + ], + [ + 393, + 483 + ], + [ + 380, + 490 + ], + [ + 371, + 499 + ], + [ + 370, + 514 + ], + [ + 377, + 527 + ], + [ + 402, + 540 + ], + [ + 416, + 531 + ], + [ + 424, + 514 + ], + [ + 436, + 510 + ], + [ + 441, + 503 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 415, + 541 + ], + [ + 416, + 478 + ], + [ + 416, + 474 + ], + [ + 416, + 467 + ], + [ + 426, + 467 + ], + [ + 424, + 478 + ], + [ + 425, + 543 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 348, + 552 + ], + [ + 349, + 477 + ], + [ + 344, + 476 + ], + [ + 343, + 472 + ], + [ + 349, + 468 + ], + [ + 359, + 468 + ], + [ + 364, + 473 + ], + [ + 357, + 477 + ], + [ + 358, + 552 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 210, + 490 + ], + [ + 179, + 495 + ], + [ + 181, + 505 + ], + [ + 174, + 536 + ], + [ + 181, + 557 + ], + [ + 198, + 571 + ], + [ + 
239, + 563 + ], + [ + 239, + 548 + ], + [ + 249, + 545 + ], + [ + 255, + 543 + ], + [ + 260, + 541 + ], + [ + 273, + 552 + ], + [ + 290, + 559 + ], + [ + 293, + 559 + ], + [ + 304, + 552 + ], + [ + 295, + 529 + ], + [ + 290, + 519 + ], + [ + 282, + 511 + ], + [ + 277, + 505 + ], + [ + 276, + 492 + ], + [ + 268, + 491 + ], + [ + 267, + 491 + ], + [ + 267, + 476 + ], + [ + 263, + 471 + ], + [ + 272, + 465 + ], + [ + 273, + 461 + ], + [ + 270, + 460 + ], + [ + 257, + 456 + ], + [ + 244, + 455 + ], + [ + 239, + 455 + ], + [ + 230, + 458 + ], + [ + 224, + 460 + ], + [ + 221, + 466 + ], + [ + 234, + 473 + ], + [ + 244, + 478 + ], + [ + 247, + 485 + ], + [ + 247, + 487 + ], + [ + 251, + 493 + ], + [ + 252, + 510 + ], + [ + 245, + 513 + ], + [ + 240, + 514 + ], + [ + 224, + 488 + ], + [ + 227, + 485 + ], + [ + 232, + 479 + ], + [ + 232, + 476 + ], + [ + 231, + 474 + ], + [ + 215, + 471 + ], + [ + 206, + 472 + ], + [ + 203, + 476 + ], + [ + 206, + 479 + ] + ] + }, + { + "label": "persongroup", + "polygon": [ + [ + 952, + 435 + ], + [ + 959, + 426 + ], + [ + 950, + 407 + ], + [ + 953, + 402 + ], + [ + 958, + 400 + ], + [ + 963, + 394 + ], + [ + 963, + 398 + ], + [ + 977, + 392 + ], + [ + 984, + 397 + ], + [ + 986, + 399 + ], + [ + 989, + 402 + ], + [ + 992, + 417 + ], + [ + 992, + 417 + ], + [ + 990, + 426 + ], + [ + 987, + 428 + ], + [ + 986, + 430 + ], + [ + 982, + 437 + ], + [ + 984, + 439 + ], + [ + 985, + 444 + ], + [ + 985, + 446 + ], + [ + 988, + 451 + ], + [ + 986, + 455 + ], + [ + 983, + 456 + ], + [ + 979, + 456 + ], + [ + 974, + 455 + ], + [ + 973, + 448 + ], + [ + 967, + 443 + ], + [ + 966, + 448 + ], + [ + 965, + 451 + ], + [ + 965, + 456 + ], + [ + 963, + 458 + ], + [ + 959, + 458 + ], + [ + 953, + 455 + ], + [ + 952, + 444 + ], + [ + 952, + 439 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 307, + 399 + ], + [ + 317, + 403 + ], + [ + 316, + 410 + ], + [ + 317, + 419 + ], + [ + 317, + 420 + ], + [ + 319, + 427 + ], + [ + 329, + 431 + ], + [ + 329, + 440 + ], + [ + 330, + 449 + ], + [ + 330, + 454 + ], + [ + 328, + 459 + ], + [ + 326, + 460 + ], + [ + 322, + 461 + ], + [ + 322, + 472 + ], + [ + 321, + 487 + ], + [ + 321, + 497 + ], + [ + 322, + 504 + ], + [ + 326, + 504 + ], + [ + 327, + 511 + ], + [ + 319, + 511 + ], + [ + 315, + 512 + ], + [ + 312, + 512 + ], + [ + 309, + 502 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 645, + 512 + ], + [ + 645, + 455 + ], + [ + 649, + 456 + ], + [ + 651, + 511 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000073_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000073_000019_gtFine_labelTrainIds.png new file mode 100644 index 
0000000000000000000000000000000000000000..5dd5247b0509140488d7869f7b6bc9f3bee9decf Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000073_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000074_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000074_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..c48207f8c027ce00b18127e0a1459e767a6bee55 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000074_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000074_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000074_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..4d74fca646f28297d7273e6f353d283b974c83e5 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000074_000019_gtFine_polygons.json @@ -0,0 +1,6633 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1038, + 164 + ], + [ + 958, + 65 + ], + [ + 711, + 70 + ], + [ + 712, + 157 + ], + [ + 849, + 354 + ], + [ + 960, + 331 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 879, + 316 + ], + [ + 844, + 290 + ], + [ + 537, + 1 + ], + [ + 0, + 1 + ], + [ + 0, + 681 + ], + [ + 0, + 681 + ], + [ + 2048, + 698 + ], + [ + 2048, + 1 + ], + [ + 1145, + 1 + ], + [ + 1024, + 273 + ], + [ + 929, + 329 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2048, + 542 + ], + [ + 1822, + 539 + ], + [ + 1415, + 490 + ], + [ + 1028, + 445 + ], + [ + 969, + 443 + ], + [ + 899, + 443 + ], + [ + 853, + 445 + ], + [ + 205, + 513 + ], + [ + 0, + 529 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 432, + 549 + ], + [ + 323, + 553 + ], + [ + 1, + 572 + ], + [ + 0, + 483 + ], + [ + 205, + 476 + ], + [ + 453, + 489 + ], + [ + 428, + 517 + ], + [ + 535, + 519 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 758, + 144 + ], + [ + 775, + 127 + ], + [ + 779, + 126 + ], + [ + 784, + 108 + ], + [ + 798, + 109 + ], + [ + 812, + 108 + ], + [ + 815, + 115 + ], + [ + 828, + 113 + ], + [ + 827, + 122 + ], + [ + 810, + 134 + ], + [ + 810, + 140 + ], + [ + 826, + 141 + ], + [ + 825, + 146 + ], + [ + 814, + 158 + ], + [ + 817, + 159 + ], + [ + 832, + 150 + ], + [ + 831, + 157 + ], + [ + 847, + 171 + ], + [ + 856, + 167 + ], + [ + 870, + 164 + ], + [ + 854, + 162 + ], + [ + 847, + 152 + ], + [ + 847, + 132 + ], + [ + 858, + 134 + ], + [ + 867, + 123 + ], + [ + 869, + 112 + ], + [ + 883, + 113 + ], + [ + 880, + 127 + ], + [ + 894, + 136 + ], + [ + 910, + 135 + ], + [ + 903, + 151 + ], + [ + 911, + 159 + ], + [ + 930, + 149 + ], + [ + 952, + 148 + ], + [ + 939, + 171 + ], + [ + 944, + 180 + ], + [ + 940, + 189 + ], + [ + 951, + 189 + ], + [ + 953, + 183 + ], + [ + 976, + 182 + ], + [ + 960, + 195 + ], + [ + 957, + 209 + ], + [ + 937, + 212 + ], + [ + 946, + 235 + ], + [ + 931, + 258 + ], + [ + 905, + 264 + ], + [ + 901, + 267 + ], + [ + 916, + 269 + ], + [ + 913, + 282 + ], + [ + 920, + 292 + ], + [ + 917, + 302 + ], + [ + 906, + 300 + ], + [ + 917, + 310 + ], + [ + 918, + 314 + ], + [ + 909, + 313 + ], + [ + 902, + 310 + ], + [ + 894, + 302 + ], + [ + 880, + 299 + ], + [ + 869, + 303 + ], + [ + 842, + 315 + ], + [ + 836, + 329 + ], + [ + 837, + 344 + ], + [ + 843, + 358 + ], + [ + 838, + 366 + ], + [ + 843, + 373 + ], + [ + 845, + 381 + ], + [ + 854, + 387 
+ ], + [ + 854, + 399 + ], + [ + 861, + 403 + ], + [ + 863, + 419 + ], + [ + 861, + 435 + ], + [ + 863, + 446 + ], + [ + 913, + 443 + ], + [ + 969, + 443 + ], + [ + 994, + 443 + ], + [ + 1001, + 426 + ], + [ + 1018, + 419 + ], + [ + 1030, + 404 + ], + [ + 1016, + 398 + ], + [ + 1016, + 390 + ], + [ + 1029, + 387 + ], + [ + 1028, + 378 + ], + [ + 1036, + 369 + ], + [ + 1050, + 373 + ], + [ + 1050, + 390 + ], + [ + 1055, + 424 + ], + [ + 1055, + 436 + ], + [ + 1114, + 406 + ], + [ + 1108, + 395 + ], + [ + 1107, + 383 + ], + [ + 1099, + 360 + ], + [ + 1109, + 351 + ], + [ + 1099, + 335 + ], + [ + 1106, + 335 + ], + [ + 1109, + 340 + ], + [ + 1115, + 339 + ], + [ + 1139, + 340 + ], + [ + 1157, + 345 + ], + [ + 1168, + 344 + ], + [ + 1148, + 325 + ], + [ + 1134, + 324 + ], + [ + 1117, + 311 + ], + [ + 1137, + 307 + ], + [ + 1154, + 298 + ], + [ + 1156, + 291 + ], + [ + 1159, + 287 + ], + [ + 1164, + 288 + ], + [ + 1179, + 344 + ], + [ + 1187, + 400 + ], + [ + 1195, + 424 + ], + [ + 1232, + 422 + ], + [ + 1231, + 411 + ], + [ + 1219, + 365 + ], + [ + 1208, + 325 + ], + [ + 1197, + 269 + ], + [ + 1214, + 252 + ], + [ + 1242, + 254 + ], + [ + 1254, + 239 + ], + [ + 1264, + 239 + ], + [ + 1266, + 244 + ], + [ + 1282, + 255 + ], + [ + 1286, + 243 + ], + [ + 1292, + 227 + ], + [ + 1304, + 220 + ], + [ + 1309, + 229 + ], + [ + 1312, + 242 + ], + [ + 1312, + 318 + ], + [ + 1313, + 424 + ], + [ + 1317, + 509 + ], + [ + 1339, + 510 + ], + [ + 1330, + 402 + ], + [ + 1332, + 336 + ], + [ + 1363, + 314 + ], + [ + 1387, + 336 + ], + [ + 1423, + 335 + ], + [ + 1446, + 304 + ], + [ + 1431, + 270 + ], + [ + 1442, + 244 + ], + [ + 1467, + 240 + ], + [ + 1478, + 250 + ], + [ + 1500, + 252 + ], + [ + 1505, + 239 + ], + [ + 1497, + 224 + ], + [ + 1528, + 217 + ], + [ + 1600, + 204 + ], + [ + 1625, + 193 + ], + [ + 1630, + 173 + ], + [ + 1588, + 138 + ], + [ + 1612, + 105 + ], + [ + 1619, + 68 + ], + [ + 1599, + 78 + ], + [ + 1576, + 44 + ], + [ + 1564, + 43 + ], + [ + 1552, + 27 + ], + [ + 1561, + 10 + ], + [ + 1561, + 0 + ], + [ + 0, + 1 + ], + [ + 1, + 45 + ], + [ + 24, + 37 + ], + [ + 57, + 50 + ], + [ + 70, + 44 + ], + [ + 93, + 46 + ], + [ + 101, + 56 + ], + [ + 96, + 87 + ], + [ + 101, + 101 + ], + [ + 156, + 96 + ], + [ + 150, + 70 + ], + [ + 174, + 90 + ], + [ + 193, + 86 + ], + [ + 212, + 92 + ], + [ + 227, + 100 + ], + [ + 231, + 123 + ], + [ + 219, + 126 + ], + [ + 208, + 130 + ], + [ + 168, + 124 + ], + [ + 126, + 123 + ], + [ + 93, + 127 + ], + [ + 92, + 151 + ], + [ + 108, + 166 + ], + [ + 128, + 166 + ], + [ + 115, + 178 + ], + [ + 118, + 200 + ], + [ + 129, + 189 + ], + [ + 149, + 197 + ], + [ + 157, + 199 + ], + [ + 176, + 207 + ], + [ + 178, + 227 + ], + [ + 164, + 245 + ], + [ + 182, + 263 + ], + [ + 200, + 249 + ], + [ + 215, + 229 + ], + [ + 240, + 235 + ], + [ + 237, + 348 + ], + [ + 230, + 517 + ], + [ + 263, + 516 + ], + [ + 262, + 404 + ], + [ + 263, + 249 + ], + [ + 292, + 241 + ], + [ + 320, + 248 + ], + [ + 358, + 247 + ], + [ + 395, + 273 + ], + [ + 417, + 276 + ], + [ + 446, + 270 + ], + [ + 473, + 267 + ], + [ + 474, + 257 + ], + [ + 489, + 260 + ], + [ + 503, + 283 + ], + [ + 505, + 297 + ], + [ + 504, + 315 + ], + [ + 505, + 326 + ], + [ + 501, + 370 + ], + [ + 503, + 422 + ], + [ + 507, + 443 + ], + [ + 524, + 438 + ], + [ + 521, + 420 + ], + [ + 520, + 356 + ], + [ + 517, + 328 + ], + [ + 523, + 314 + ], + [ + 520, + 310 + ], + [ + 522, + 294 + ], + [ + 536, + 271 + ], + [ + 565, + 278 + ], + [ + 552, + 263 + ], + [ + 555, + 249 + ], + [ + 594, + 256 + ], + [ + 612, + 258 + ], + [ 
+ 637, + 274 + ], + [ + 657, + 294 + ], + [ + 623, + 304 + ], + [ + 601, + 307 + ], + [ + 595, + 308 + ], + [ + 593, + 329 + ], + [ + 611, + 339 + ], + [ + 604, + 351 + ], + [ + 608, + 369 + ], + [ + 616, + 361 + ], + [ + 626, + 360 + ], + [ + 642, + 358 + ], + [ + 640, + 339 + ], + [ + 648, + 338 + ], + [ + 647, + 350 + ], + [ + 656, + 355 + ], + [ + 674, + 359 + ], + [ + 692, + 362 + ], + [ + 698, + 362 + ], + [ + 696, + 383 + ], + [ + 694, + 434 + ], + [ + 744, + 438 + ], + [ + 742, + 419 + ], + [ + 744, + 384 + ], + [ + 764, + 343 + ], + [ + 778, + 343 + ], + [ + 790, + 357 + ], + [ + 801, + 360 + ], + [ + 821, + 347 + ], + [ + 841, + 341 + ], + [ + 866, + 307 + ], + [ + 868, + 286 + ], + [ + 877, + 281 + ], + [ + 876, + 271 + ], + [ + 868, + 267 + ], + [ + 865, + 255 + ], + [ + 861, + 249 + ], + [ + 857, + 238 + ], + [ + 850, + 234 + ], + [ + 845, + 227 + ], + [ + 832, + 224 + ], + [ + 832, + 219 + ], + [ + 837, + 212 + ], + [ + 843, + 216 + ], + [ + 852, + 213 + ], + [ + 837, + 200 + ], + [ + 825, + 195 + ], + [ + 814, + 201 + ], + [ + 820, + 190 + ], + [ + 820, + 184 + ], + [ + 824, + 181 + ], + [ + 818, + 172 + ], + [ + 823, + 167 + ], + [ + 814, + 166 + ], + [ + 807, + 168 + ], + [ + 797, + 164 + ], + [ + 797, + 157 + ], + [ + 792, + 146 + ], + [ + 775, + 150 + ], + [ + 768, + 151 + ], + [ + 764, + 147 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1425, + 560 + ], + [ + 1180, + 545 + ], + [ + 1129, + 540 + ], + [ + 1111, + 530 + ], + [ + 1111, + 522 + ], + [ + 1182, + 514 + ], + [ + 1195, + 500 + ], + [ + 1423, + 484 + ], + [ + 1460, + 483 + ], + [ + 1771, + 516 + ], + [ + 2048, + 522 + ], + [ + 2048, + 569 + ], + [ + 1564, + 564 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1455, + 717 + ], + [ + 1459, + 694 + ], + [ + 1468, + 676 + ], + [ + 1497, + 656 + ], + [ + 1537, + 638 + ], + [ + 1630, + 619 + ], + [ + 1725, + 608 + ], + [ + 1759, + 604 + ], + [ + 1813, + 598 + ], + [ + 1970, + 592 + ], + [ + 1997, + 598 + ], + [ + 2048, + 604 + ], + [ + 2048, + 1023 + ], + [ + 1930, + 1024 + ], + [ + 1805, + 951 + ], + [ + 1541, + 791 + ], + [ + 1479, + 755 + ], + [ + 1457, + 728 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 959, + 438 + ], + [ + 965, + 435 + ], + [ + 974, + 430 + ], + [ + 981, + 428 + ], + [ + 983, + 418 + ], + [ + 984, + 410 + ], + [ + 992, + 410 + ], + [ + 995, + 412 + ], + [ + 1007, + 412 + ], + [ + 1017, + 410 + ], + [ + 1021, + 408 + ], + [ + 1030, + 407 + ], + [ + 1036, + 401 + ], + [ + 1038, + 392 + ], + [ + 1036, + 385 + ], + [ + 1036, + 380 + ], + [ + 1040, + 378 + ], + [ + 1046, + 378 + ], + [ + 1049, + 381 + ], + [ + 1051, + 383 + ], + [ + 1051, + 392 + ], + [ + 1053, + 398 + ], + [ + 1055, + 411 + ], + [ + 1056, + 429 + ], + [ + 1057, + 444 + ], + [ + 1030, + 449 + ], + [ + 994, + 450 + ], + [ + 968, + 445 + ], + [ + 960, + 442 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 988, + 452 + ], + [ + 988, + 447 + ], + [ + 988, + 442 + ], + [ + 985, + 436 + ], + [ + 969, + 436 + ], + [ + 966, + 443 + ], + [ + 966, + 454 + ], + [ + 969, + 454 + ], + [ + 971, + 452 + ], + [ + 984, + 452 + ], + [ + 985, + 454 + ], + [ + 988, + 454 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 903, + 405 + ], + [ + 899, + 411 + ], + [ + 901, + 415 + ], + [ + 901, + 418 + ], + [ + 901, + 424 + ], + [ + 901, + 428 + ], + [ + 899, + 431 + ], + [ + 898, + 439 + ], + [ + 900, + 445 + ], + [ + 914, + 445 + ], + [ + 969, + 443 + ], + [ + 978, + 429 + ], + [ + 977, + 422 + ], + [ + 969, + 409 + ], + [ + 964, + 402 + ], + [ + 948, 
+ 403 + ], + [ + 940, + 407 + ], + [ + 928, + 412 + ], + [ + 918, + 407 + ], + [ + 909, + 405 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 877, + 426 + ], + [ + 867, + 427 + ], + [ + 860, + 427 + ], + [ + 855, + 427 + ], + [ + 852, + 432 + ], + [ + 840, + 440 + ], + [ + 854, + 452 + ], + [ + 877, + 452 + ], + [ + 877, + 432 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 895, + 431 + ], + [ + 878, + 431 + ], + [ + 875, + 434 + ], + [ + 873, + 444 + ], + [ + 874, + 452 + ], + [ + 875, + 455 + ], + [ + 879, + 455 + ], + [ + 880, + 453 + ], + [ + 895, + 453 + ], + [ + 896, + 454 + ], + [ + 899, + 454 + ], + [ + 900, + 451 + ], + [ + 900, + 447 + ], + [ + 899, + 441 + ], + [ + 900, + 440 + ], + [ + 901, + 439 + ], + [ + 900, + 438 + ], + [ + 898, + 437 + ], + [ + 897, + 433 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 933, + 429 + ], + [ + 916, + 429 + ], + [ + 914, + 431 + ], + [ + 914, + 435 + ], + [ + 911, + 435 + ], + [ + 910, + 437 + ], + [ + 911, + 438 + ], + [ + 912, + 439 + ], + [ + 913, + 442 + ], + [ + 914, + 447 + ], + [ + 938, + 443 + ], + [ + 938, + 439 + ], + [ + 939, + 438 + ], + [ + 939, + 436 + ], + [ + 936, + 435 + ], + [ + 936, + 432 + ], + [ + 935, + 430 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 943, + 451 + ], + [ + 942, + 446 + ], + [ + 940, + 443 + ], + [ + 939, + 441 + ], + [ + 937, + 438 + ], + [ + 934, + 436 + ], + [ + 919, + 436 + ], + [ + 915, + 439 + ], + [ + 913, + 441 + ], + [ + 912, + 444 + ], + [ + 912, + 450 + ], + [ + 912, + 459 + ], + [ + 913, + 460 + ], + [ + 916, + 460 + ], + [ + 917, + 457 + ], + [ + 938, + 457 + ], + [ + 938, + 460 + ], + [ + 942, + 460 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1015, + 376 + ], + [ + 1015, + 359 + ], + [ + 1006, + 359 + ], + [ + 1006, + 376 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1061, + 378 + ], + [ + 1061, + 431 + ], + [ + 1062, + 431 + ], + [ + 1062, + 378 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1060, + 366 + ], + [ + 1056, + 369 + ], + [ + 1055, + 373 + ], + [ + 1056, + 377 + ], + [ + 1057, + 379 + ], + [ + 1060, + 380 + ], + [ + 1063, + 380 + ], + [ + 1066, + 378 + ], + [ + 1068, + 374 + ], + [ + 1067, + 368 + ], + [ + 1063, + 366 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1031, + 459 + ], + [ + 1031, + 451 + ], + [ + 1032, + 442 + ], + [ + 1039, + 435 + ], + [ + 1048, + 430 + ], + [ + 1060, + 427 + ], + [ + 1072, + 424 + ], + [ + 1049, + 469 + ], + [ + 1037, + 468 + ], + [ + 1032, + 465 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1181, + 394 + ], + [ + 1112, + 397 + ], + [ + 1107, + 402 + ], + [ + 1098, + 403 + ], + [ + 1089, + 407 + ], + [ + 1067, + 426 + ], + [ + 1046, + 435 + ], + [ + 1041, + 443 + ], + [ + 1038, + 451 + ], + [ + 1039, + 457 + ], + [ + 1040, + 463 + ], + [ + 1044, + 467 + ], + [ + 1050, + 470 + ], + [ + 1055, + 470 + ], + [ + 1063, + 470 + ], + [ + 1192, + 417 + ], + [ + 1187, + 399 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1066, + 445 + ], + [ + 1075, + 441 + ], + [ + 1086, + 437 + ], + [ + 1118, + 419 + ], + [ + 1124, + 416 + ], + [ + 1130, + 414 + ], + [ + 1152, + 411 + ], + [ + 1164, + 411 + ], + [ + 1189, + 412 + ], + [ + 1192, + 417 + ], + [ + 1085, + 478 + ], + [ + 1064, + 477 + ], + [ + 1061, + 472 + ], + [ + 1058, + 461 + ], + [ + 1059, + 456 + ], + [ + 1061, + 454 + ], + [ + 1063, + 450 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1113, + 483 + ], + [ + 1089, + 482 + ], + [ + 1081, + 480 + ], + [ + 1073, + 469 + ], + [ + 
1076, + 451 + ], + [ + 1080, + 440 + ], + [ + 1083, + 438 + ], + [ + 1092, + 435 + ], + [ + 1113, + 422 + ], + [ + 1120, + 422 + ], + [ + 1146, + 420 + ], + [ + 1166, + 419 + ], + [ + 1128, + 481 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1111, + 443 + ], + [ + 1125, + 432 + ], + [ + 1139, + 425 + ], + [ + 1146, + 422 + ], + [ + 1153, + 422 + ], + [ + 1130, + 482 + ], + [ + 1121, + 483 + ], + [ + 1111, + 481 + ], + [ + 1104, + 474 + ], + [ + 1101, + 467 + ], + [ + 1102, + 458 + ], + [ + 1107, + 450 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1115, + 465 + ], + [ + 1120, + 454 + ], + [ + 1124, + 442 + ], + [ + 1131, + 440 + ], + [ + 1144, + 426 + ], + [ + 1149, + 421 + ], + [ + 1159, + 419 + ], + [ + 1174, + 418 + ], + [ + 1131, + 483 + ], + [ + 1120, + 483 + ], + [ + 1117, + 477 + ], + [ + 1116, + 472 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1138, + 489 + ], + [ + 1129, + 483 + ], + [ + 1126, + 477 + ], + [ + 1123, + 468 + ], + [ + 1125, + 461 + ], + [ + 1129, + 451 + ], + [ + 1133, + 443 + ], + [ + 1151, + 421 + ], + [ + 1182, + 416 + ], + [ + 1197, + 416 + ], + [ + 1216, + 416 + ], + [ + 1204, + 456 + ], + [ + 1168, + 501 + ], + [ + 1157, + 499 + ], + [ + 1147, + 493 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1268, + 410 + ], + [ + 1267, + 405 + ], + [ + 1264, + 403 + ], + [ + 1261, + 405 + ], + [ + 1259, + 410 + ], + [ + 1259, + 415 + ], + [ + 1265, + 416 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1299, + 411 + ], + [ + 1298, + 405 + ], + [ + 1298, + 398 + ], + [ + 1293, + 397 + ], + [ + 1289, + 401 + ], + [ + 1290, + 409 + ], + [ + 1291, + 413 + ], + [ + 1298, + 413 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1363, + 433 + ], + [ + 1369, + 422 + ], + [ + 1367, + 408 + ], + [ + 1362, + 401 + ], + [ + 1358, + 395 + ], + [ + 1357, + 390 + ], + [ + 1353, + 388 + ], + [ + 1351, + 391 + ], + [ + 1351, + 397 + ], + [ + 1349, + 401 + ], + [ + 1346, + 405 + ], + [ + 1342, + 420 + ], + [ + 1341, + 429 + ], + [ + 1360, + 440 + ], + [ + 1362, + 434 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1313, + 411 + ], + [ + 1297, + 410 + ], + [ + 1254, + 409 + ], + [ + 1214, + 411 + ], + [ + 1193, + 419 + ], + [ + 1188, + 427 + ], + [ + 1185, + 442 + ], + [ + 1180, + 451 + ], + [ + 1177, + 460 + ], + [ + 1313, + 431 + ], + [ + 1313, + 420 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1166, + 470 + ], + [ + 1170, + 463 + ], + [ + 1175, + 461 + ], + [ + 1182, + 452 + ], + [ + 1190, + 445 + ], + [ + 1200, + 439 + ], + [ + 1218, + 426 + ], + [ + 1233, + 419 + ], + [ + 1271, + 414 + ], + [ + 1301, + 411 + ], + [ + 1313, + 411 + ], + [ + 1314, + 435 + ], + [ + 1317, + 497 + ], + [ + 1243, + 505 + ], + [ + 1220, + 510 + ], + [ + 1188, + 514 + ], + [ + 1180, + 513 + ], + [ + 1174, + 508 + ], + [ + 1172, + 503 + ], + [ + 1165, + 503 + ], + [ + 1161, + 497 + ], + [ + 1157, + 486 + ], + [ + 1160, + 476 + ], + [ + 1163, + 474 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1404, + 371 + ], + [ + 1402, + 452 + ], + [ + 1405, + 452 + ], + [ + 1407, + 371 + ], + [ + 1404, + 370 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1240, + 354 + ], + [ + 1238, + 350 + ], + [ + 1237, + 345 + ], + [ + 1238, + 341 + ], + [ + 1241, + 339 + ], + [ + 1243, + 339 + ], + [ + 1246, + 341 + ], + [ + 1247, + 344 + ], + [ + 1247, + 349 + ], + [ + 1246, + 354 + ], + [ + 1242, + 356 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1246, + 379 + ], + [ + 1247, + 373 + ], + [ + 1248, + 372 
+ ], + [ + 1247, + 354 + ], + [ + 1237, + 355 + ], + [ + 1236, + 363 + ], + [ + 1236, + 373 + ], + [ + 1237, + 379 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1185, + 447 + ], + [ + 1179, + 447 + ], + [ + 1180, + 516 + ], + [ + 1186, + 516 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1163, + 523 + ], + [ + 1160, + 515 + ], + [ + 1159, + 448 + ], + [ + 1152, + 448 + ], + [ + 1152, + 514 + ], + [ + 1150, + 523 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1411, + 453 + ], + [ + 1396, + 448 + ], + [ + 1387, + 442 + ], + [ + 1374, + 438 + ], + [ + 1359, + 426 + ], + [ + 1338, + 416 + ], + [ + 1331, + 413 + ], + [ + 1337, + 492 + ], + [ + 1339, + 498 + ], + [ + 1413, + 504 + ], + [ + 1428, + 504 + ], + [ + 1433, + 497 + ], + [ + 1433, + 476 + ], + [ + 1426, + 462 + ], + [ + 1417, + 456 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1233, + 516 + ], + [ + 1216, + 515 + ], + [ + 1216, + 513 + ], + [ + 1231, + 507 + ], + [ + 1242, + 505 + ], + [ + 1248, + 498 + ], + [ + 1337, + 488 + ], + [ + 1374, + 507 + ], + [ + 1378, + 512 + ], + [ + 1349, + 516 + ], + [ + 1322, + 517 + ], + [ + 1267, + 516 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1297, + 442 + ], + [ + 1303, + 443 + ], + [ + 1315, + 452 + ], + [ + 1317, + 499 + ], + [ + 1279, + 499 + ], + [ + 1269, + 499 + ], + [ + 1260, + 503 + ], + [ + 1256, + 503 + ], + [ + 1251, + 497 + ], + [ + 1250, + 490 + ], + [ + 1255, + 478 + ], + [ + 1263, + 469 + ], + [ + 1269, + 464 + ], + [ + 1277, + 461 + ], + [ + 1274, + 457 + ], + [ + 1276, + 453 + ], + [ + 1278, + 448 + ], + [ + 1285, + 446 + ], + [ + 1288, + 452 + ], + [ + 1293, + 453 + ], + [ + 1292, + 448 + ], + [ + 1295, + 443 + ], + [ + 1296, + 442 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1336, + 427 + ], + [ + 1335, + 429 + ], + [ + 1340, + 433 + ], + [ + 1338, + 439 + ], + [ + 1337, + 443 + ], + [ + 1334, + 445 + ], + [ + 1334, + 446 + ], + [ + 1337, + 494 + ], + [ + 1338, + 497 + ], + [ + 1358, + 493 + ], + [ + 1349, + 467 + ], + [ + 1339, + 466 + ], + [ + 1335, + 464 + ], + [ + 1335, + 458 + ], + [ + 1343, + 454 + ], + [ + 1343, + 446 + ], + [ + 1342, + 440 + ], + [ + 1343, + 432 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1221, + 534 + ], + [ + 1220, + 529 + ], + [ + 1219, + 455 + ], + [ + 1210, + 454 + ], + [ + 1208, + 534 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1288, + 530 + ], + [ + 1287, + 524 + ], + [ + 1285, + 448 + ], + [ + 1278, + 448 + ], + [ + 1278, + 525 + ], + [ + 1277, + 530 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1408, + 372 + ], + [ + 1409, + 356 + ], + [ + 1408, + 335 + ], + [ + 1397, + 331 + ], + [ + 1397, + 373 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1500, + 441 + ], + [ + 1510, + 441 + ], + [ + 1511, + 535 + ], + [ + 1500, + 534 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1203, + 230 + ], + [ + 1201, + 534 + ], + [ + 1212, + 534 + ], + [ + 1212, + 230 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1183, + 256 + ], + [ + 1186, + 269 + ], + [ + 1201, + 272 + ], + [ + 1201, + 278 + ], + [ + 1184, + 280 + ], + [ + 1185, + 289 + ], + [ + 1202, + 293 + ], + [ + 1202, + 302 + ], + [ + 1184, + 303 + ], + [ + 1184, + 311 + ], + [ + 1202, + 319 + ], + [ + 1204, + 327 + ], + [ + 1207, + 329 + ], + [ + 1210, + 329 + ], + [ + 1212, + 327 + ], + [ + 1216, + 326 + ], + [ + 1216, + 250 + ], + [ + 1202, + 251 + ], + [ + 1202, + 254 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1436, + 422 + ], + 
[ + 1432, + 424 + ], + [ + 1431, + 426 + ], + [ + 1411, + 429 + ], + [ + 1401, + 428 + ], + [ + 1385, + 435 + ], + [ + 1386, + 437 + ], + [ + 1401, + 431 + ], + [ + 1407, + 432 + ], + [ + 1423, + 431 + ], + [ + 1428, + 446 + ], + [ + 1394, + 447 + ], + [ + 1392, + 442 + ], + [ + 1388, + 440 + ], + [ + 1368, + 441 + ], + [ + 1367, + 445 + ], + [ + 1375, + 448 + ], + [ + 1375, + 456 + ], + [ + 1342, + 460 + ], + [ + 1345, + 468 + ], + [ + 1341, + 474 + ], + [ + 1339, + 475 + ], + [ + 1335, + 481 + ], + [ + 1336, + 486 + ], + [ + 1335, + 499 + ], + [ + 1339, + 501 + ], + [ + 1341, + 512 + ], + [ + 1345, + 521 + ], + [ + 1352, + 528 + ], + [ + 1368, + 532 + ], + [ + 1383, + 528 + ], + [ + 1389, + 522 + ], + [ + 1393, + 517 + ], + [ + 1403, + 517 + ], + [ + 1411, + 512 + ], + [ + 1419, + 517 + ], + [ + 1428, + 518 + ], + [ + 1428, + 515 + ], + [ + 1421, + 514 + ], + [ + 1411, + 506 + ], + [ + 1416, + 497 + ], + [ + 1416, + 489 + ], + [ + 1433, + 465 + ], + [ + 1436, + 469 + ], + [ + 1430, + 480 + ], + [ + 1428, + 498 + ], + [ + 1430, + 516 + ], + [ + 1435, + 522 + ], + [ + 1444, + 532 + ], + [ + 1451, + 536 + ], + [ + 1461, + 537 + ], + [ + 1472, + 534 + ], + [ + 1480, + 527 + ], + [ + 1485, + 515 + ], + [ + 1485, + 504 + ], + [ + 1481, + 487 + ], + [ + 1471, + 473 + ], + [ + 1455, + 463 + ], + [ + 1449, + 461 + ], + [ + 1444, + 453 + ], + [ + 1441, + 453 + ], + [ + 1437, + 454 + ], + [ + 1426, + 431 + ], + [ + 1429, + 429 + ], + [ + 1437, + 428 + ], + [ + 1442, + 426 + ], + [ + 1441, + 422 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1553, + 347 + ], + [ + 1536, + 352 + ], + [ + 1528, + 357 + ], + [ + 1526, + 537 + ], + [ + 1583, + 539 + ], + [ + 1582, + 356 + ], + [ + 1576, + 355 + ], + [ + 1563, + 347 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1557, + 248 + ], + [ + 1553, + 538 + ], + [ + 1565, + 539 + ], + [ + 1566, + 247 + ], + [ + 1561, + 231 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1538, + 167 + ], + [ + 1537, + 248 + ], + [ + 1591, + 249 + ], + [ + 1592, + 167 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1958, + 454 + ], + [ + 1961, + 450 + ], + [ + 1965, + 449 + ], + [ + 1971, + 450 + ], + [ + 1973, + 454 + ], + [ + 1977, + 586 + ], + [ + 1983, + 597 + ], + [ + 1984, + 601 + ], + [ + 1962, + 601 + ], + [ + 1953, + 599 + ], + [ + 1954, + 597 + ], + [ + 1959, + 591 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1786, + 459 + ], + [ + 1788, + 454 + ], + [ + 1794, + 451 + ], + [ + 1800, + 451 + ], + [ + 1805, + 456 + ], + [ + 1806, + 474 + ], + [ + 1807, + 477 + ], + [ + 1805, + 483 + ], + [ + 1806, + 595 + ], + [ + 1807, + 600 + ], + [ + 1806, + 603 + ], + [ + 1801, + 610 + ], + [ + 1782, + 610 + ], + [ + 1785, + 604 + ], + [ + 1786, + 598 + ], + [ + 1789, + 595 + ], + [ + 1787, + 483 + ], + [ + 1786, + 481 + ], + [ + 1786, + 477 + ], + [ + 1787, + 474 + ], + [ + 1786, + 461 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1984, + 861 + ], + [ + 1993, + 656 + ], + [ + 2013, + 642 + ], + [ + 2035, + 637 + ], + [ + 2048, + 637 + ], + [ + 2047, + 874 + ], + [ + 2007, + 871 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1984, + 86 + ], + [ + 1989, + 71 + ], + [ + 1994, + 56 + ], + [ + 2005, + 48 + ], + [ + 2018, + 49 + ], + [ + 2028, + 59 + ], + [ + 2039, + 64 + ], + [ + 2048, + 55 + ], + [ + 2047, + 147 + ], + [ + 2026, + 122 + ], + [ + 2015, + 120 + ], + [ + 1986, + 97 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2016, + 555 + ], + [ + 2021, + 544 + ], + [ + 2020, + 450 + ], + [ + 2024, 
+ 440 + ], + [ + 2027, + 171 + ], + [ + 2026, + 80 + ], + [ + 2041, + 80 + ], + [ + 2038, + 438 + ], + [ + 2042, + 450 + ], + [ + 2043, + 525 + ], + [ + 2048, + 546 + ], + [ + 2047, + 557 + ], + [ + 2025, + 557 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1996, + 180 + ], + [ + 2000, + 175 + ], + [ + 2022, + 175 + ], + [ + 2029, + 177 + ], + [ + 2028, + 265 + ], + [ + 2028, + 281 + ], + [ + 2010, + 279 + ], + [ + 1995, + 273 + ], + [ + 1996, + 182 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1985, + 101 + ], + [ + 1988, + 94 + ], + [ + 2026, + 82 + ], + [ + 2026, + 113 + ], + [ + 2019, + 116 + ], + [ + 2019, + 131 + ], + [ + 1999, + 137 + ], + [ + 1999, + 122 + ], + [ + 1989, + 124 + ], + [ + 1986, + 120 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1580, + 652 + ], + [ + 1579, + 643 + ], + [ + 1581, + 640 + ], + [ + 1581, + 474 + ], + [ + 1583, + 468 + ], + [ + 1587, + 464 + ], + [ + 1592, + 463 + ], + [ + 1598, + 465 + ], + [ + 1602, + 470 + ], + [ + 1602, + 481 + ], + [ + 1603, + 503 + ], + [ + 1605, + 650 + ], + [ + 1609, + 656 + ], + [ + 1609, + 659 + ], + [ + 1581, + 660 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1648, + 448 + ], + [ + 1650, + 443 + ], + [ + 1653, + 442 + ], + [ + 1657, + 443 + ], + [ + 1658, + 445 + ], + [ + 1659, + 539 + ], + [ + 1647, + 538 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1619, + 726 + ], + [ + 1617, + 720 + ], + [ + 1610, + 712 + ], + [ + 1608, + 704 + ], + [ + 1606, + 699 + ], + [ + 1605, + 510 + ], + [ + 1604, + 503 + ], + [ + 1599, + 498 + ], + [ + 1593, + 497 + ], + [ + 1587, + 499 + ], + [ + 1582, + 505 + ], + [ + 1581, + 517 + ], + [ + 1583, + 700 + ], + [ + 1581, + 705 + ], + [ + 1581, + 713 + ], + [ + 1578, + 722 + ], + [ + 1581, + 727 + ], + [ + 1599, + 729 + ], + [ + 1611, + 729 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1681, + 630 + ], + [ + 1681, + 627 + ], + [ + 1676, + 623 + ], + [ + 1675, + 615 + ], + [ + 1672, + 610 + ], + [ + 1673, + 464 + ], + [ + 1670, + 456 + ], + [ + 1666, + 455 + ], + [ + 1660, + 455 + ], + [ + 1656, + 460 + ], + [ + 1655, + 467 + ], + [ + 1654, + 612 + ], + [ + 1651, + 622 + ], + [ + 1649, + 624 + ], + [ + 1648, + 629 + ], + [ + 1662, + 631 + ], + [ + 1672, + 631 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1887, + 785 + ], + [ + 1876, + 761 + ], + [ + 1878, + 517 + ], + [ + 1869, + 484 + ], + [ + 1872, + 0 + ], + [ + 1841, + 0 + ], + [ + 1834, + 486 + ], + [ + 1824, + 517 + ], + [ + 1822, + 757 + ], + [ + 1808, + 781 + ], + [ + 1836, + 788 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 795, + 434 + ], + [ + 786, + 432 + ], + [ + 769, + 431 + ], + [ + 763, + 433 + ], + [ + 788, + 450 + ], + [ + 797, + 436 + ], + [ + 794, + 433 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 761, + 430 + ], + [ + 743, + 430 + ], + [ + 736, + 430 + ], + [ + 750, + 464 + ], + [ + 781, + 466 + ], + [ + 788, + 463 + ], + [ + 787, + 445 + ], + [ + 783, + 440 + ], + [ + 776, + 434 + ], + [ + 768, + 432 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 789, + 460 + ], + [ + 780, + 459 + ], + [ + 773, + 456 + ], + [ + 768, + 449 + ], + [ + 761, + 447 + ], + [ + 756, + 442 + ], + [ + 753, + 439 + ], + [ + 745, + 438 + ], + [ + 726, + 465 + ], + [ + 727, + 469 + ], + [ + 761, + 470 + ], + [ + 788, + 467 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 746, + 435 + ], + [ + 739, + 429 + ], + [ + 731, + 428 + ], + [ + 740, + 465 + ], + [ + 748, + 465 + ], + [ + 752, + 457 + ], + [ + 749, + 451 + ], + [ + 
749, + 443 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 729, + 427 + ], + [ + 717, + 426 + ], + [ + 725, + 467 + ], + [ + 730, + 467 + ], + [ + 737, + 466 + ], + [ + 742, + 462 + ], + [ + 743, + 455 + ], + [ + 741, + 447 + ], + [ + 739, + 438 + ], + [ + 734, + 432 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 726, + 471 + ], + [ + 734, + 471 + ], + [ + 739, + 469 + ], + [ + 740, + 463 + ], + [ + 739, + 458 + ], + [ + 729, + 449 + ], + [ + 724, + 449 + ], + [ + 720, + 470 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 823, + 421 + ], + [ + 806, + 422 + ], + [ + 797, + 424 + ], + [ + 790, + 438 + ], + [ + 785, + 438 + ], + [ + 782, + 439 + ], + [ + 782, + 442 + ], + [ + 784, + 443 + ], + [ + 787, + 444 + ], + [ + 784, + 452 + ], + [ + 784, + 468 + ], + [ + 784, + 477 + ], + [ + 785, + 479 + ], + [ + 796, + 479 + ], + [ + 797, + 477 + ], + [ + 797, + 473 + ], + [ + 800, + 474 + ], + [ + 827, + 473 + ], + [ + 837, + 472 + ], + [ + 844, + 472 + ], + [ + 844, + 476 + ], + [ + 848, + 477 + ], + [ + 853, + 477 + ], + [ + 854, + 470 + ], + [ + 854, + 453 + ], + [ + 851, + 441 + ], + [ + 855, + 441 + ], + [ + 857, + 439 + ], + [ + 857, + 436 + ], + [ + 855, + 435 + ], + [ + 848, + 435 + ], + [ + 847, + 437 + ], + [ + 841, + 424 + ], + [ + 839, + 421 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 703, + 426 + ], + [ + 683, + 425 + ], + [ + 698, + 474 + ], + [ + 704, + 475 + ], + [ + 709, + 472 + ], + [ + 712, + 449 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 616, + 420 + ], + [ + 581, + 420 + ], + [ + 563, + 421 + ], + [ + 550, + 429 + ], + [ + 596, + 464 + ], + [ + 616, + 423 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1731, + 31 + ], + [ + 1733, + 6 + ], + [ + 1733, + 1 + ], + [ + 1842, + 1 + ], + [ + 1840, + 100 + ], + [ + 1831, + 98 + ], + [ + 1832, + 79 + ], + [ + 1806, + 78 + ], + [ + 1806, + 69 + ], + [ + 1785, + 68 + ], + [ + 1787, + 48 + ], + [ + 1736, + 36 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1911, + 59 + ], + [ + 1911, + 38 + ], + [ + 1932, + 36 + ], + [ + 1932, + 1 + ], + [ + 1842, + 1 + ], + [ + 1842, + 42 + ], + [ + 1864, + 45 + ], + [ + 1867, + 62 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 426, + 363 + ], + [ + 437, + 377 + ], + [ + 445, + 393 + ], + [ + 435, + 398 + ], + [ + 422, + 391 + ], + [ + 420, + 401 + ], + [ + 507, + 438 + ], + [ + 526, + 418 + ], + [ + 522, + 404 + ], + [ + 518, + 402 + ], + [ + 504, + 407 + ], + [ + 480, + 396 + ], + [ + 468, + 401 + ], + [ + 466, + 396 + ], + [ + 474, + 378 + ], + [ + 468, + 374 + ], + [ + 455, + 386 + ], + [ + 452, + 393 + ], + [ + 449, + 382 + ], + [ + 435, + 366 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 598, + 450 + ], + [ + 578, + 425 + ], + [ + 573, + 423 + ], + [ + 553, + 421 + ], + [ + 520, + 420 + ], + [ + 500, + 420 + ], + [ + 519, + 493 + ], + [ + 539, + 493 + ], + [ + 542, + 498 + ], + [ + 548, + 499 + ], + [ + 556, + 499 + ], + [ + 562, + 497 + ], + [ + 566, + 499 + ], + [ + 572, + 501 + ], + [ + 581, + 501 + ], + [ + 587, + 497 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 270, + 444 + ], + [ + 298, + 426 + ], + [ + 322, + 410 + ], + [ + 342, + 403 + ], + [ + 360, + 401 + ], + [ + 399, + 398 + ], + [ + 444, + 399 + ], + [ + 465, + 400 + ], + [ + 484, + 405 + ], + [ + 502, + 414 + ], + [ + 517, + 429 + ], + [ + 523, + 439 + ], + [ + 524, + 454 + ], + [ + 523, + 466 + ], + [ + 526, + 466 + ], + [ + 528, + 472 + ], + [ + 527, + 487 + ], + [ + 517, + 498 + ], + [ + 509, + 501 + ], + [ 
+ 503, + 507 + ], + [ + 497, + 511 + ], + [ + 459, + 513 + ], + [ + 424, + 517 + ], + [ + 416, + 513 + ], + [ + 414, + 507 + ], + [ + 349, + 509 + ], + [ + 346, + 506 + ], + [ + 340, + 514 + ], + [ + 332, + 518 + ], + [ + 318, + 515 + ], + [ + 306, + 509 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 294, + 460 + ], + [ + 292, + 451 + ], + [ + 299, + 447 + ], + [ + 307, + 438 + ], + [ + 307, + 436 + ], + [ + 287, + 440 + ], + [ + 263, + 442 + ], + [ + 262, + 511 + ], + [ + 309, + 511 + ], + [ + 319, + 515 + ], + [ + 325, + 511 + ], + [ + 328, + 496 + ], + [ + 322, + 481 + ], + [ + 309, + 468 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 225, + 448 + ], + [ + 220, + 449 + ], + [ + 218, + 458 + ], + [ + 213, + 464 + ], + [ + 201, + 471 + ], + [ + 196, + 482 + ], + [ + 197, + 501 + ], + [ + 204, + 511 + ], + [ + 216, + 516 + ], + [ + 229, + 514 + ], + [ + 233, + 443 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 77, + 360 + ], + [ + 77, + 355 + ], + [ + 81, + 352 + ], + [ + 81, + 343 + ], + [ + 78, + 332 + ], + [ + 67, + 328 + ], + [ + 58, + 331 + ], + [ + 56, + 343 + ], + [ + 59, + 352 + ], + [ + 54, + 359 + ], + [ + 47, + 374 + ], + [ + 42, + 399 + ], + [ + 45, + 424 + ], + [ + 57, + 448 + ], + [ + 65, + 467 + ], + [ + 42, + 487 + ], + [ + 31, + 493 + ], + [ + 22, + 502 + ], + [ + 28, + 511 + ], + [ + 41, + 523 + ], + [ + 49, + 524 + ], + [ + 48, + 520 + ], + [ + 43, + 511 + ], + [ + 61, + 499 + ], + [ + 58, + 519 + ], + [ + 59, + 528 + ], + [ + 65, + 530 + ], + [ + 91, + 527 + ], + [ + 91, + 524 + ], + [ + 81, + 519 + ], + [ + 79, + 508 + ], + [ + 85, + 478 + ], + [ + 90, + 469 + ], + [ + 87, + 454 + ], + [ + 83, + 426 + ], + [ + 90, + 428 + ], + [ + 96, + 431 + ], + [ + 98, + 426 + ], + [ + 94, + 420 + ], + [ + 87, + 416 + ], + [ + 83, + 393 + ], + [ + 82, + 377 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 113, + 454 + ], + [ + 105, + 437 + ], + [ + 99, + 423 + ], + [ + 96, + 425 + ], + [ + 95, + 429 + ], + [ + 106, + 456 + ], + [ + 117, + 486 + ], + [ + 84, + 511 + ], + [ + 77, + 512 + ], + [ + 72, + 520 + ], + [ + 75, + 526 + ], + [ + 81, + 529 + ], + [ + 87, + 525 + ], + [ + 87, + 520 + ], + [ + 87, + 516 + ], + [ + 111, + 498 + ], + [ + 120, + 499 + ], + [ + 123, + 508 + ], + [ + 125, + 513 + ], + [ + 135, + 513 + ], + [ + 132, + 520 + ], + [ + 135, + 527 + ], + [ + 140, + 529 + ], + [ + 152, + 525 + ], + [ + 155, + 516 + ], + [ + 150, + 508 + ], + [ + 147, + 486 + ], + [ + 134, + 484 + ], + [ + 130, + 466 + ], + [ + 146, + 462 + ], + [ + 143, + 457 + ], + [ + 125, + 454 + ], + [ + 124, + 448 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 46, + 538 + ], + [ + 44, + 444 + ], + [ + 32, + 444 + ], + [ + 33, + 539 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 193, + 538 + ], + [ + 191, + 446 + ], + [ + 181, + 445 + ], + [ + 181, + 539 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 277, + 535 + ], + [ + 278, + 445 + ], + [ + 267, + 444 + ], + [ + 265, + 534 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 356, + 191 + ], + [ + 350, + 199 + ], + [ + 347, + 212 + ], + [ + 351, + 225 + ], + [ + 357, + 231 + ], + [ + 366, + 235 + ], + [ + 383, + 235 + ], + [ + 393, + 229 + ], + [ + 400, + 218 + ], + [ + 399, + 202 + ], + [ + 391, + 191 + ], + [ + 380, + 186 + ], + [ + 368, + 185 + ], + [ + 359, + 189 + ], + [ + 354, + 194 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 396, + 325 + ], + [ + 396, + 322 + ], + [ + 400, + 321 + ], + [ + 404, + 315 + ], + [ + 403, + 309 + ], + [ + 397, 
+ 304 + ], + [ + 396, + 295 + ], + [ + 402, + 291 + ], + [ + 402, + 285 + ], + [ + 395, + 280 + ], + [ + 395, + 273 + ], + [ + 399, + 271 + ], + [ + 400, + 267 + ], + [ + 398, + 263 + ], + [ + 390, + 261 + ], + [ + 373, + 261 + ], + [ + 376, + 329 + ], + [ + 387, + 331 + ], + [ + 388, + 325 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 386, + 534 + ], + [ + 385, + 457 + ], + [ + 382, + 450 + ], + [ + 378, + 230 + ], + [ + 376, + 222 + ], + [ + 373, + 81 + ], + [ + 368, + 87 + ], + [ + 370, + 223 + ], + [ + 368, + 232 + ], + [ + 373, + 450 + ], + [ + 370, + 461 + ], + [ + 371, + 535 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 498, + 444 + ], + [ + 488, + 518 + ], + [ + 497, + 518 + ], + [ + 506, + 443 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 468, + 448 + ], + [ + 467, + 521 + ], + [ + 476, + 521 + ], + [ + 477, + 448 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 442, + 446 + ], + [ + 442, + 525 + ], + [ + 451, + 525 + ], + [ + 451, + 447 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 358, + 457 + ], + [ + 361, + 543 + ], + [ + 370, + 543 + ], + [ + 366, + 457 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 508, + 11 + ], + [ + 432, + 28 + ], + [ + 434, + 23 + ], + [ + 499, + 8 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 361, + 327 + ], + [ + 361, + 252 + ], + [ + 375, + 250 + ], + [ + 382, + 252 + ], + [ + 391, + 254 + ], + [ + 395, + 255 + ], + [ + 398, + 259 + ], + [ + 397, + 263 + ], + [ + 394, + 267 + ], + [ + 377, + 269 + ], + [ + 376, + 277 + ], + [ + 393, + 278 + ], + [ + 396, + 280 + ], + [ + 396, + 286 + ], + [ + 391, + 291 + ], + [ + 377, + 291 + ], + [ + 377, + 300 + ], + [ + 393, + 301 + ], + [ + 396, + 305 + ], + [ + 397, + 310 + ], + [ + 392, + 314 + ], + [ + 378, + 316 + ], + [ + 372, + 328 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 585, + 501 + ], + [ + 582, + 496 + ], + [ + 581, + 478 + ], + [ + 585, + 465 + ], + [ + 588, + 456 + ], + [ + 578, + 454 + ], + [ + 577, + 451 + ], + [ + 578, + 447 + ], + [ + 582, + 446 + ], + [ + 588, + 447 + ], + [ + 591, + 449 + ], + [ + 598, + 435 + ], + [ + 603, + 427 + ], + [ + 609, + 422 + ], + [ + 619, + 419 + ], + [ + 648, + 417 + ], + [ + 675, + 417 + ], + [ + 681, + 419 + ], + [ + 689, + 424 + ], + [ + 695, + 434 + ], + [ + 702, + 450 + ], + [ + 707, + 475 + ], + [ + 707, + 495 + ], + [ + 704, + 501 + ], + [ + 699, + 504 + ], + [ + 694, + 505 + ], + [ + 691, + 494 + ], + [ + 689, + 496 + ], + [ + 688, + 507 + ], + [ + 685, + 510 + ], + [ + 681, + 510 + ], + [ + 678, + 507 + ], + [ + 677, + 500 + ], + [ + 640, + 504 + ], + [ + 620, + 504 + ], + [ + 618, + 505 + ], + [ + 618, + 510 + ], + [ + 614, + 512 + ], + [ + 611, + 512 + ], + [ + 607, + 509 + ], + [ + 607, + 504 + ], + [ + 599, + 504 + ], + [ + 598, + 512 + ], + [ + 591, + 515 + ], + [ + 587, + 514 + ], + [ + 585, + 510 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 649, + 490 + ], + [ + 649, + 483 + ], + [ + 614, + 483 + ], + [ + 614, + 491 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 728, + 477 + ], + [ + 729, + 457 + ], + [ + 725, + 446 + ], + [ + 725, + 443 + ], + [ + 728, + 440 + ], + [ + 727, + 434 + ], + [ + 720, + 427 + ], + [ + 713, + 420 + ], + [ + 711, + 414 + ], + [ + 707, + 410 + ], + [ + 701, + 409 + ], + [ + 699, + 413 + ], + [ + 700, + 420 + ], + [ + 703, + 425 + ], + [ + 695, + 429 + ], + [ + 697, + 440 + ], + [ + 701, + 451 + ], + [ + 701, + 460 + ], + [ + 705, + 463 + ], + [ + 707, + 480 + ], + [ + 708, + 494 + ], + [ + 711, + 
504 + ], + [ + 705, + 505 + ], + [ + 705, + 508 + ], + [ + 711, + 509 + ], + [ + 725, + 507 + ], + [ + 725, + 502 + ], + [ + 727, + 501 + ], + [ + 725, + 496 + ], + [ + 727, + 486 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000075_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000075_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..aad3c38293e11b07ce74abd62c1eb9e67807b08d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000075_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000076_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000076_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..cfe75c4e6b2298d8cced5f6341f7141b5adbbf8c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000076_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000076_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000076_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..53f10cce3f4895621c022aadaaa68dccaf68b363 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000076_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000076_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000076_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..a80b35cdc424e52490b1cd08e15650c5bfb15f67 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000076_000019_gtFine_polygons.json @@ -0,0 +1,5433 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1546, + 0 + ], + [ + 1515, + 17 + ], + [ + 1308, + 48 + ], + [ + 1293, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1724, + 384 + ], + [ + 1037, + 401 + ], + [ + 0, + 446 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 400 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1658, + 428 + ], + [ + 1588, + 433 + ], + [ + 1563, + 435 + ], + [ + 1438, + 445 + ], + [ + 1338, + 451 + ], + [ + 1267, + 455 + ], + [ + 1105, + 466 + ], + [ + 1012, + 472 + ], + [ + 883, + 482 + ], + [ + 837, + 485 + ], + [ + 757, + 488 + ], + [ + 519, + 505 + ], + [ + 326, + 516 + ], + [ + 0, + 535 + ], + [ + 0, + 415 + ], + [ + 1748, + 348 + ], + [ + 1751, + 425 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 0, + 0 + ], + [ + 0, + 495 + ], + [ + 381, + 481 + ], + [ + 395, + 477 + ], + [ + 483, + 472 + ], + [ + 551, + 470 + 
], + [ + 809, + 457 + ], + [ + 889, + 453 + ], + [ + 1092, + 443 + ], + [ + 1280, + 433 + ], + [ + 1344, + 431 + ], + [ + 1450, + 427 + ], + [ + 1598, + 427 + ], + [ + 1655, + 427 + ], + [ + 1680, + 418 + ], + [ + 1709, + 1 + ], + [ + 1452, + 1 + ], + [ + 1398, + 9 + ], + [ + 1397, + 2 + ], + [ + 1380, + 4 + ], + [ + 1380, + 14 + ], + [ + 1360, + 16 + ], + [ + 1361, + 7 + ], + [ + 1349, + 9 + ], + [ + 1350, + 19 + ], + [ + 1314, + 22 + ], + [ + 1313, + 1 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1322, + 499 + ], + [ + 1347, + 491 + ], + [ + 1376, + 483 + ], + [ + 1435, + 472 + ], + [ + 1515, + 461 + ], + [ + 1609, + 454 + ], + [ + 1682, + 451 + ], + [ + 2048, + 484 + ], + [ + 2048, + 1024 + ], + [ + 1789, + 868 + ], + [ + 1520, + 684 + ], + [ + 1336, + 558 + ], + [ + 1313, + 533 + ], + [ + 1312, + 513 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 636, + 429 + ], + [ + 633, + 379 + ], + [ + 457, + 385 + ], + [ + 255, + 393 + ], + [ + 255, + 429 + ], + [ + 372, + 485 + ], + [ + 549, + 479 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 223, + 430 + ], + [ + 225, + 385 + ], + [ + 227, + 364 + ], + [ + 225, + 355 + ], + [ + 228, + 347 + ], + [ + 230, + 323 + ], + [ + 234, + 312 + ], + [ + 212, + 297 + ], + [ + 184, + 291 + ], + [ + 180, + 190 + ], + [ + 166, + 181 + ], + [ + 165, + 175 + ], + [ + 173, + 177 + ], + [ + 181, + 180 + ], + [ + 176, + 172 + ], + [ + 172, + 169 + ], + [ + 172, + 160 + ], + [ + 161, + 154 + ], + [ + 158, + 140 + ], + [ + 162, + 139 + ], + [ + 158, + 132 + ], + [ + 162, + 123 + ], + [ + 157, + 106 + ], + [ + 146, + 95 + ], + [ + 135, + 98 + ], + [ + 132, + 95 + ], + [ + 131, + 92 + ], + [ + 141, + 91 + ], + [ + 126, + 84 + ], + [ + 117, + 88 + ], + [ + 86, + 88 + ], + [ + 74, + 90 + ], + [ + 90, + 99 + ], + [ + 90, + 104 + ], + [ + 96, + 107 + ], + [ + 102, + 108 + ], + [ + 102, + 114 + ], + [ + 90, + 114 + ], + [ + 76, + 116 + ], + [ + 67, + 118 + ], + [ + 73, + 125 + ], + [ + 57, + 125 + ], + [ + 1, + 136 + ], + [ + 6, + 130 + ], + [ + 6, + 121 + ], + [ + 0, + 119 + ], + [ + 0, + 0 + ], + [ + 558, + 0 + ], + [ + 562, + 6 + ], + [ + 558, + 6 + ], + [ + 556, + 14 + ], + [ + 556, + 24 + ], + [ + 565, + 16 + ], + [ + 576, + 25 + ], + [ + 555, + 33 + ], + [ + 555, + 37 + ], + [ + 558, + 38 + ], + [ + 565, + 39 + ], + [ + 577, + 42 + ], + [ + 574, + 48 + ], + [ + 563, + 51 + ], + [ + 565, + 60 + ], + [ + 574, + 64 + ], + [ + 591, + 62 + ], + [ + 585, + 70 + ], + [ + 574, + 77 + ], + [ + 571, + 79 + ], + [ + 562, + 77 + ], + [ + 553, + 71 + ], + [ + 542, + 75 + ], + [ + 534, + 76 + ], + [ + 525, + 81 + ], + [ + 505, + 79 + ], + [ + 495, + 70 + ], + [ + 490, + 70 + ], + [ + 490, + 80 + ], + [ + 467, + 66 + ], + [ + 461, + 62 + ], + [ + 461, + 58 + ], + [ + 456, + 57 + ], + [ + 450, + 62 + ], + [ + 452, + 70 + ], + [ + 454, + 75 + ], + [ + 459, + 80 + ], + [ + 466, + 86 + ], + [ + 458, + 87 + ], + [ + 446, + 95 + ], + [ + 439, + 96 + ], + [ + 426, + 89 + ], + [ + 408, + 84 + ], + [ + 399, + 82 + ], + [ + 396, + 77 + ], + [ + 394, + 76 + ], + [ + 393, + 86 + ], + [ + 387, + 93 + ], + [ + 379, + 97 + ], + [ + 374, + 105 + ], + [ + 371, + 114 + ], + [ + 373, + 123 + ], + [ + 380, + 117 + ], + [ + 383, + 120 + ], + [ + 379, + 127 + ], + [ + 375, + 130 + ], + [ + 378, + 137 + ], + [ + 383, + 145 + ], + [ + 380, + 151 + ], + [ + 372, + 154 + ], + [ + 363, + 147 + ], + [ + 358, + 156 + ], + [ + 347, + 165 + ], + [ + 341, + 175 + ], + [ + 332, + 182 + ], + [ + 332, + 188 + ], + [ + 318, + 199 + ], + [ + 299, + 224 + ], + [ + 285, + 249 + 
], + [ + 269, + 281 + ], + [ + 260, + 294 + ], + [ + 266, + 303 + ], + [ + 269, + 316 + ], + [ + 258, + 324 + ], + [ + 255, + 352 + ], + [ + 250, + 387 + ], + [ + 245, + 429 + ], + [ + 227, + 439 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 115, + 479 + ], + [ + 105, + 463 + ], + [ + 91, + 443 + ], + [ + 85, + 438 + ], + [ + 73, + 437 + ], + [ + 19, + 438 + ], + [ + 0, + 437 + ], + [ + 0, + 525 + ], + [ + 26, + 524 + ], + [ + 90, + 530 + ], + [ + 100, + 530 + ], + [ + 111, + 526 + ], + [ + 116, + 519 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 174, + 538 + ], + [ + 88, + 543 + ], + [ + 0, + 549 + ], + [ + 0, + 603 + ], + [ + 195, + 573 + ], + [ + 314, + 550 + ], + [ + 312, + 536 + ], + [ + 253, + 534 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 88, + 558 + ], + [ + 76, + 377 + ], + [ + 62, + 198 + ], + [ + 50, + 0 + ], + [ + 4, + 0 + ], + [ + 24, + 560 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 284, + 417 + ], + [ + 251, + 418 + ], + [ + 209, + 425 + ], + [ + 190, + 431 + ], + [ + 167, + 446 + ], + [ + 151, + 459 + ], + [ + 128, + 466 + ], + [ + 108, + 475 + ], + [ + 103, + 481 + ], + [ + 100, + 491 + ], + [ + 98, + 500 + ], + [ + 101, + 514 + ], + [ + 100, + 519 + ], + [ + 106, + 520 + ], + [ + 115, + 520 + ], + [ + 120, + 528 + ], + [ + 125, + 533 + ], + [ + 144, + 531 + ], + [ + 149, + 528 + ], + [ + 155, + 521 + ], + [ + 183, + 518 + ], + [ + 186, + 525 + ], + [ + 192, + 529 + ], + [ + 215, + 528 + ], + [ + 220, + 524 + ], + [ + 225, + 517 + ], + [ + 271, + 513 + ], + [ + 277, + 520 + ], + [ + 285, + 523 + ], + [ + 294, + 523 + ], + [ + 303, + 518 + ], + [ + 306, + 511 + ], + [ + 320, + 510 + ], + [ + 337, + 512 + ], + [ + 343, + 517 + ], + [ + 347, + 519 + ], + [ + 364, + 518 + ], + [ + 368, + 515 + ], + [ + 372, + 506 + ], + [ + 373, + 503 + ], + [ + 378, + 496 + ], + [ + 380, + 485 + ], + [ + 378, + 475 + ], + [ + 375, + 470 + ], + [ + 371, + 452 + ], + [ + 358, + 437 + ], + [ + 338, + 424 + ], + [ + 317, + 419 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 419, + 108 + ], + [ + 424, + 199 + ], + [ + 426, + 213 + ], + [ + 440, + 481 + ], + [ + 438, + 483 + ], + [ + 454, + 483 + ], + [ + 451, + 481 + ], + [ + 437, + 214 + ], + [ + 438, + 209 + ], + [ + 431, + 108 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1435, + 370 + ], + [ + 1417, + 366 + ], + [ + 1330, + 366 + ], + [ + 1330, + 369 + ], + [ + 1332, + 377 + ], + [ + 1440, + 374 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 956, + 352 + ], + [ + 957, + 380 + ], + [ + 1098, + 384 + ], + [ + 1097, + 355 + ], + [ + 1097, + 348 + ], + [ + 1021, + 348 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 1223, + 375 + ], + [ + 1154, + 375 + ], + [ + 1068, + 376 + ], + [ + 934, + 377 + ], + [ + 907, + 377 + ], + [ + 909, + 447 + ], + [ + 1093, + 444 + ], + [ + 1114, + 445 + ], + [ + 1286, + 434 + ], + [ + 1379, + 431 + ], + [ + 1440, + 430 + ], + [ + 1502, + 375 + ], + [ + 1499, + 371 + ], + [ + 1465, + 370 + ], + [ + 1429, + 370 + ], + [ + 1329, + 373 + ], + [ + 1274, + 374 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1127, + 430 + ], + [ + 1120, + 222 + ], + [ + 1111, + 222 + ], + [ + 1120, + 433 + ], + [ + 1126, + 432 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 583, + 83 + ], + [ + 588, + 80 + ], + [ + 583, + 76 + ], + [ + 582, + 74 + ], + [ + 591, + 74 + ], + [ + 595, + 70 + ], + [ + 585, + 63 + ], + [ + 584, + 56 + ], + [ + 590, + 55 + ], + [ + 583, + 48 + ], + [ + 592, + 42 + ], + [ + 591, + 31 + ], + [ + 
598, + 22 + ], + [ + 608, + 30 + ], + [ + 620, + 19 + ], + [ + 625, + 32 + ], + [ + 642, + 37 + ], + [ + 646, + 29 + ], + [ + 651, + 17 + ], + [ + 655, + 22 + ], + [ + 656, + 30 + ], + [ + 665, + 37 + ], + [ + 668, + 27 + ], + [ + 672, + 30 + ], + [ + 673, + 35 + ], + [ + 680, + 36 + ], + [ + 684, + 39 + ], + [ + 691, + 43 + ], + [ + 701, + 34 + ], + [ + 702, + 44 + ], + [ + 712, + 42 + ], + [ + 720, + 35 + ], + [ + 726, + 34 + ], + [ + 727, + 44 + ], + [ + 718, + 47 + ], + [ + 724, + 51 + ], + [ + 729, + 59 + ], + [ + 729, + 63 + ], + [ + 739, + 63 + ], + [ + 751, + 40 + ], + [ + 758, + 40 + ], + [ + 765, + 37 + ], + [ + 775, + 39 + ], + [ + 785, + 34 + ], + [ + 783, + 30 + ], + [ + 786, + 25 + ], + [ + 784, + 11 + ], + [ + 780, + 2 + ], + [ + 772, + 0 + ], + [ + 768, + 5 + ], + [ + 771, + 12 + ], + [ + 777, + 19 + ], + [ + 763, + 20 + ], + [ + 757, + 17 + ], + [ + 749, + 13 + ], + [ + 735, + 13 + ], + [ + 732, + 9 + ], + [ + 731, + 0 + ], + [ + 963, + 1 + ], + [ + 965, + 6 + ], + [ + 957, + 9 + ], + [ + 955, + 19 + ], + [ + 955, + 28 + ], + [ + 959, + 37 + ], + [ + 962, + 40 + ], + [ + 970, + 26 + ], + [ + 983, + 33 + ], + [ + 988, + 28 + ], + [ + 995, + 23 + ], + [ + 990, + 6 + ], + [ + 994, + 1 + ], + [ + 1073, + 0 + ], + [ + 1078, + 4 + ], + [ + 1090, + 10 + ], + [ + 1098, + 20 + ], + [ + 1102, + 29 + ], + [ + 1103, + 32 + ], + [ + 1120, + 29 + ], + [ + 1130, + 49 + ], + [ + 1140, + 51 + ], + [ + 1144, + 66 + ], + [ + 1133, + 68 + ], + [ + 1136, + 79 + ], + [ + 1145, + 78 + ], + [ + 1151, + 84 + ], + [ + 1151, + 92 + ], + [ + 1156, + 95 + ], + [ + 1158, + 103 + ], + [ + 1153, + 108 + ], + [ + 1152, + 118 + ], + [ + 1154, + 125 + ], + [ + 1136, + 132 + ], + [ + 1140, + 149 + ], + [ + 1146, + 162 + ], + [ + 1127, + 164 + ], + [ + 1111, + 162 + ], + [ + 1099, + 140 + ], + [ + 1098, + 132 + ], + [ + 1084, + 129 + ], + [ + 1068, + 129 + ], + [ + 1072, + 118 + ], + [ + 1057, + 116 + ], + [ + 1053, + 108 + ], + [ + 1046, + 108 + ], + [ + 1043, + 114 + ], + [ + 1042, + 123 + ], + [ + 1025, + 119 + ], + [ + 1019, + 126 + ], + [ + 1025, + 130 + ], + [ + 1000, + 131 + ], + [ + 996, + 138 + ], + [ + 991, + 140 + ], + [ + 985, + 153 + ], + [ + 975, + 164 + ], + [ + 954, + 173 + ], + [ + 966, + 182 + ], + [ + 950, + 178 + ], + [ + 940, + 182 + ], + [ + 938, + 189 + ], + [ + 944, + 193 + ], + [ + 937, + 195 + ], + [ + 930, + 194 + ], + [ + 928, + 196 + ], + [ + 928, + 204 + ], + [ + 928, + 210 + ], + [ + 922, + 212 + ], + [ + 917, + 210 + ], + [ + 903, + 227 + ], + [ + 895, + 232 + ], + [ + 894, + 240 + ], + [ + 894, + 245 + ], + [ + 903, + 240 + ], + [ + 911, + 249 + ], + [ + 901, + 266 + ], + [ + 902, + 268 + ], + [ + 900, + 275 + ], + [ + 903, + 294 + ], + [ + 903, + 318 + ], + [ + 907, + 334 + ], + [ + 908, + 359 + ], + [ + 912, + 367 + ], + [ + 909, + 385 + ], + [ + 913, + 431 + ], + [ + 892, + 428 + ], + [ + 893, + 391 + ], + [ + 889, + 360 + ], + [ + 885, + 316 + ], + [ + 881, + 278 + ], + [ + 876, + 254 + ], + [ + 869, + 237 + ], + [ + 863, + 224 + ], + [ + 844, + 190 + ], + [ + 831, + 173 + ], + [ + 830, + 178 + ], + [ + 819, + 178 + ], + [ + 824, + 191 + ], + [ + 808, + 196 + ], + [ + 798, + 196 + ], + [ + 791, + 189 + ], + [ + 778, + 190 + ], + [ + 771, + 198 + ], + [ + 776, + 206 + ], + [ + 764, + 210 + ], + [ + 753, + 218 + ], + [ + 740, + 223 + ], + [ + 736, + 219 + ], + [ + 725, + 219 + ], + [ + 728, + 215 + ], + [ + 738, + 207 + ], + [ + 728, + 204 + ], + [ + 711, + 204 + ], + [ + 711, + 186 + ], + [ + 725, + 167 + ], + [ + 719, + 168 + ], + [ + 685, + 179 + ], + [ + 677, + 176 + ], 
+ [ + 691, + 158 + ], + [ + 708, + 151 + ], + [ + 718, + 136 + ], + [ + 709, + 124 + ], + [ + 693, + 113 + ], + [ + 687, + 107 + ], + [ + 683, + 99 + ], + [ + 673, + 108 + ], + [ + 670, + 106 + ], + [ + 668, + 95 + ], + [ + 667, + 86 + ], + [ + 650, + 84 + ], + [ + 640, + 85 + ], + [ + 634, + 89 + ], + [ + 606, + 97 + ], + [ + 595, + 95 + ], + [ + 591, + 91 + ], + [ + 584, + 90 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 772, + 419 + ], + [ + 749, + 0 + ], + [ + 736, + 0 + ], + [ + 745, + 287 + ], + [ + 749, + 416 + ], + [ + 753, + 421 + ], + [ + 757, + 422 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 808, + 433 + ], + [ + 808, + 477 + ], + [ + 813, + 477 + ], + [ + 813, + 433 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 817, + 470 + ], + [ + 827, + 465 + ], + [ + 841, + 458 + ], + [ + 850, + 442 + ], + [ + 866, + 427 + ], + [ + 880, + 400 + ], + [ + 894, + 417 + ], + [ + 905, + 421 + ], + [ + 922, + 404 + ], + [ + 937, + 406 + ], + [ + 951, + 425 + ], + [ + 885, + 473 + ], + [ + 842, + 475 + ], + [ + 822, + 473 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 649, + 415 + ], + [ + 632, + 421 + ], + [ + 601, + 441 + ], + [ + 567, + 449 + ], + [ + 546, + 455 + ], + [ + 541, + 461 + ], + [ + 539, + 467 + ], + [ + 536, + 474 + ], + [ + 539, + 490 + ], + [ + 543, + 493 + ], + [ + 549, + 494 + ], + [ + 552, + 496 + ], + [ + 558, + 503 + ], + [ + 562, + 505 + ], + [ + 580, + 503 + ], + [ + 583, + 500 + ], + [ + 586, + 496 + ], + [ + 604, + 495 + ], + [ + 614, + 495 + ], + [ + 616, + 495 + ], + [ + 618, + 500 + ], + [ + 621, + 503 + ], + [ + 643, + 501 + ], + [ + 646, + 498 + ], + [ + 651, + 492 + ], + [ + 690, + 490 + ], + [ + 692, + 494 + ], + [ + 696, + 496 + ], + [ + 703, + 497 + ], + [ + 709, + 496 + ], + [ + 715, + 494 + ], + [ + 719, + 488 + ], + [ + 749, + 487 + ], + [ + 753, + 490 + ], + [ + 755, + 493 + ], + [ + 760, + 495 + ], + [ + 773, + 495 + ], + [ + 780, + 492 + ], + [ + 785, + 483 + ], + [ + 801, + 477 + ], + [ + 809, + 474 + ], + [ + 812, + 468 + ], + [ + 811, + 459 + ], + [ + 808, + 454 + ], + [ + 806, + 440 + ], + [ + 804, + 434 + ], + [ + 783, + 415 + ], + [ + 781, + 413 + ], + [ + 749, + 409 + ], + [ + 744, + 401 + ], + [ + 744, + 400 + ], + [ + 741, + 398 + ], + [ + 728, + 400 + ], + [ + 726, + 404 + ], + [ + 727, + 409 + ], + [ + 702, + 410 + ], + [ + 671, + 411 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1008, + 330 + ], + [ + 1000, + 330 + ], + [ + 1001, + 370 + ], + [ + 1007, + 370 + ], + [ + 1011, + 365 + ], + [ + 1018, + 361 + ], + [ + 1018, + 357 + ], + [ + 1010, + 356 + ], + [ + 1010, + 355 + ], + [ + 1018, + 351 + ], + [ + 1017, + 346 + ], + [ + 1009, + 345 + ], + [ + 1009, + 342 + ], + [ + 1017, + 338 + ], + [ + 1017, + 332 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1004, + 410 + ], + [ + 999, + 318 + ], + [ + 995, + 253 + ], + [ + 996, + 239 + ], + [ + 1000, + 232 + ], + [ + 1006, + 223 + ], + [ + 1077, + 194 + ], + [ + 1109, + 189 + ], + [ + 1109, + 186 + ], + [ + 1077, + 191 + ], + [ + 1005, + 221 + ], + [ + 999, + 228 + ], + [ + 995, + 235 + ], + [ + 992, + 247 + ], + [ + 998, + 410 + ], + [ + 1001, + 416 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1107, + 180 + ], + [ + 1110, + 177 + ], + [ + 1122, + 176 + ], + [ + 1125, + 180 + ], + [ + 1127, + 220 + ], + [ + 1124, + 224 + ], + [ + 1111, + 225 + ], + [ + 1107, + 221 + ], + [ + 1106, + 182 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1050, + 406 + ], + [ + 1038, + 404 + ], + [ + 1000, + 406 + 
], + [ + 981, + 407 + ], + [ + 958, + 410 + ], + [ + 951, + 413 + ], + [ + 937, + 425 + ], + [ + 934, + 426 + ], + [ + 930, + 428 + ], + [ + 926, + 433 + ], + [ + 907, + 437 + ], + [ + 882, + 446 + ], + [ + 878, + 451 + ], + [ + 876, + 459 + ], + [ + 877, + 467 + ], + [ + 879, + 472 + ], + [ + 890, + 474 + ], + [ + 894, + 481 + ], + [ + 898, + 485 + ], + [ + 903, + 485 + ], + [ + 913, + 483 + ], + [ + 916, + 478 + ], + [ + 931, + 477 + ], + [ + 948, + 477 + ], + [ + 952, + 482 + ], + [ + 955, + 483 + ], + [ + 970, + 481 + ], + [ + 974, + 478 + ], + [ + 978, + 472 + ], + [ + 986, + 472 + ], + [ + 990, + 477 + ], + [ + 995, + 478 + ], + [ + 1003, + 477 + ], + [ + 1006, + 475 + ], + [ + 1009, + 472 + ], + [ + 1042, + 471 + ], + [ + 1045, + 475 + ], + [ + 1049, + 477 + ], + [ + 1061, + 476 + ], + [ + 1065, + 472 + ], + [ + 1069, + 465 + ], + [ + 1076, + 462 + ], + [ + 1081, + 456 + ], + [ + 1083, + 447 + ], + [ + 1080, + 442 + ], + [ + 1081, + 433 + ], + [ + 1080, + 427 + ], + [ + 1061, + 408 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1289, + 151 + ], + [ + 1291, + 224 + ], + [ + 1290, + 228 + ], + [ + 1297, + 415 + ], + [ + 1298, + 447 + ], + [ + 1304, + 447 + ], + [ + 1299, + 229 + ], + [ + 1297, + 217 + ], + [ + 1298, + 151 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1335, + 412 + ], + [ + 1336, + 445 + ], + [ + 1339, + 446 + ], + [ + 1338, + 411 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1294, + 414 + ], + [ + 1295, + 448 + ], + [ + 1299, + 448 + ], + [ + 1297, + 413 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1223, + 334 + ], + [ + 1223, + 368 + ], + [ + 1240, + 367 + ], + [ + 1239, + 331 + ], + [ + 1223, + 332 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1232, + 401 + ], + [ + 1231, + 332 + ], + [ + 1229, + 332 + ], + [ + 1230, + 400 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1168, + 402 + ], + [ + 1153, + 408 + ], + [ + 1142, + 417 + ], + [ + 1139, + 418 + ], + [ + 1137, + 420 + ], + [ + 1136, + 422 + ], + [ + 1133, + 425 + ], + [ + 1116, + 429 + ], + [ + 1105, + 433 + ], + [ + 1099, + 437 + ], + [ + 1098, + 442 + ], + [ + 1098, + 455 + ], + [ + 1102, + 461 + ], + [ + 1106, + 465 + ], + [ + 1108, + 468 + ], + [ + 1111, + 470 + ], + [ + 1116, + 471 + ], + [ + 1121, + 470 + ], + [ + 1128, + 467 + ], + [ + 1132, + 464 + ], + [ + 1163, + 464 + ], + [ + 1165, + 467 + ], + [ + 1169, + 469 + ], + [ + 1183, + 467 + ], + [ + 1186, + 465 + ], + [ + 1190, + 461 + ], + [ + 1193, + 464 + ], + [ + 1198, + 466 + ], + [ + 1204, + 465 + ], + [ + 1208, + 463 + ], + [ + 1212, + 459 + ], + [ + 1245, + 458 + ], + [ + 1248, + 461 + ], + [ + 1250, + 463 + ], + [ + 1265, + 462 + ], + [ + 1267, + 458 + ], + [ + 1271, + 453 + ], + [ + 1274, + 451 + ], + [ + 1276, + 441 + ], + [ + 1276, + 428 + ], + [ + 1274, + 422 + ], + [ + 1270, + 417 + ], + [ + 1251, + 400 + ], + [ + 1240, + 398 + ], + [ + 1215, + 398 + ], + [ + 1205, + 400 + ], + [ + 1191, + 400 + ], + [ + 1180, + 400 + ], + [ + 1174, + 400 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1319, + 176 + ], + [ + 1312, + 1 + ], + [ + 1352, + 0 + ], + [ + 1340, + 4 + ], + [ + 1341, + 7 + ], + [ + 1345, + 18 + ], + [ + 1347, + 37 + ], + [ + 1349, + 45 + ], + [ + 1340, + 50 + ], + [ + 1340, + 56 + ], + [ + 1351, + 64 + ], + [ + 1357, + 71 + ], + [ + 1349, + 78 + ], + [ + 1358, + 86 + ], + [ + 1370, + 80 + ], + [ + 1364, + 68 + ], + [ + 1364, + 58 + ], + [ + 1367, + 51 + ], + [ + 1359, + 34 + ], + [ + 1373, + 24 + ], + [ + 1395, + 19 + ], + [ + 1407, + 12 + ], + [ + 
1397, + 4 + ], + [ + 1390, + 2 + ], + [ + 1390, + 0 + ], + [ + 1613, + 0 + ], + [ + 1561, + 223 + ], + [ + 1530, + 225 + ], + [ + 1517, + 222 + ], + [ + 1502, + 220 + ], + [ + 1502, + 245 + ], + [ + 1513, + 249 + ], + [ + 1531, + 261 + ], + [ + 1524, + 261 + ], + [ + 1510, + 253 + ], + [ + 1514, + 264 + ], + [ + 1512, + 268 + ], + [ + 1520, + 276 + ], + [ + 1510, + 278 + ], + [ + 1504, + 278 + ], + [ + 1511, + 308 + ], + [ + 1511, + 354 + ], + [ + 1509, + 389 + ], + [ + 1503, + 380 + ], + [ + 1503, + 317 + ], + [ + 1494, + 278 + ], + [ + 1488, + 268 + ], + [ + 1473, + 268 + ], + [ + 1467, + 260 + ], + [ + 1473, + 251 + ], + [ + 1473, + 244 + ], + [ + 1472, + 242 + ], + [ + 1468, + 245 + ], + [ + 1466, + 253 + ], + [ + 1460, + 254 + ], + [ + 1453, + 269 + ], + [ + 1462, + 266 + ], + [ + 1465, + 267 + ], + [ + 1454, + 275 + ], + [ + 1456, + 280 + ], + [ + 1458, + 294 + ], + [ + 1469, + 283 + ], + [ + 1473, + 285 + ], + [ + 1473, + 294 + ], + [ + 1469, + 303 + ], + [ + 1474, + 306 + ], + [ + 1463, + 318 + ], + [ + 1462, + 317 + ], + [ + 1458, + 325 + ], + [ + 1459, + 340 + ], + [ + 1461, + 351 + ], + [ + 1462, + 361 + ], + [ + 1460, + 374 + ], + [ + 1467, + 375 + ], + [ + 1485, + 378 + ], + [ + 1492, + 383 + ], + [ + 1497, + 406 + ], + [ + 1437, + 439 + ], + [ + 1419, + 436 + ], + [ + 1426, + 430 + ], + [ + 1427, + 420 + ], + [ + 1427, + 411 + ], + [ + 1418, + 407 + ], + [ + 1416, + 405 + ], + [ + 1422, + 401 + ], + [ + 1437, + 387 + ], + [ + 1442, + 377 + ], + [ + 1445, + 379 + ], + [ + 1447, + 370 + ], + [ + 1447, + 335 + ], + [ + 1446, + 303 + ], + [ + 1443, + 272 + ], + [ + 1438, + 256 + ], + [ + 1434, + 251 + ], + [ + 1430, + 257 + ], + [ + 1423, + 257 + ], + [ + 1425, + 252 + ], + [ + 1424, + 240 + ], + [ + 1418, + 244 + ], + [ + 1413, + 239 + ], + [ + 1407, + 245 + ], + [ + 1401, + 249 + ], + [ + 1378, + 257 + ], + [ + 1369, + 269 + ], + [ + 1361, + 270 + ], + [ + 1358, + 262 + ], + [ + 1362, + 255 + ], + [ + 1365, + 245 + ], + [ + 1357, + 245 + ], + [ + 1352, + 247 + ], + [ + 1345, + 252 + ], + [ + 1355, + 262 + ], + [ + 1348, + 262 + ], + [ + 1350, + 268 + ], + [ + 1344, + 266 + ], + [ + 1342, + 256 + ], + [ + 1339, + 252 + ], + [ + 1330, + 256 + ], + [ + 1326, + 253 + ], + [ + 1326, + 246 + ], + [ + 1320, + 233 + ], + [ + 1311, + 237 + ], + [ + 1313, + 231 + ], + [ + 1302, + 228 + ], + [ + 1293, + 224 + ], + [ + 1292, + 220 + ], + [ + 1297, + 210 + ], + [ + 1300, + 201 + ], + [ + 1302, + 184 + ], + [ + 1296, + 181 + ], + [ + 1302, + 167 + ], + [ + 1302, + 179 + ], + [ + 1312, + 174 + ], + [ + 1318, + 174 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1463, + 403 + ], + [ + 1460, + 407 + ], + [ + 1454, + 407 + ], + [ + 1451, + 411 + ], + [ + 1439, + 418 + ], + [ + 1435, + 423 + ], + [ + 1435, + 433 + ], + [ + 1437, + 440 + ], + [ + 1441, + 448 + ], + [ + 1443, + 449 + ], + [ + 1452, + 448 + ], + [ + 1455, + 444 + ], + [ + 1478, + 443 + ], + [ + 1480, + 444 + ], + [ + 1490, + 444 + ], + [ + 1494, + 446 + ], + [ + 1498, + 447 + ], + [ + 1514, + 441 + ], + [ + 1537, + 438 + ], + [ + 1541, + 440 + ], + [ + 1546, + 445 + ], + [ + 1555, + 445 + ], + [ + 1563, + 411 + ], + [ + 1540, + 389 + ], + [ + 1486, + 390 + ], + [ + 1473, + 396 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1612, + 395 + ], + [ + 1606, + 395 + ], + [ + 1598, + 391 + ], + [ + 1579, + 391 + ], + [ + 1573, + 392 + ], + [ + 1565, + 428 + ], + [ + 1598, + 426 + ], + [ + 1614, + 425 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1571, + 286 + ], + [ + 1573, + 339 + ], + [ + 1604, + 338 + ], + [ 
+ 1601, + 284 + ], + [ + 1572, + 285 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1586, + 272 + ], + [ + 1591, + 429 + ], + [ + 1600, + 429 + ], + [ + 1598, + 354 + ], + [ + 1597, + 339 + ], + [ + 1593, + 271 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1554, + 341 + ], + [ + 1555, + 359 + ], + [ + 1559, + 359 + ], + [ + 1558, + 338 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1551, + 317 + ], + [ + 1546, + 322 + ], + [ + 1543, + 329 + ], + [ + 1544, + 337 + ], + [ + 1548, + 341 + ], + [ + 1553, + 342 + ], + [ + 1558, + 341 + ], + [ + 1563, + 334 + ], + [ + 1563, + 324 + ], + [ + 1560, + 319 + ], + [ + 1556, + 317 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1506, + 315 + ], + [ + 1502, + 315 + ], + [ + 1506, + 353 + ], + [ + 1510, + 353 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1491, + 316 + ], + [ + 1491, + 325 + ], + [ + 1507, + 325 + ], + [ + 1506, + 315 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1486, + 327 + ], + [ + 1486, + 335 + ], + [ + 1508, + 333 + ], + [ + 1507, + 325 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1515, + 369 + ], + [ + 1514, + 360 + ], + [ + 1514, + 355 + ], + [ + 1510, + 350 + ], + [ + 1506, + 350 + ], + [ + 1502, + 354 + ], + [ + 1503, + 361 + ], + [ + 1505, + 365 + ], + [ + 1505, + 372 + ], + [ + 1507, + 379 + ], + [ + 1516, + 375 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1388, + 99 + ], + [ + 1396, + 408 + ], + [ + 1413, + 409 + ], + [ + 1410, + 347 + ], + [ + 1398, + 116 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1375, + 409 + ], + [ + 1374, + 411 + ], + [ + 1375, + 438 + ], + [ + 1379, + 440 + ], + [ + 1382, + 460 + ], + [ + 1371, + 459 + ], + [ + 1370, + 517 + ], + [ + 1370, + 524 + ], + [ + 1384, + 526 + ], + [ + 1433, + 520 + ], + [ + 1430, + 496 + ], + [ + 1429, + 461 + ], + [ + 1421, + 458 + ], + [ + 1417, + 406 + ], + [ + 1400, + 404 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1560, + 233 + ], + [ + 1566, + 225 + ], + [ + 1546, + 226 + ], + [ + 1543, + 151 + ], + [ + 1580, + 149 + ], + [ + 1607, + 0 + ], + [ + 1987, + 0 + ], + [ + 2047, + 0 + ], + [ + 2048, + 555 + ], + [ + 2014, + 545 + ], + [ + 2008, + 546 + ], + [ + 1993, + 543 + ], + [ + 1991, + 540 + ], + [ + 1958, + 530 + ], + [ + 1950, + 533 + ], + [ + 1914, + 524 + ], + [ + 1874, + 514 + ], + [ + 1847, + 507 + ], + [ + 1793, + 493 + ], + [ + 1723, + 475 + ], + [ + 1712, + 474 + ], + [ + 1689, + 469 + ], + [ + 1639, + 456 + ], + [ + 1639, + 349 + ], + [ + 1637, + 273 + ], + [ + 1561, + 275 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1548, + 370 + ], + [ + 1549, + 362 + ], + [ + 1551, + 357 + ], + [ + 1555, + 356 + ], + [ + 1560, + 356 + ], + [ + 1562, + 360 + ], + [ + 1562, + 363 + ], + [ + 1560, + 367 + ], + [ + 1560, + 369 + ], + [ + 1564, + 371 + ], + [ + 1571, + 373 + ], + [ + 1573, + 378 + ], + [ + 1576, + 388 + ], + [ + 1577, + 393 + ], + [ + 1582, + 401 + ], + [ + 1581, + 408 + ], + [ + 1578, + 413 + ], + [ + 1575, + 415 + ], + [ + 1568, + 427 + ], + [ + 1568, + 435 + ], + [ + 1567, + 447 + ], + [ + 1564, + 461 + ], + [ + 1565, + 466 + ], + [ + 1560, + 466 + ], + [ + 1557, + 462 + ], + [ + 1553, + 443 + ], + [ + 1554, + 434 + ], + [ + 1551, + 423 + ], + [ + 1546, + 415 + ], + [ + 1541, + 415 + ], + [ + 1541, + 402 + ], + [ + 1543, + 390 + ], + [ + 1543, + 379 + ], + [ + 1545, + 373 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1495, + 364 + ], + [ + 1494, + 368 + ], + [ + 1495, + 374 + ], + [ + 
1495, + 378 + ], + [ + 1488, + 384 + ], + [ + 1485, + 389 + ], + [ + 1485, + 404 + ], + [ + 1484, + 411 + ], + [ + 1486, + 415 + ], + [ + 1490, + 418 + ], + [ + 1494, + 419 + ], + [ + 1495, + 440 + ], + [ + 1497, + 458 + ], + [ + 1497, + 467 + ], + [ + 1501, + 469 + ], + [ + 1507, + 469 + ], + [ + 1507, + 464 + ], + [ + 1508, + 456 + ], + [ + 1510, + 444 + ], + [ + 1514, + 434 + ], + [ + 1516, + 385 + ], + [ + 1507, + 371 + ], + [ + 1505, + 366 + ], + [ + 1502, + 363 + ], + [ + 1498, + 363 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1523, + 351 + ], + [ + 1518, + 354 + ], + [ + 1516, + 361 + ], + [ + 1513, + 366 + ], + [ + 1510, + 372 + ], + [ + 1505, + 376 + ], + [ + 1504, + 388 + ], + [ + 1500, + 397 + ], + [ + 1498, + 412 + ], + [ + 1498, + 420 + ], + [ + 1501, + 423 + ], + [ + 1506, + 422 + ], + [ + 1507, + 421 + ], + [ + 1510, + 421 + ], + [ + 1513, + 426 + ], + [ + 1514, + 434 + ], + [ + 1515, + 441 + ], + [ + 1514, + 451 + ], + [ + 1517, + 460 + ], + [ + 1517, + 466 + ], + [ + 1518, + 468 + ], + [ + 1524, + 469 + ], + [ + 1528, + 469 + ], + [ + 1528, + 464 + ], + [ + 1528, + 462 + ], + [ + 1530, + 448 + ], + [ + 1533, + 426 + ], + [ + 1534, + 422 + ], + [ + 1539, + 422 + ], + [ + 1543, + 418 + ], + [ + 1542, + 405 + ], + [ + 1540, + 388 + ], + [ + 1539, + 376 + ], + [ + 1537, + 373 + ], + [ + 1534, + 370 + ], + [ + 1532, + 360 + ], + [ + 1528, + 354 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1658, + 494 + ], + [ + 1661, + 481 + ], + [ + 1665, + 455 + ], + [ + 1672, + 443 + ], + [ + 1667, + 421 + ], + [ + 1662, + 406 + ], + [ + 1651, + 400 + ], + [ + 1658, + 360 + ], + [ + 1657, + 346 + ], + [ + 1658, + 336 + ], + [ + 1664, + 328 + ], + [ + 1675, + 326 + ], + [ + 1677, + 319 + ], + [ + 1678, + 309 + ], + [ + 1685, + 302 + ], + [ + 1693, + 300 + ], + [ + 1701, + 305 + ], + [ + 1701, + 310 + ], + [ + 1703, + 320 + ], + [ + 1698, + 327 + ], + [ + 1695, + 330 + ], + [ + 1698, + 334 + ], + [ + 1702, + 339 + ], + [ + 1704, + 371 + ], + [ + 1703, + 396 + ], + [ + 1698, + 412 + ], + [ + 1690, + 439 + ], + [ + 1686, + 449 + ], + [ + 1689, + 468 + ], + [ + 1692, + 478 + ], + [ + 1696, + 486 + ], + [ + 1700, + 494 + ], + [ + 1700, + 497 + ], + [ + 1688, + 499 + ], + [ + 1679, + 498 + ], + [ + 1667, + 497 + ], + [ + 1658, + 497 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1542, + 33 + ], + [ + 1542, + 21 + ], + [ + 1546, + 11 + ], + [ + 1549, + 9 + ], + [ + 1582, + 4 + ], + [ + 1592, + 1 + ], + [ + 1616, + 1 + ], + [ + 1607, + 170 + ], + [ + 1596, + 169 + ], + [ + 1593, + 168 + ], + [ + 1592, + 160 + ], + [ + 1582, + 159 + ], + [ + 1580, + 148 + ], + [ + 1548, + 140 + ], + [ + 1545, + 132 + ], + [ + 1546, + 120 + ], + [ + 1553, + 115 + ], + [ + 1582, + 111 + ], + [ + 1580, + 97 + ], + [ + 1565, + 95 + ], + [ + 1546, + 88 + ], + [ + 1544, + 81 + ], + [ + 1545, + 71 + ], + [ + 1547, + 66 + ], + [ + 1551, + 63 + ], + [ + 1565, + 62 + ], + [ + 1565, + 50 + ], + [ + 1580, + 45 + ], + [ + 1546, + 36 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1639, + 616 + ], + [ + 1635, + 446 + ], + [ + 1629, + 426 + ], + [ + 1626, + 236 + ], + [ + 1625, + 154 + ], + [ + 1626, + 89 + ], + [ + 1598, + 88 + ], + [ + 1603, + 167 + ], + [ + 1608, + 432 + ], + [ + 1603, + 455 + ], + [ + 1607, + 623 + ], + [ + 1625, + 622 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1741, + 87 + ], + [ + 1643, + 91 + ], + [ + 1642, + 97 + ], + [ + 1743, + 92 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1647, + 146 + ], + [ + 1646, + 1 + ], + [ 
+ 1591, + 1 + ], + [ + 1590, + 149 + ], + [ + 1604, + 150 + ], + [ + 1610, + 160 + ], + [ + 1635, + 156 + ], + [ + 1636, + 152 + ], + [ + 1637, + 148 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1631, + 175 + ], + [ + 1619, + 165 + ], + [ + 1609, + 162 + ], + [ + 1599, + 163 + ], + [ + 1593, + 166 + ], + [ + 1584, + 175 + ], + [ + 1576, + 193 + ], + [ + 1575, + 215 + ], + [ + 1579, + 230 + ], + [ + 1585, + 239 + ], + [ + 1595, + 246 + ], + [ + 1607, + 247 + ], + [ + 1614, + 246 + ], + [ + 1624, + 239 + ], + [ + 1635, + 223 + ], + [ + 1639, + 206 + ], + [ + 1637, + 186 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1740, + 70 + ], + [ + 1779, + 1 + ], + [ + 1660, + 1 + ], + [ + 1711, + 80 + ], + [ + 1717, + 89 + ], + [ + 1720, + 92 + ], + [ + 1727, + 92 + ], + [ + 1730, + 88 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 214, + 391 + ], + [ + 206, + 400 + ], + [ + 205, + 413 + ], + [ + 210, + 426 + ], + [ + 218, + 432 + ], + [ + 230, + 434 + ], + [ + 239, + 431 + ], + [ + 248, + 425 + ], + [ + 252, + 417 + ], + [ + 253, + 406 + ], + [ + 247, + 394 + ], + [ + 236, + 388 + ], + [ + 224, + 388 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 240, + 540 + ], + [ + 238, + 476 + ], + [ + 236, + 471 + ], + [ + 233, + 387 + ], + [ + 220, + 388 + ], + [ + 222, + 469 + ], + [ + 220, + 476 + ], + [ + 223, + 539 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000077_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000077_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..6ee6359442eba6b71067d4ed25d51ebab1d853f6 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000077_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000078_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000078_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..466840a34f9db9b1d24841cdee2e0cb41f091909 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000078_000019_gtFine_polygons.json @@ -0,0 +1,6843 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 1742, + 394 + ], + [ + 1242, + 407 + ], + [ + 0, + 379 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 447 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 1638, + 1 + ], + [ + 1648, + 267 + ], + [ + 1582, + 341 + ], + [ + 1493, + 347 + ], + [ + 953, + 209 + ], + [ + 625, + 0 + ], + [ + 1582, + 0 + ], + [ + 1638, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1392, + 449 + ], + [ + 1335, + 450 + ], + [ + 1213, + 459 + 
], + [ + 1095, + 466 + ], + [ + 924, + 477 + ], + [ + 685, + 495 + ], + [ + 603, + 502 + ], + [ + 421, + 517 + ], + [ + 328, + 519 + ], + [ + 217, + 523 + ], + [ + 104, + 527 + ], + [ + 43, + 528 + ], + [ + 0, + 529 + ], + [ + 0, + 408 + ], + [ + 1420, + 403 + ], + [ + 1421, + 443 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1592, + 269 + ], + [ + 1571, + 281 + ], + [ + 1563, + 281 + ], + [ + 1557, + 288 + ], + [ + 1542, + 294 + ], + [ + 1535, + 278 + ], + [ + 1519, + 265 + ], + [ + 1483, + 233 + ], + [ + 1469, + 232 + ], + [ + 1461, + 222 + ], + [ + 1444, + 217 + ], + [ + 1454, + 448 + ], + [ + 1473, + 486 + ], + [ + 1608, + 491 + ], + [ + 1619, + 335 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1493, + 410 + ], + [ + 1488, + 404 + ], + [ + 1465, + 407 + ], + [ + 1456, + 412 + ], + [ + 1459, + 445 + ], + [ + 1487, + 457 + ], + [ + 1496, + 422 + ], + [ + 1493, + 415 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1482, + 367 + ], + [ + 1485, + 347 + ], + [ + 1465, + 343 + ], + [ + 1465, + 365 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1479, + 325 + ], + [ + 1472, + 325 + ], + [ + 1466, + 330 + ], + [ + 1465, + 338 + ], + [ + 1467, + 342 + ], + [ + 1473, + 344 + ], + [ + 1479, + 344 + ], + [ + 1483, + 344 + ], + [ + 1486, + 339 + ], + [ + 1486, + 331 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1453, + 190 + ], + [ + 1453, + 165 + ], + [ + 1456, + 157 + ], + [ + 1450, + 148 + ], + [ + 1424, + 150 + ], + [ + 1417, + 141 + ], + [ + 1363, + 74 + ], + [ + 1363, + 59 + ], + [ + 1356, + 49 + ], + [ + 1342, + 45 + ], + [ + 1342, + 40 + ], + [ + 1331, + 39 + ], + [ + 1331, + 49 + ], + [ + 1321, + 49 + ], + [ + 1321, + 43 + ], + [ + 1307, + 43 + ], + [ + 1291, + 45 + ], + [ + 1275, + 48 + ], + [ + 1275, + 55 + ], + [ + 1260, + 58 + ], + [ + 1259, + 49 + ], + [ + 1250, + 53 + ], + [ + 1251, + 77 + ], + [ + 1252, + 81 + ], + [ + 1238, + 82 + ], + [ + 1236, + 90 + ], + [ + 1235, + 146 + ], + [ + 1226, + 144 + ], + [ + 1216, + 145 + ], + [ + 1213, + 144 + ], + [ + 1207, + 148 + ], + [ + 1204, + 151 + ], + [ + 1203, + 157 + ], + [ + 1202, + 157 + ], + [ + 1198, + 147 + ], + [ + 1193, + 146 + ], + [ + 1191, + 141 + ], + [ + 1182, + 140 + ], + [ + 1182, + 135 + ], + [ + 1172, + 133 + ], + [ + 1176, + 124 + ], + [ + 1156, + 110 + ], + [ + 1161, + 110 + ], + [ + 1158, + 78 + ], + [ + 1155, + 76 + ], + [ + 1153, + 11 + ], + [ + 1161, + 7 + ], + [ + 1156, + 1 + ], + [ + 1144, + 1 + ], + [ + 1150, + 12 + ], + [ + 1150, + 83 + ], + [ + 1151, + 115 + ], + [ + 1118, + 167 + ], + [ + 989, + 146 + ], + [ + 848, + 61 + ], + [ + 670, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 510 + ], + [ + 221, + 501 + ], + [ + 249, + 496 + ], + [ + 511, + 477 + ], + [ + 660, + 466 + ], + [ + 864, + 466 + ], + [ + 1011, + 456 + ], + [ + 1073, + 455 + ], + [ + 1156, + 449 + ], + [ + 1200, + 441 + ], + [ + 1270, + 441 + ], + [ + 1324, + 441 + ], + [ + 1336, + 439 + ], + [ + 1354, + 437 + ], + [ + 1422, + 429 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1008, + 326 + ], + [ + 1007, + 254 + ], + [ + 1019, + 246 + ], + [ + 1012, + 236 + ], + [ + 973, + 172 + ], + [ + 983, + 163 + ], + [ + 988, + 144 + ], + [ + 993, + 140 + ], + [ + 1000, + 140 + ], + [ + 1008, + 149 + ], + [ + 1012, + 148 + ], + [ + 1013, + 142 + ], + [ + 1025, + 136 + ], + [ + 1041, + 133 + ], + [ + 1045, + 123 + ], + [ + 1048, + 124 + ], + [ + 1052, + 114 + ], + [ + 1057, + 112 + ], + [ + 1065, + 132 + ], + [ + 1084, + 136 + ], + [ + 1093, + 125 + ], + [ + 1101, + 129 + ], + [ + 
1108, + 129 + ], + [ + 1122, + 116 + ], + [ + 1127, + 117 + ], + [ + 1143, + 103 + ], + [ + 1154, + 104 + ], + [ + 1159, + 112 + ], + [ + 1169, + 108 + ], + [ + 1162, + 122 + ], + [ + 1178, + 125 + ], + [ + 1190, + 175 + ], + [ + 1216, + 175 + ], + [ + 1245, + 132 + ], + [ + 1273, + 108 + ], + [ + 1298, + 153 + ], + [ + 1317, + 146 + ], + [ + 1319, + 170 + ], + [ + 1351, + 188 + ], + [ + 1373, + 210 + ], + [ + 1389, + 209 + ], + [ + 1398, + 216 + ], + [ + 1411, + 375 + ], + [ + 1377, + 420 + ], + [ + 1336, + 440 + ], + [ + 1336, + 447 + ], + [ + 1292, + 448 + ], + [ + 1240, + 442 + ], + [ + 1198, + 442 + ], + [ + 1196, + 449 + ], + [ + 1185, + 449 + ], + [ + 1182, + 343 + ], + [ + 1167, + 342 + ], + [ + 1162, + 391 + ], + [ + 1167, + 451 + ], + [ + 1154, + 452 + ], + [ + 1153, + 385 + ], + [ + 1158, + 348 + ], + [ + 1157, + 340 + ], + [ + 1108, + 336 + ], + [ + 1101, + 455 + ], + [ + 1092, + 455 + ], + [ + 1094, + 382 + ], + [ + 1097, + 335 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1321, + 321 + ], + [ + 1321, + 380 + ], + [ + 1296, + 381 + ], + [ + 1286, + 382 + ], + [ + 1285, + 444 + ], + [ + 1288, + 448 + ], + [ + 1336, + 447 + ], + [ + 1337, + 436 + ], + [ + 1329, + 398 + ], + [ + 1326, + 322 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1357, + 373 + ], + [ + 1356, + 378 + ], + [ + 1361, + 379 + ], + [ + 1363, + 423 + ], + [ + 1397, + 425 + ], + [ + 1394, + 370 + ], + [ + 1379, + 370 + ], + [ + 1366, + 370 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1172, + 415 + ], + [ + 1170, + 412 + ], + [ + 1167, + 409 + ], + [ + 1164, + 409 + ], + [ + 1162, + 415 + ], + [ + 1162, + 430 + ], + [ + 1164, + 434 + ], + [ + 1165, + 449 + ], + [ + 1170, + 449 + ], + [ + 1172, + 435 + ], + [ + 1174, + 429 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1175, + 413 + ], + [ + 1171, + 419 + ], + [ + 1170, + 429 + ], + [ + 1172, + 440 + ], + [ + 1175, + 449 + ], + [ + 1179, + 451 + ], + [ + 1183, + 451 + ], + [ + 1185, + 442 + ], + [ + 1187, + 425 + ], + [ + 1183, + 414 + ], + [ + 1180, + 408 + ], + [ + 1176, + 407 + ], + [ + 1175, + 409 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1228, + 452 + ], + [ + 1223, + 288 + ], + [ + 1216, + 280 + ], + [ + 1217, + 452 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1305, + 307 + ], + [ + 1304, + 289 + ], + [ + 1291, + 289 + ], + [ + 1292, + 308 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1226, + 306 + ], + [ + 1218, + 310 + ], + [ + 1215, + 315 + ], + [ + 1212, + 324 + ], + [ + 1213, + 452 + ], + [ + 1218, + 452 + ], + [ + 1215, + 327 + ], + [ + 1216, + 320 + ], + [ + 1219, + 315 + ], + [ + 1224, + 310 + ], + [ + 1277, + 297 + ], + [ + 1297, + 296 + ], + [ + 1297, + 292 + ], + [ + 1278, + 294 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1395, + 425 + ], + [ + 1387, + 417 + ], + [ + 1355, + 418 + ], + [ + 1349, + 427 + ], + [ + 1345, + 430 + ], + [ + 1344, + 434 + ], + [ + 1343, + 442 + ], + [ + 1343, + 448 + ], + [ + 1346, + 453 + ], + [ + 1357, + 452 + ], + [ + 1370, + 452 + ], + [ + 1397, + 449 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1339, + 496 + ], + [ + 1343, + 485 + ], + [ + 1374, + 478 + ], + [ + 1439, + 464 + ], + [ + 1547, + 453 + ], + [ + 1578, + 449 + ], + [ + 1623, + 463 + ], + [ + 1647, + 487 + ], + [ + 1621, + 507 + ], + [ + 1456, + 505 + ], + [ + 1363, + 503 + ], + [ + 1345, + 500 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1392, + 441 + ], + [ + 1386, + 281 + ], + [ + 1395, + 247 + ], + [ + 1402, + 188 + 
], + [ + 1462, + 183 + ], + [ + 1464, + 250 + ], + [ + 1466, + 371 + ], + [ + 1467, + 415 + ], + [ + 1467, + 441 + ], + [ + 1464, + 478 + ], + [ + 1406, + 482 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1330, + 559 + ], + [ + 1337, + 554 + ], + [ + 1356, + 550 + ], + [ + 1404, + 543 + ], + [ + 1457, + 539 + ], + [ + 1535, + 534 + ], + [ + 1623, + 532 + ], + [ + 2048, + 573 + ], + [ + 2048, + 949 + ], + [ + 1539, + 680 + ], + [ + 1338, + 569 + ], + [ + 1331, + 565 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1602, + 412 + ], + [ + 1599, + 346 + ], + [ + 1591, + 264 + ], + [ + 1590, + 199 + ], + [ + 1571, + 200 + ], + [ + 1566, + 195 + ], + [ + 1569, + 192 + ], + [ + 1569, + 189 + ], + [ + 1564, + 188 + ], + [ + 1564, + 127 + ], + [ + 1564, + 0 + ], + [ + 1981, + 0 + ], + [ + 1985, + 487 + ], + [ + 1642, + 483 + ], + [ + 1619, + 476 + ], + [ + 1606, + 477 + ], + [ + 1605, + 470 + ], + [ + 1599, + 470 + ], + [ + 1594, + 439 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1576, + 412 + ], + [ + 1585, + 398 + ], + [ + 1591, + 405 + ], + [ + 1597, + 390 + ], + [ + 1602, + 390 + ], + [ + 1605, + 407 + ], + [ + 1608, + 468 + ], + [ + 1599, + 471 + ], + [ + 1596, + 463 + ], + [ + 1590, + 462 + ], + [ + 1589, + 468 + ], + [ + 1586, + 469 + ], + [ + 1586, + 457 + ], + [ + 1578, + 456 + ], + [ + 1578, + 462 + ], + [ + 1575, + 463 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1676, + 216 + ], + [ + 1677, + 289 + ], + [ + 1677, + 297 + ], + [ + 1708, + 297 + ], + [ + 1707, + 213 + ], + [ + 1700, + 211 + ], + [ + 1696, + 207 + ], + [ + 1689, + 207 + ], + [ + 1685, + 209 + ], + [ + 1682, + 213 + ], + [ + 1676, + 214 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1727, + 298 + ], + [ + 1715, + 297 + ], + [ + 1714, + 210 + ], + [ + 1721, + 207 + ], + [ + 1748, + 221 + ], + [ + 1755, + 225 + ], + [ + 1753, + 229 + ], + [ + 1747, + 233 + ], + [ + 1740, + 233 + ], + [ + 1739, + 243 + ], + [ + 1751, + 248 + ], + [ + 1754, + 252 + ], + [ + 1752, + 258 + ], + [ + 1743, + 260 + ], + [ + 1734, + 262 + ], + [ + 1736, + 276 + ], + [ + 1751, + 277 + ], + [ + 1751, + 284 + ], + [ + 1749, + 288 + ], + [ + 1738, + 292 + ], + [ + 1736, + 298 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1705, + 113 + ], + [ + 1705, + 382 + ], + [ + 1704, + 398 + ], + [ + 1703, + 450 + ], + [ + 1717, + 448 + ], + [ + 1718, + 277 + ], + [ + 1717, + 191 + ], + [ + 1719, + 103 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1753, + 190 + ], + [ + 1720, + 201 + ], + [ + 1734, + 271 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1736, + 134 + ], + [ + 1729, + 149 + ], + [ + 1726, + 171 + ], + [ + 1728, + 193 + ], + [ + 1731, + 200 + ], + [ + 1740, + 199 + ], + [ + 1744, + 192 + ], + [ + 1748, + 171 + ], + [ + 1749, + 152 + ], + [ + 1747, + 138 + ], + [ + 1743, + 133 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1575, + 497 + ], + [ + 1572, + 438 + ], + [ + 1570, + 432 + ], + [ + 1569, + 294 + ], + [ + 1567, + 264 + ], + [ + 1567, + 187 + ], + [ + 1568, + 143 + ], + [ + 1571, + 117 + ], + [ + 1590, + 66 + ], + [ + 1600, + 57 + ], + [ + 1600, + 105 + ], + [ + 1603, + 106 + ], + [ + 1605, + 40 + ], + [ + 1601, + 39 + ], + [ + 1600, + 49 + ], + [ + 1595, + 52 + ], + [ + 1586, + 62 + ], + [ + 1565, + 117 + ], + [ + 1562, + 171 + ], + [ + 1563, + 268 + ], + [ + 1561, + 276 + ], + [ + 1563, + 431 + ], + [ + 1561, + 440 + ], + [ + 1560, + 498 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1622, + 105 + ], + 
[ + 1603, + 107 + ], + [ + 1602, + 37 + ], + [ + 1604, + 36 + ], + [ + 1613, + 36 + ], + [ + 1624, + 38 + ], + [ + 1625, + 43 + ], + [ + 1637, + 45 + ], + [ + 1641, + 49 + ], + [ + 1639, + 54 + ], + [ + 1633, + 57 + ], + [ + 1623, + 56 + ], + [ + 1623, + 64 + ], + [ + 1638, + 65 + ], + [ + 1640, + 69 + ], + [ + 1639, + 74 + ], + [ + 1636, + 77 + ], + [ + 1624, + 79 + ], + [ + 1624, + 84 + ], + [ + 1639, + 87 + ], + [ + 1636, + 95 + ], + [ + 1630, + 97 + ], + [ + 1622, + 98 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1577, + 248 + ], + [ + 1574, + 260 + ], + [ + 1574, + 271 + ], + [ + 1574, + 280 + ], + [ + 1576, + 283 + ], + [ + 1578, + 285 + ], + [ + 1581, + 285 + ], + [ + 1585, + 280 + ], + [ + 1586, + 268 + ], + [ + 1585, + 256 + ], + [ + 1582, + 249 + ], + [ + 1581, + 248 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1612, + 249 + ], + [ + 1601, + 252 + ], + [ + 1592, + 259 + ], + [ + 1591, + 272 + ], + [ + 1593, + 281 + ], + [ + 1599, + 289 + ], + [ + 1607, + 291 + ], + [ + 1618, + 291 + ], + [ + 1625, + 287 + ], + [ + 1631, + 280 + ], + [ + 1634, + 271 + ], + [ + 1631, + 262 + ], + [ + 1623, + 252 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1390, + 455 + ], + [ + 1389, + 345 + ], + [ + 1396, + 346 + ], + [ + 1399, + 457 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1471, + 444 + ], + [ + 1477, + 361 + ], + [ + 1474, + 361 + ], + [ + 1468, + 445 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 1359, + 471 + ], + [ + 1360, + 459 + ], + [ + 1367, + 449 + ], + [ + 1374, + 445 + ], + [ + 1387, + 444 + ], + [ + 1397, + 443 + ], + [ + 1415, + 424 + ], + [ + 1425, + 424 + ], + [ + 1435, + 430 + ], + [ + 1453, + 430 + ], + [ + 1463, + 430 + ], + [ + 1471, + 434 + ], + [ + 1484, + 440 + ], + [ + 1490, + 458 + ], + [ + 1487, + 472 + ], + [ + 1477, + 481 + ], + [ + 1465, + 482 + ], + [ + 1457, + 483 + ], + [ + 1447, + 482 + ], + [ + 1422, + 483 + ], + [ + 1406, + 484 + ], + [ + 1392, + 485 + ], + [ + 1380, + 486 + ], + [ + 1367, + 482 + ], + [ + 1362, + 476 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1406, + 424 + ], + [ + 1405, + 282 + ], + [ + 1392, + 282 + ], + [ + 1392, + 298 + ], + [ + 1394, + 424 + ], + [ + 1397, + 489 + ], + [ + 1408, + 489 + ], + [ + 1406, + 426 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1374, + 299 + ], + [ + 1373, + 303 + ], + [ + 1375, + 307 + ], + [ + 1388, + 312 + ], + [ + 1388, + 316 + ], + [ + 1372, + 317 + ], + [ + 1374, + 324 + ], + [ + 1387, + 327 + ], + [ + 1386, + 332 + ], + [ + 1374, + 332 + ], + [ + 1376, + 339 + ], + [ + 1387, + 343 + ], + [ + 1388, + 348 + ], + [ + 1397, + 348 + ], + [ + 1397, + 296 + ], + [ + 1389, + 295 + ], + [ + 1385, + 300 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1412, + 303 + ], + [ + 1404, + 302 + ], + [ + 1398, + 305 + ], + [ + 1394, + 309 + ], + [ + 1393, + 317 + ], + [ + 1395, + 323 + ], + [ + 1398, + 326 + ], + [ + 1403, + 328 + ], + [ + 1409, + 327 + ], + [ + 1414, + 325 + ], + [ + 1418, + 321 + ], + [ + 1419, + 315 + ], + [ + 1416, + 307 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1375, + 235 + ], + [ + 1376, + 288 + ], + [ + 1459, + 285 + ], + [ + 1460, + 229 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1414, + 420 + ], + [ + 1390, + 420 + ], + [ + 1394, + 427 + ], + [ + 1395, + 449 + ], + [ + 1397, + 455 + ], + [ + 1405, + 457 + ], + [ + 1411, + 454 + ], + [ + 1414, + 449 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1509, + 522 + ], + [ + 1508, 
+ 510 + ], + [ + 1507, + 503 + ], + [ + 1506, + 492 + ], + [ + 1505, + 462 + ], + [ + 1488, + 466 + ], + [ + 1487, + 453 + ], + [ + 1488, + 429 + ], + [ + 1491, + 404 + ], + [ + 1495, + 389 + ], + [ + 1505, + 384 + ], + [ + 1510, + 379 + ], + [ + 1506, + 373 + ], + [ + 1505, + 364 + ], + [ + 1508, + 358 + ], + [ + 1511, + 356 + ], + [ + 1517, + 356 + ], + [ + 1522, + 358 + ], + [ + 1525, + 362 + ], + [ + 1526, + 368 + ], + [ + 1525, + 374 + ], + [ + 1522, + 376 + ], + [ + 1527, + 380 + ], + [ + 1536, + 383 + ], + [ + 1543, + 388 + ], + [ + 1553, + 428 + ], + [ + 1554, + 444 + ], + [ + 1548, + 458 + ], + [ + 1552, + 469 + ], + [ + 1556, + 506 + ], + [ + 1542, + 512 + ], + [ + 1540, + 526 + ], + [ + 1540, + 536 + ], + [ + 1527, + 535 + ], + [ + 1526, + 527 + ], + [ + 1529, + 519 + ], + [ + 1529, + 490 + ], + [ + 1522, + 463 + ], + [ + 1519, + 491 + ], + [ + 1522, + 527 + ], + [ + 1521, + 535 + ], + [ + 1506, + 536 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1663, + 164 + ], + [ + 1664, + 224 + ], + [ + 1678, + 209 + ], + [ + 1682, + 194 + ], + [ + 1680, + 180 + ], + [ + 1669, + 165 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1664, + 382 + ], + [ + 1663, + 393 + ], + [ + 1665, + 402 + ], + [ + 1663, + 411 + ], + [ + 1660, + 429 + ], + [ + 1666, + 442 + ], + [ + 1666, + 451 + ], + [ + 1669, + 470 + ], + [ + 1702, + 479 + ], + [ + 1707, + 443 + ], + [ + 1713, + 440 + ], + [ + 1720, + 424 + ], + [ + 1722, + 405 + ], + [ + 1711, + 382 + ], + [ + 1698, + 370 + ], + [ + 1688, + 365 + ], + [ + 1687, + 357 + ], + [ + 1684, + 351 + ], + [ + 1677, + 345 + ], + [ + 1667, + 347 + ], + [ + 1661, + 355 + ], + [ + 1664, + 367 + ], + [ + 1666, + 373 + ], + [ + 1669, + 376 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1619, + 381 + ], + [ + 1611, + 385 + ], + [ + 1607, + 392 + ], + [ + 1603, + 410 + ], + [ + 1600, + 419 + ], + [ + 1604, + 422 + ], + [ + 1610, + 425 + ], + [ + 1610, + 453 + ], + [ + 1613, + 470 + ], + [ + 1618, + 483 + ], + [ + 1661, + 469 + ], + [ + 1657, + 448 + ], + [ + 1655, + 424 + ], + [ + 1660, + 422 + ], + [ + 1664, + 418 + ], + [ + 1662, + 403 + ], + [ + 1657, + 386 + ], + [ + 1654, + 383 + ], + [ + 1648, + 381 + ], + [ + 1653, + 374 + ], + [ + 1651, + 360 + ], + [ + 1643, + 353 + ], + [ + 1632, + 353 + ], + [ + 1626, + 359 + ], + [ + 1623, + 370 + ], + [ + 1623, + 378 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1447, + 555 + ], + [ + 1446, + 447 + ], + [ + 1442, + 439 + ], + [ + 1440, + 288 + ], + [ + 1439, + 153 + ], + [ + 1437, + 145 + ], + [ + 1435, + 0 + ], + [ + 1427, + 0 + ], + [ + 1428, + 145 + ], + [ + 1425, + 155 + ], + [ + 1426, + 187 + ], + [ + 1429, + 438 + ], + [ + 1425, + 451 + ], + [ + 1426, + 555 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1392, + 198 + ], + [ + 1390, + 203 + ], + [ + 1392, + 212 + ], + [ + 1395, + 214 + ], + [ + 1416, + 218 + ], + [ + 1416, + 227 + ], + [ + 1397, + 232 + ], + [ + 1394, + 235 + ], + [ + 1395, + 245 + ], + [ + 1398, + 247 + ], + [ + 1416, + 250 + ], + [ + 1416, + 258 + ], + [ + 1395, + 261 + ], + [ + 1393, + 264 + ], + [ + 1394, + 275 + ], + [ + 1397, + 278 + ], + [ + 1418, + 282 + ], + [ + 1418, + 288 + ], + [ + 1421, + 289 + ], + [ + 1426, + 292 + ], + [ + 1428, + 283 + ], + [ + 1434, + 268 + ], + [ + 1428, + 190 + ], + [ + 1416, + 194 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1432, + 293 + ], + [ + 1431, + 284 + ], + [ + 1421, + 284 + ], + [ + 1417, + 195 + ], + [ + 1419, + 188 + ], + [ + 1423, + 184 + ], + [ + 1456, + 184 + 
], + [ + 1456, + 282 + ], + [ + 1451, + 284 + ], + [ + 1447, + 290 + ], + [ + 1442, + 290 + ], + [ + 1441, + 293 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1530, + 54 + ], + [ + 1442, + 56 + ], + [ + 1435, + 59 + ], + [ + 1435, + 85 + ], + [ + 1444, + 86 + ], + [ + 1468, + 86 + ], + [ + 1469, + 100 + ], + [ + 1507, + 100 + ], + [ + 1507, + 83 + ], + [ + 1528, + 84 + ], + [ + 1536, + 80 + ], + [ + 1536, + 60 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1399, + 80 + ], + [ + 1397, + 89 + ], + [ + 1386, + 91 + ], + [ + 1376, + 95 + ], + [ + 1374, + 99 + ], + [ + 1376, + 104 + ], + [ + 1387, + 110 + ], + [ + 1388, + 123 + ], + [ + 1380, + 123 + ], + [ + 1375, + 128 + ], + [ + 1374, + 134 + ], + [ + 1376, + 138 + ], + [ + 1387, + 143 + ], + [ + 1387, + 155 + ], + [ + 1378, + 156 + ], + [ + 1375, + 161 + ], + [ + 1375, + 169 + ], + [ + 1377, + 172 + ], + [ + 1387, + 176 + ], + [ + 1388, + 183 + ], + [ + 1390, + 188 + ], + [ + 1388, + 192 + ], + [ + 1391, + 198 + ], + [ + 1406, + 196 + ], + [ + 1406, + 188 + ], + [ + 1414, + 188 + ], + [ + 1416, + 184 + ], + [ + 1416, + 119 + ], + [ + 1418, + 114 + ], + [ + 1423, + 113 + ], + [ + 1428, + 107 + ], + [ + 1428, + 77 + ], + [ + 1420, + 81 + ], + [ + 1415, + 80 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1524, + 210 + ], + [ + 1531, + 197 + ], + [ + 1532, + 180 + ], + [ + 1528, + 168 + ], + [ + 1519, + 158 + ], + [ + 1504, + 152 + ], + [ + 1489, + 152 + ], + [ + 1476, + 157 + ], + [ + 1466, + 167 + ], + [ + 1461, + 181 + ], + [ + 1461, + 198 + ], + [ + 1466, + 213 + ], + [ + 1474, + 221 + ], + [ + 1486, + 225 + ], + [ + 1503, + 224 + ], + [ + 1516, + 219 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1534, + 251 + ], + [ + 1498, + 217 + ], + [ + 1463, + 253 + ], + [ + 1497, + 287 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1812, + 427 + ], + [ + 1801, + 415 + ], + [ + 1815, + 404 + ], + [ + 1787, + 375 + ], + [ + 1785, + 366 + ], + [ + 1793, + 356 + ], + [ + 1804, + 320 + ], + [ + 1768, + 310 + ], + [ + 1779, + 301 + ], + [ + 1810, + 295 + ], + [ + 1817, + 306 + ], + [ + 1838, + 311 + ], + [ + 1846, + 297 + ], + [ + 1854, + 271 + ], + [ + 1881, + 256 + ], + [ + 1982, + 250 + ], + [ + 1986, + 474 + ], + [ + 1889, + 479 + ], + [ + 1859, + 440 + ], + [ + 1855, + 431 + ], + [ + 1828, + 429 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1832, + 225 + ], + [ + 1829, + 249 + ], + [ + 1866, + 251 + ], + [ + 1867, + 270 + ], + [ + 1951, + 262 + ], + [ + 1951, + 453 + ], + [ + 1955, + 503 + ], + [ + 2048, + 521 + ], + [ + 2048, + 1 + ], + [ + 1858, + 1 + ], + [ + 1856, + 108 + ], + [ + 1910, + 116 + ], + [ + 1915, + 151 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1763, + 263 + ], + [ + 1721, + 277 + ], + [ + 1702, + 279 + ], + [ + 1696, + 281 + ], + [ + 1647, + 291 + ], + [ + 1647, + 296 + ], + [ + 1644, + 470 + ], + [ + 1649, + 470 + ], + [ + 1649, + 463 + ], + [ + 1653, + 299 + ], + [ + 1657, + 294 + ], + [ + 1696, + 290 + ], + [ + 1696, + 477 + ], + [ + 1702, + 474 + ], + [ + 1704, + 289 + ], + [ + 1759, + 281 + ], + [ + 1764, + 480 + ], + [ + 1770, + 482 + ], + [ + 1768, + 279 + ], + [ + 1793, + 273 + ], + [ + 1784, + 484 + ], + [ + 1792, + 484 + ], + [ + 1799, + 272 + ], + [ + 1867, + 257 + ], + [ + 1871, + 254 + ], + [ + 1887, + 252 + ], + [ + 1889, + 473 + ], + [ + 1899, + 474 + ], + [ + 1899, + 465 + ], + [ + 1898, + 251 + ], + [ + 1950, + 241 + ], + [ + 2009, + 229 + ], + [ + 2013, + 272 + ], + [ + 2024, + 264 + ], + [ + 
2023, + 226 + ], + [ + 2048, + 220 + ], + [ + 2048, + 204 + ], + [ + 2019, + 202 + ], + [ + 1949, + 224 + ], + [ + 1891, + 234 + ], + [ + 1870, + 242 + ], + [ + 1836, + 247 + ], + [ + 1793, + 253 + ], + [ + 1793, + 259 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1618, + 541 + ], + [ + 1618, + 496 + ], + [ + 1612, + 487 + ], + [ + 1614, + 474 + ], + [ + 1632, + 472 + ], + [ + 1640, + 463 + ], + [ + 1655, + 457 + ], + [ + 1670, + 456 + ], + [ + 1687, + 456 + ], + [ + 1687, + 444 + ], + [ + 1737, + 440 + ], + [ + 1736, + 444 + ], + [ + 1802, + 440 + ], + [ + 1804, + 458 + ], + [ + 1813, + 454 + ], + [ + 1807, + 438 + ], + [ + 1829, + 437 + ], + [ + 1831, + 430 + ], + [ + 1840, + 427 + ], + [ + 1848, + 429 + ], + [ + 1850, + 434 + ], + [ + 1874, + 434 + ], + [ + 1906, + 434 + ], + [ + 1918, + 433 + ], + [ + 1952, + 436 + ], + [ + 1964, + 436 + ], + [ + 2048, + 449 + ], + [ + 2048, + 631 + ], + [ + 2048, + 631 + ], + [ + 1994, + 626 + ], + [ + 1979, + 609 + ], + [ + 1958, + 604 + ], + [ + 1957, + 615 + ], + [ + 1897, + 617 + ], + [ + 1895, + 604 + ], + [ + 1830, + 596 + ], + [ + 1828, + 590 + ], + [ + 1801, + 587 + ], + [ + 1758, + 580 + ], + [ + 1755, + 571 + ], + [ + 1740, + 567 + ], + [ + 1738, + 557 + ], + [ + 1690, + 560 + ], + [ + 1672, + 554 + ], + [ + 1652, + 554 + ], + [ + 1648, + 548 + ], + [ + 1624, + 545 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 2045, + 731 + ], + [ + 2040, + 676 + ], + [ + 2015, + 606 + ], + [ + 1996, + 512 + ], + [ + 1961, + 486 + ], + [ + 1958, + 477 + ], + [ + 1964, + 422 + ], + [ + 1978, + 401 + ], + [ + 1988, + 350 + ], + [ + 1998, + 332 + ], + [ + 2016, + 314 + ], + [ + 2006, + 291 + ], + [ + 2007, + 270 + ], + [ + 2019, + 255 + ], + [ + 2039, + 250 + ], + [ + 2048, + 251 + ], + [ + 2048, + 736 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1743, + 631 + ], + [ + 1727, + 628 + ], + [ + 1722, + 616 + ], + [ + 1726, + 609 + ], + [ + 1730, + 596 + ], + [ + 1741, + 589 + ], + [ + 1755, + 588 + ], + [ + 1766, + 588 + ], + [ + 1767, + 576 + ], + [ + 1773, + 570 + ], + [ + 1789, + 569 + ], + [ + 1798, + 576 + ], + [ + 1816, + 608 + ], + [ + 1829, + 618 + ], + [ + 1842, + 652 + ], + [ + 1879, + 636 + ], + [ + 1915, + 599 + ], + [ + 1959, + 533 + ], + [ + 1976, + 498 + ], + [ + 1982, + 491 + ], + [ + 1991, + 493 + ], + [ + 1991, + 503 + ], + [ + 1968, + 550 + ], + [ + 1926, + 606 + ], + [ + 1896, + 640 + ], + [ + 1865, + 656 + ], + [ + 1846, + 661 + ], + [ + 1846, + 691 + ], + [ + 1846, + 726 + ], + [ + 1848, + 736 + ], + [ + 1843, + 742 + ], + [ + 1823, + 740 + ], + [ + 1820, + 736 + ], + [ + 1826, + 698 + ], + [ + 1816, + 672 + ], + [ + 1804, + 669 + ], + [ + 1793, + 687 + ], + [ + 1795, + 709 + ], + [ + 1790, + 731 + ], + [ + 1776, + 731 + ], + [ + 1769, + 727 + ], + [ + 1771, + 721 + ], + [ + 1755, + 706 + ], + [ + 1751, + 695 + ], + [ + 1749, + 667 + ], + [ + 1744, + 649 + ], + [ + 1746, + 633 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 997, + 326 + ], + [ + 972, + 324 + ], + [ + 947, + 325 + ], + [ + 947, + 329 + ], + [ + 951, + 332 + ], + [ + 954, + 448 + ], + [ + 951, + 457 + ], + [ + 951, + 465 + ], + [ + 997, + 465 + ], + [ + 994, + 332 + ], + [ + 998, + 331 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 877, + 444 + ], + [ + 884, + 442 + ], + [ + 884, + 434 + ], + [ + 898, + 436 + ], + [ + 908, + 429 + ], + [ + 901, + 422 + ], + [ + 911, + 420 + ], + [ + 919, + 420 + ], + [ + 921, + 389 + ], + [ + 903, + 350 + ], + [ + 913, + 349 + ], + [ + 913, + 343 + ], + [ + 918, + 344 + ], + [ 
+ 914, + 327 + ], + [ + 907, + 320 + ], + [ + 889, + 318 + ], + [ + 889, + 316 + ], + [ + 902, + 310 + ], + [ + 888, + 299 + ], + [ + 901, + 295 + ], + [ + 891, + 285 + ], + [ + 891, + 275 + ], + [ + 896, + 263 + ], + [ + 884, + 254 + ], + [ + 894, + 252 + ], + [ + 882, + 245 + ], + [ + 886, + 240 + ], + [ + 880, + 237 + ], + [ + 872, + 235 + ], + [ + 847, + 212 + ], + [ + 835, + 202 + ], + [ + 841, + 191 + ], + [ + 845, + 175 + ], + [ + 851, + 166 + ], + [ + 836, + 158 + ], + [ + 827, + 155 + ], + [ + 831, + 147 + ], + [ + 844, + 147 + ], + [ + 869, + 142 + ], + [ + 870, + 128 + ], + [ + 878, + 107 + ], + [ + 889, + 102 + ], + [ + 905, + 106 + ], + [ + 915, + 88 + ], + [ + 925, + 112 + ], + [ + 937, + 95 + ], + [ + 948, + 62 + ], + [ + 959, + 75 + ], + [ + 971, + 68 + ], + [ + 985, + 67 + ], + [ + 992, + 72 + ], + [ + 1004, + 69 + ], + [ + 1013, + 72 + ], + [ + 999, + 83 + ], + [ + 996, + 86 + ], + [ + 1001, + 90 + ], + [ + 1001, + 92 + ], + [ + 997, + 93 + ], + [ + 990, + 90 + ], + [ + 974, + 95 + ], + [ + 978, + 101 + ], + [ + 984, + 105 + ], + [ + 993, + 102 + ], + [ + 1005, + 104 + ], + [ + 1003, + 115 + ], + [ + 988, + 120 + ], + [ + 974, + 120 + ], + [ + 969, + 132 + ], + [ + 981, + 136 + ], + [ + 983, + 144 + ], + [ + 1005, + 151 + ], + [ + 994, + 175 + ], + [ + 1011, + 220 + ], + [ + 1022, + 240 + ], + [ + 1004, + 247 + ], + [ + 977, + 242 + ], + [ + 968, + 244 + ], + [ + 966, + 252 + ], + [ + 963, + 263 + ], + [ + 961, + 267 + ], + [ + 946, + 276 + ], + [ + 964, + 288 + ], + [ + 977, + 289 + ], + [ + 966, + 304 + ], + [ + 958, + 315 + ], + [ + 943, + 323 + ], + [ + 942, + 331 + ], + [ + 951, + 338 + ], + [ + 966, + 335 + ], + [ + 957, + 345 + ], + [ + 956, + 360 + ], + [ + 944, + 371 + ], + [ + 935, + 383 + ], + [ + 932, + 401 + ], + [ + 938, + 408 + ], + [ + 939, + 417 + ], + [ + 954, + 423 + ], + [ + 957, + 434 + ], + [ + 965, + 438 + ], + [ + 967, + 451 + ], + [ + 966, + 465 + ], + [ + 927, + 470 + ], + [ + 897, + 470 + ], + [ + 882, + 466 + ], + [ + 881, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 874, + 472 + ], + [ + 858, + 41 + ], + [ + 850, + 41 + ], + [ + 855, + 472 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 842, + 476 + ], + [ + 839, + 383 + ], + [ + 836, + 383 + ], + [ + 838, + 477 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 846, + 443 + ], + [ + 829, + 443 + ], + [ + 830, + 467 + ], + [ + 846, + 466 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 845, + 429 + ], + [ + 845, + 410 + ], + [ + 831, + 405 + ], + [ + 832, + 425 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 821, + 344 + ], + [ + 822, + 390 + ], + [ + 853, + 390 + ], + [ + 852, + 344 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 744, + 373 + ], + [ + 709, + 370 + ], + [ + 643, + 374 + ], + [ + 646, + 457 + ], + [ + 648, + 480 + ], + [ + 742, + 478 + ], + [ + 756, + 384 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 589, + 416 + ], + [ + 561, + 415 + ], + [ + 548, + 416 + ], + [ + 548, + 476 + ], + [ + 591, + 473 + ], + [ + 591, + 420 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 508, + 59 + ], + [ + 493, + 53 + ], + [ + 489, + 46 + ], + [ + 456, + 38 + ], + [ + 447, + 60 + ], + [ + 453, + 87 + ], + [ + 474, + 114 + ], + [ + 469, + 135 + ], + [ + 499, + 135 + ], + [ + 475, + 154 + ], + [ + 500, + 156 + ], + [ + 523, + 144 + ], + [ + 545, + 141 + ], + [ + 535, + 160 + ], + [ + 514, + 173 + ], + [ + 538, + 182 + ], + [ + 544, + 200 + ], + [ + 557, + 203 + ], + [ + 564, + 210 + ], + [ + 591, + 
209 + ], + [ + 595, + 196 + ], + [ + 591, + 188 + ], + [ + 601, + 173 + ], + [ + 611, + 182 + ], + [ + 628, + 181 + ], + [ + 633, + 191 + ], + [ + 631, + 213 + ], + [ + 624, + 209 + ], + [ + 621, + 226 + ], + [ + 608, + 216 + ], + [ + 603, + 227 + ], + [ + 607, + 249 + ], + [ + 618, + 256 + ], + [ + 623, + 266 + ], + [ + 631, + 283 + ], + [ + 631, + 309 + ], + [ + 629, + 314 + ], + [ + 628, + 329 + ], + [ + 621, + 319 + ], + [ + 614, + 317 + ], + [ + 610, + 321 + ], + [ + 614, + 324 + ], + [ + 620, + 324 + ], + [ + 623, + 338 + ], + [ + 627, + 350 + ], + [ + 625, + 379 + ], + [ + 624, + 405 + ], + [ + 601, + 413 + ], + [ + 594, + 420 + ], + [ + 587, + 403 + ], + [ + 593, + 396 + ], + [ + 586, + 389 + ], + [ + 581, + 389 + ], + [ + 575, + 402 + ], + [ + 584, + 414 + ], + [ + 568, + 410 + ], + [ + 572, + 419 + ], + [ + 588, + 424 + ], + [ + 576, + 433 + ], + [ + 576, + 444 + ], + [ + 577, + 456 + ], + [ + 579, + 459 + ], + [ + 570, + 466 + ], + [ + 582, + 484 + ], + [ + 641, + 481 + ], + [ + 656, + 483 + ], + [ + 679, + 481 + ], + [ + 678, + 475 + ], + [ + 676, + 462 + ], + [ + 668, + 454 + ], + [ + 678, + 444 + ], + [ + 676, + 438 + ], + [ + 661, + 437 + ], + [ + 651, + 429 + ], + [ + 649, + 420 + ], + [ + 642, + 419 + ], + [ + 648, + 395 + ], + [ + 649, + 383 + ], + [ + 646, + 370 + ], + [ + 649, + 334 + ], + [ + 651, + 321 + ], + [ + 658, + 311 + ], + [ + 649, + 291 + ], + [ + 649, + 268 + ], + [ + 649, + 259 + ], + [ + 657, + 234 + ], + [ + 655, + 234 + ], + [ + 647, + 242 + ], + [ + 648, + 219 + ], + [ + 648, + 212 + ], + [ + 660, + 202 + ], + [ + 654, + 193 + ], + [ + 654, + 185 + ], + [ + 664, + 189 + ], + [ + 675, + 188 + ], + [ + 675, + 194 + ], + [ + 681, + 206 + ], + [ + 695, + 208 + ], + [ + 703, + 197 + ], + [ + 710, + 200 + ], + [ + 716, + 194 + ], + [ + 721, + 202 + ], + [ + 744, + 200 + ], + [ + 751, + 206 + ], + [ + 758, + 205 + ], + [ + 774, + 206 + ], + [ + 774, + 190 + ], + [ + 801, + 188 + ], + [ + 805, + 178 + ], + [ + 793, + 173 + ], + [ + 779, + 164 + ], + [ + 785, + 157 + ], + [ + 777, + 148 + ], + [ + 788, + 138 + ], + [ + 788, + 130 + ], + [ + 804, + 117 + ], + [ + 816, + 105 + ], + [ + 833, + 95 + ], + [ + 830, + 92 + ], + [ + 810, + 95 + ], + [ + 805, + 95 + ], + [ + 777, + 99 + ], + [ + 773, + 95 + ], + [ + 778, + 91 + ], + [ + 776, + 86 + ], + [ + 761, + 84 + ], + [ + 756, + 78 + ], + [ + 738, + 77 + ], + [ + 744, + 68 + ], + [ + 756, + 68 + ], + [ + 761, + 38 + ], + [ + 745, + 40 + ], + [ + 744, + 38 + ], + [ + 736, + 39 + ], + [ + 734, + 31 + ], + [ + 722, + 30 + ], + [ + 714, + 23 + ], + [ + 709, + 9 + ], + [ + 721, + 4 + ], + [ + 721, + 1 + ], + [ + 483, + 1 + ], + [ + 477, + 10 + ], + [ + 493, + 30 + ], + [ + 503, + 43 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 84, + 509 + ], + [ + 84, + 486 + ], + [ + 90, + 477 + ], + [ + 76, + 13 + ], + [ + 71, + 2 + ], + [ + 71, + 1 + ], + [ + 93, + 1 + ], + [ + 91, + 8 + ], + [ + 86, + 13 + ], + [ + 99, + 477 + ], + [ + 104, + 485 + ], + [ + 103, + 509 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 160, + 277 + ], + [ + 157, + 283 + ], + [ + 158, + 302 + ], + [ + 161, + 309 + ], + [ + 167, + 314 + ], + [ + 168, + 315 + ], + [ + 170, + 281 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 182, + 514 + ], + [ + 181, + 460 + ], + [ + 179, + 454 + ], + [ + 173, + 305 + ], + [ + 171, + 276 + ], + [ + 166, + 288 + ], + [ + 172, + 453 + ], + [ + 169, + 462 + ], + [ + 171, + 514 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 172, + 255 + ], + [ + 166, + 258 + ], + [ + 161, + 
267 + ], + [ + 161, + 278 + ], + [ + 162, + 286 + ], + [ + 168, + 294 + ], + [ + 175, + 296 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 181, + 291 + ], + [ + 166, + 290 + ], + [ + 167, + 310 + ], + [ + 181, + 310 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 159, + 319 + ], + [ + 151, + 319 + ], + [ + 147, + 323 + ], + [ + 134, + 324 + ], + [ + 134, + 329 + ], + [ + 135, + 331 + ], + [ + 148, + 335 + ], + [ + 149, + 341 + ], + [ + 135, + 343 + ], + [ + 135, + 349 + ], + [ + 149, + 353 + ], + [ + 150, + 358 + ], + [ + 133, + 359 + ], + [ + 135, + 366 + ], + [ + 148, + 370 + ], + [ + 152, + 376 + ], + [ + 169, + 377 + ], + [ + 169, + 374 + ], + [ + 161, + 373 + ], + [ + 160, + 323 + ], + [ + 167, + 323 + ], + [ + 167, + 319 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 187, + 318 + ], + [ + 173, + 318 + ], + [ + 172, + 322 + ], + [ + 169, + 324 + ], + [ + 169, + 329 + ], + [ + 169, + 331 + ], + [ + 173, + 334 + ], + [ + 172, + 341 + ], + [ + 169, + 343 + ], + [ + 169, + 349 + ], + [ + 170, + 352 + ], + [ + 173, + 353 + ], + [ + 175, + 357 + ], + [ + 170, + 362 + ], + [ + 170, + 367 + ], + [ + 172, + 369 + ], + [ + 174, + 372 + ], + [ + 176, + 377 + ], + [ + 191, + 376 + ], + [ + 190, + 320 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 714, + 138 + ], + [ + 688, + 139 + ], + [ + 690, + 196 + ], + [ + 715, + 194 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 520, + 387 + ], + [ + 518, + 312 + ], + [ + 514, + 221 + ], + [ + 517, + 208 + ], + [ + 521, + 195 + ], + [ + 531, + 186 + ], + [ + 538, + 182 + ], + [ + 649, + 154 + ], + [ + 695, + 153 + ], + [ + 694, + 148 + ], + [ + 648, + 150 + ], + [ + 536, + 178 + ], + [ + 527, + 183 + ], + [ + 518, + 191 + ], + [ + 513, + 203 + ], + [ + 510, + 218 + ], + [ + 512, + 392 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 529, + 269 + ], + [ + 516, + 268 + ], + [ + 516, + 270 + ], + [ + 521, + 273 + ], + [ + 522, + 316 + ], + [ + 518, + 318 + ], + [ + 518, + 322 + ], + [ + 534, + 316 + ], + [ + 535, + 313 + ], + [ + 544, + 312 + ], + [ + 544, + 307 + ], + [ + 535, + 304 + ], + [ + 534, + 298 + ], + [ + 543, + 296 + ], + [ + 543, + 291 + ], + [ + 532, + 288 + ], + [ + 533, + 285 + ], + [ + 544, + 281 + ], + [ + 542, + 275 + ], + [ + 533, + 273 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 495, + 285 + ], + [ + 487, + 287 + ], + [ + 481, + 298 + ], + [ + 481, + 310 + ], + [ + 484, + 315 + ], + [ + 488, + 320 + ], + [ + 498, + 320 + ], + [ + 502, + 314 + ], + [ + 505, + 305 + ], + [ + 504, + 294 + ], + [ + 500, + 288 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 492, + 348 + ], + [ + 481, + 331 + ], + [ + 494, + 317 + ], + [ + 506, + 333 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 526, + 358 + ], + [ + 515, + 365 + ], + [ + 507, + 365 + ], + [ + 509, + 319 + ], + [ + 526, + 322 + ], + [ + 526, + 326 + ], + [ + 517, + 330 + ], + [ + 517, + 336 + ], + [ + 527, + 337 + ], + [ + 527, + 343 + ], + [ + 517, + 344 + ], + [ + 517, + 351 + ], + [ + 526, + 352 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 496, + 442 + ], + [ + 490, + 442 + ], + [ + 475, + 457 + ], + [ + 477, + 466 + ], + [ + 478, + 471 + ], + [ + 472, + 478 + ], + [ + 467, + 484 + ], + [ + 466, + 502 + ], + [ + 469, + 514 + ], + [ + 478, + 522 + ], + [ + 484, + 526 + ], + [ + 505, + 526 + ], + [ + 518, + 518 + ], + [ + 527, + 515 + ], + [ + 533, + 510 + ], + [ + 544, + 493 + ], + [ + 558, + 477 + ], + [ + 554, + 489 + ], + [ + 556, + 506 
+ ], + [ + 560, + 511 + ], + [ + 569, + 520 + ], + [ + 581, + 526 + ], + [ + 592, + 526 + ], + [ + 600, + 523 + ], + [ + 611, + 512 + ], + [ + 614, + 494 + ], + [ + 606, + 478 + ], + [ + 596, + 471 + ], + [ + 588, + 466 + ], + [ + 573, + 462 + ], + [ + 566, + 463 + ], + [ + 566, + 454 + ], + [ + 573, + 448 + ], + [ + 573, + 443 + ], + [ + 568, + 440 + ], + [ + 567, + 428 + ], + [ + 563, + 428 + ], + [ + 551, + 434 + ], + [ + 542, + 436 + ], + [ + 542, + 438 + ], + [ + 552, + 441 + ], + [ + 554, + 448 + ], + [ + 558, + 450 + ], + [ + 558, + 455 + ], + [ + 550, + 470 + ], + [ + 541, + 487 + ], + [ + 533, + 489 + ], + [ + 518, + 480 + ], + [ + 514, + 471 + ], + [ + 512, + 458 + ] + ] + }, + { + "label": "rider", + "polygon": [ + [ + 531, + 379 + ], + [ + 522, + 378 + ], + [ + 515, + 382 + ], + [ + 512, + 390 + ], + [ + 511, + 398 + ], + [ + 505, + 404 + ], + [ + 501, + 416 + ], + [ + 499, + 429 + ], + [ + 496, + 446 + ], + [ + 501, + 457 + ], + [ + 502, + 468 + ], + [ + 519, + 469 + ], + [ + 527, + 513 + ], + [ + 536, + 512 + ], + [ + 549, + 510 + ], + [ + 552, + 508 + ], + [ + 551, + 506 + ], + [ + 544, + 504 + ], + [ + 536, + 498 + ], + [ + 536, + 486 + ], + [ + 547, + 466 + ], + [ + 550, + 459 + ], + [ + 550, + 456 + ], + [ + 541, + 448 + ], + [ + 537, + 441 + ], + [ + 542, + 439 + ], + [ + 554, + 437 + ], + [ + 563, + 432 + ], + [ + 554, + 429 + ], + [ + 541, + 417 + ], + [ + 536, + 404 + ], + [ + 533, + 396 + ], + [ + 533, + 388 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 784, + 367 + ], + [ + 777, + 365 + ], + [ + 767, + 366 + ], + [ + 765, + 370 + ], + [ + 760, + 378 + ], + [ + 744, + 544 + ], + [ + 740, + 547 + ], + [ + 728, + 547 + ], + [ + 726, + 550 + ], + [ + 729, + 554 + ], + [ + 742, + 558 + ], + [ + 759, + 546 + ], + [ + 765, + 529 + ], + [ + 773, + 510 + ], + [ + 775, + 508 + ], + [ + 778, + 508 + ], + [ + 785, + 519 + ], + [ + 789, + 530 + ], + [ + 789, + 553 + ], + [ + 780, + 559 + ], + [ + 780, + 560 + ], + [ + 804, + 563 + ], + [ + 809, + 560 + ], + [ + 808, + 535 + ], + [ + 804, + 514 + ], + [ + 798, + 501 + ], + [ + 802, + 472 + ], + [ + 808, + 468 + ], + [ + 808, + 452 + ], + [ + 803, + 413 + ], + [ + 792, + 390 + ], + [ + 791, + 378 + ], + [ + 786, + 370 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 752, + 352 + ], + [ + 741, + 355 + ], + [ + 738, + 360 + ], + [ + 738, + 368 + ], + [ + 737, + 376 + ], + [ + 740, + 382 + ], + [ + 742, + 385 + ], + [ + 748, + 388 + ], + [ + 744, + 394 + ], + [ + 740, + 402 + ], + [ + 734, + 411 + ], + [ + 733, + 417 + ], + [ + 733, + 422 + ], + [ + 729, + 439 + ], + [ + 729, + 453 + ], + [ + 732, + 463 + ], + [ + 735, + 472 + ], + [ + 732, + 481 + ], + [ + 735, + 492 + ], + [ + 736, + 498 + ], + [ + 734, + 514 + ], + [ + 738, + 526 + ], + [ + 741, + 533 + ], + [ + 740, + 554 + ], + [ + 737, + 560 + ], + [ + 726, + 562 + ], + [ + 722, + 566 + ], + [ + 724, + 569 + ], + [ + 758, + 569 + ], + [ + 758, + 557 + ], + [ + 759, + 532 + ], + [ + 770, + 536 + ], + [ + 780, + 548 + ], + [ + 776, + 554 + ], + [ + 774, + 560 + ], + [ + 778, + 560 + ], + [ + 786, + 556 + ], + [ + 798, + 536 + ], + [ + 794, + 530 + ], + [ + 780, + 521 + ], + [ + 765, + 510 + ], + [ + 758, + 503 + ], + [ + 770, + 482 + ], + [ + 779, + 463 + ], + [ + 777, + 448 + ], + [ + 779, + 428 + ], + [ + 774, + 417 + ], + [ + 770, + 399 + ], + [ + 776, + 396 + ], + [ + 767, + 382 + ], + [ + 761, + 379 + ], + [ + 763, + 374 + ], + [ + 762, + 362 + ], + [ + 760, + 355 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 
387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000080_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000080_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..6d9a4430b65d21143302db452c91f58157cafa4c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000080_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000080_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000080_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..850fbdadce2b58e4c74291be92d6989975422589 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000080_000019_gtFine_polygons.json @@ -0,0 +1,7990 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "traffic light", + "polygon": [ + [ + 681, + 426 + ], + [ + 675, + 432 + ], + [ + 672, + 442 + ], + [ + 674, + 454 + ], + [ + 675, + 459 + ], + [ + 677, + 462 + ], + [ + 680, + 463 + ], + [ + 683, + 462 + ], + [ + 684, + 460 + ], + [ + 685, + 457 + ], + [ + 705, + 458 + ], + [ + 705, + 461 + ], + [ + 708, + 462 + ], + [ + 711, + 462 + ], + [ + 713, + 460 + ], + [ + 714, + 457 + ], + [ + 735, + 458 + ], + [ + 735, + 460 + ], + [ + 737, + 461 + ], + [ + 740, + 461 + ], + [ + 746, + 455 + ], + [ + 747, + 448 + ], + [ + 738, + 438 + ], + [ + 724, + 428 + ], + [ + 721, + 427 + ], + [ + 701, + 424 + ], + [ + 688, + 425 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 353, + 505 + ], + [ + 352, + 437 + ], + [ + 353, + 431 + ], + [ + 356, + 427 + ], + [ + 357, + 422 + ], + [ + 359, + 418 + ], + [ + 364, + 418 + ], + [ + 366, + 419 + ], + [ + 367, + 424 + ], + [ + 368, + 426 + ], + [ + 370, + 428 + ], + [ + 373, + 434 + ], + [ + 374, + 439 + ], + [ + 378, + 504 + ], + [ + 369, + 511 + ], + [ + 379, + 512 + ], + [ + 383, + 518 + ], + [ + 379, + 521 + ], + [ + 346, + 524 + ], + [ + 326, + 521 + ], + [ + 329, + 517 + ], + [ + 338, + 513 + ], + [ + 356, + 510 + ], + [ + 358, + 510 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 1243, + 1 + ], + [ + 1192, + 251 + ], + [ + 971, + 396 + ], + [ + 874, + 400 + ], + [ + 666, + 198 + ], + [ + 426, + 86 + ], + [ + 414, + 0 + ], + [ + 1178, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2048, + 435 + ], + [ + 901, + 419 + ], + [ + 0, + 401 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 792, + 454 + ], + [ + 779, + 454 + ], + [ + 776, + 456 + ], + [ + 740, + 457 + ], + [ + 668, + 458 + ], + [ + 631, + 458 + ], + [ + 625, + 445 + ], + [ + 796, + 438 + ], + [ + 851, + 439 + ], + [ + 809, + 450 + ], + [ + 807, + 453 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 667, + 464 + ], + [ + 644, + 458 + ], + [ + 
627, + 454 + ], + [ + 557, + 446 + ], + [ + 454, + 459 + ], + [ + 442, + 460 + ], + [ + 0, + 452 + ], + [ + 1, + 565 + ], + [ + 191, + 547 + ], + [ + 398, + 512 + ], + [ + 426, + 504 + ], + [ + 457, + 500 + ], + [ + 497, + 496 + ], + [ + 515, + 493 + ], + [ + 546, + 484 + ], + [ + 579, + 477 + ], + [ + 584, + 473 + ], + [ + 580, + 470 + ], + [ + 564, + 467 + ], + [ + 575, + 462 + ], + [ + 638, + 464 + ], + [ + 654, + 465 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1021, + 470 + ], + [ + 959, + 469 + ], + [ + 953, + 464 + ], + [ + 963, + 464 + ], + [ + 974, + 458 + ], + [ + 1068, + 446 + ], + [ + 1400, + 449 + ], + [ + 2004, + 477 + ], + [ + 2012, + 546 + ], + [ + 1745, + 551 + ], + [ + 1489, + 550 + ], + [ + 1342, + 547 + ], + [ + 1232, + 541 + ], + [ + 1150, + 531 + ], + [ + 1126, + 527 + ], + [ + 1128, + 517 + ], + [ + 1156, + 511 + ], + [ + 1178, + 500 + ], + [ + 1180, + 499 + ], + [ + 1164, + 495 + ], + [ + 1086, + 477 + ], + [ + 1052, + 473 + ], + [ + 1038, + 470 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1323, + 631 + ], + [ + 1335, + 616 + ], + [ + 1432, + 604 + ], + [ + 1536, + 598 + ], + [ + 1635, + 594 + ], + [ + 1708, + 589 + ], + [ + 1844, + 581 + ], + [ + 1904, + 575 + ], + [ + 1990, + 568 + ], + [ + 2048, + 592 + ], + [ + 2048, + 1024 + ], + [ + 1986, + 1024 + ], + [ + 1794, + 901 + ], + [ + 1598, + 783 + ], + [ + 1446, + 704 + ], + [ + 1352, + 648 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 437, + 36 + ], + [ + 436, + 1 + ], + [ + 0, + 0 + ], + [ + 0, + 512 + ], + [ + 423, + 473 + ], + [ + 446, + 470 + ], + [ + 446, + 456 + ], + [ + 645, + 451 + ], + [ + 679, + 452 + ], + [ + 776, + 451 + ], + [ + 796, + 450 + ], + [ + 820, + 448 + ], + [ + 891, + 442 + ], + [ + 965, + 447 + ], + [ + 1064, + 451 + ], + [ + 1089, + 452 + ], + [ + 1110, + 457 + ], + [ + 1345, + 480 + ], + [ + 1345, + 487 + ], + [ + 1488, + 504 + ], + [ + 1530, + 504 + ], + [ + 1621, + 505 + ], + [ + 1628, + 507 + ], + [ + 1826, + 511 + ], + [ + 2048, + 515 + ], + [ + 2048, + 1 + ], + [ + 1141, + 1 + ], + [ + 1050, + 204 + ], + [ + 1050, + 216 + ], + [ + 1043, + 227 + ], + [ + 1031, + 226 + ], + [ + 1029, + 237 + ], + [ + 1025, + 240 + ], + [ + 1024, + 243 + ], + [ + 1023, + 267 + ], + [ + 1013, + 274 + ], + [ + 931, + 341 + ], + [ + 923, + 341 + ], + [ + 920, + 350 + ], + [ + 919, + 347 + ], + [ + 910, + 352 + ], + [ + 912, + 355 + ], + [ + 904, + 358 + ], + [ + 904, + 370 + ], + [ + 900, + 377 + ], + [ + 895, + 377 + ], + [ + 870, + 328 + ], + [ + 867, + 318 + ], + [ + 861, + 318 + ], + [ + 862, + 311 + ], + [ + 859, + 308 + ], + [ + 859, + 298 + ], + [ + 856, + 293 + ], + [ + 856, + 285 + ], + [ + 851, + 284 + ], + [ + 850, + 287 + ], + [ + 850, + 293 + ], + [ + 840, + 293 + ], + [ + 838, + 288 + ], + [ + 826, + 288 + ], + [ + 825, + 280 + ], + [ + 819, + 277 + ], + [ + 811, + 275 + ], + [ + 811, + 270 + ], + [ + 809, + 270 + ], + [ + 808, + 266 + ], + [ + 802, + 264 + ], + [ + 801, + 260 + ], + [ + 795, + 256 + ], + [ + 797, + 250 + ], + [ + 792, + 255 + ], + [ + 788, + 249 + ], + [ + 788, + 233 + ], + [ + 773, + 234 + ], + [ + 773, + 223 + ], + [ + 771, + 218 + ], + [ + 761, + 220 + ], + [ + 760, + 216 + ], + [ + 754, + 216 + ], + [ + 746, + 203 + ], + [ + 740, + 200 + ], + [ + 737, + 192 + ], + [ + 735, + 178 + ], + [ + 716, + 176 + ], + [ + 709, + 169 + ], + [ + 707, + 84 + ], + [ + 675, + 61 + ], + [ + 667, + 61 + ], + [ + 651, + 39 + ], + [ + 539, + 40 + ], + [ + 441, + 41 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 871, + 431 + ], 
+ [ + 873, + 408 + ], + [ + 873, + 395 + ], + [ + 876, + 395 + ], + [ + 883, + 393 + ], + [ + 886, + 391 + ], + [ + 893, + 391 + ], + [ + 897, + 387 + ], + [ + 894, + 381 + ], + [ + 893, + 375 + ], + [ + 892, + 367 + ], + [ + 890, + 362 + ], + [ + 892, + 361 + ], + [ + 891, + 357 + ], + [ + 889, + 355 + ], + [ + 887, + 352 + ], + [ + 887, + 346 + ], + [ + 888, + 343 + ], + [ + 885, + 342 + ], + [ + 883, + 344 + ], + [ + 880, + 342 + ], + [ + 876, + 331 + ], + [ + 875, + 333 + ], + [ + 872, + 327 + ], + [ + 867, + 327 + ], + [ + 866, + 336 + ], + [ + 865, + 339 + ], + [ + 863, + 341 + ], + [ + 858, + 338 + ], + [ + 855, + 345 + ], + [ + 855, + 358 + ], + [ + 855, + 365 + ], + [ + 854, + 367 + ], + [ + 853, + 373 + ], + [ + 853, + 381 + ], + [ + 851, + 388 + ], + [ + 852, + 393 + ], + [ + 857, + 394 + ], + [ + 862, + 396 + ], + [ + 864, + 399 + ], + [ + 871, + 395 + ], + [ + 871, + 405 + ], + [ + 870, + 413 + ], + [ + 867, + 424 + ], + [ + 867, + 431 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 838, + 398 + ], + [ + 838, + 380 + ], + [ + 826, + 380 + ], + [ + 826, + 398 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 794, + 430 + ], + [ + 793, + 433 + ], + [ + 791, + 439 + ], + [ + 793, + 442 + ], + [ + 793, + 445 + ], + [ + 796, + 447 + ], + [ + 796, + 443 + ], + [ + 797, + 440 + ], + [ + 797, + 435 + ], + [ + 796, + 432 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 801, + 430 + ], + [ + 799, + 435 + ], + [ + 799, + 439 + ], + [ + 799, + 442 + ], + [ + 799, + 448 + ], + [ + 802, + 447 + ], + [ + 803, + 442 + ], + [ + 805, + 437 + ], + [ + 804, + 433 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 785, + 445 + ], + [ + 783, + 454 + ], + [ + 808, + 454 + ], + [ + 809, + 449 + ], + [ + 807, + 445 + ], + [ + 802, + 443 + ], + [ + 792, + 443 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 789, + 411 + ], + [ + 789, + 422 + ], + [ + 795, + 422 + ], + [ + 795, + 411 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 775, + 338 + ], + [ + 774, + 357 + ], + [ + 783, + 357 + ], + [ + 784, + 338 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 713, + 430 + ], + [ + 713, + 367 + ], + [ + 714, + 358 + ], + [ + 717, + 354 + ], + [ + 721, + 351 + ], + [ + 726, + 350 + ], + [ + 764, + 341 + ], + [ + 778, + 341 + ], + [ + 778, + 343 + ], + [ + 763, + 343 + ], + [ + 722, + 353 + ], + [ + 719, + 355 + ], + [ + 717, + 358 + ], + [ + 715, + 363 + ], + [ + 715, + 430 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 724, + 397 + ], + [ + 718, + 397 + ], + [ + 717, + 412 + ], + [ + 720, + 411 + ], + [ + 723, + 409 + ], + [ + 723, + 407 + ], + [ + 720, + 406 + ], + [ + 718, + 406 + ], + [ + 724, + 405 + ], + [ + 724, + 402 + ], + [ + 719, + 401 + ], + [ + 724, + 400 + ], + [ + 725, + 399 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 708, + 394 + ], + [ + 708, + 412 + ], + [ + 718, + 412 + ], + [ + 719, + 394 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 543, + 319 + ], + [ + 540, + 323 + ], + [ + 540, + 325 + ], + [ + 544, + 326 + ], + [ + 544, + 328 + ], + [ + 540, + 329 + ], + [ + 540, + 331 + ], + [ + 544, + 333 + ], + [ + 540, + 335 + ], + [ + 540, + 337 + ], + [ + 544, + 338 + ], + [ + 545, + 341 + ], + [ + 550, + 341 + ], + [ + 550, + 318 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 550, + 460 + ], + [ + 550, + 426 + ], + [ + 546, + 169 + ], + [ + 543, + 168 + ], + [ + 545, + 389 + ], + [ + 547, + 461 + ] + ] + }, + { + "label": "static", + "polygon": [ 
+ [ + 522, + 166 + ], + [ + 522, + 170 + ], + [ + 531, + 171 + ], + [ + 540, + 171 + ], + [ + 541, + 168 + ], + [ + 544, + 168 + ], + [ + 547, + 170 + ], + [ + 556, + 170 + ], + [ + 564, + 170 + ], + [ + 563, + 167 + ], + [ + 549, + 164 + ], + [ + 545, + 165 + ], + [ + 543, + 166 + ], + [ + 541, + 166 + ], + [ + 537, + 165 + ], + [ + 525, + 165 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 445, + 434 + ], + [ + 462, + 434 + ], + [ + 466, + 437 + ], + [ + 466, + 425 + ], + [ + 471, + 424 + ], + [ + 475, + 425 + ], + [ + 477, + 429 + ], + [ + 480, + 426 + ], + [ + 488, + 424 + ], + [ + 488, + 417 + ], + [ + 491, + 419 + ], + [ + 491, + 413 + ], + [ + 494, + 411 + ], + [ + 495, + 390 + ], + [ + 502, + 390 + ], + [ + 504, + 409 + ], + [ + 503, + 417 + ], + [ + 499, + 418 + ], + [ + 499, + 423 + ], + [ + 502, + 427 + ], + [ + 503, + 430 + ], + [ + 501, + 434 + ], + [ + 500, + 436 + ], + [ + 500, + 440 + ], + [ + 503, + 439 + ], + [ + 504, + 436 + ], + [ + 508, + 436 + ], + [ + 511, + 440 + ], + [ + 517, + 440 + ], + [ + 520, + 437 + ], + [ + 521, + 430 + ], + [ + 526, + 417 + ], + [ + 532, + 406 + ], + [ + 534, + 408 + ], + [ + 537, + 411 + ], + [ + 538, + 401 + ], + [ + 534, + 395 + ], + [ + 528, + 394 + ], + [ + 525, + 393 + ], + [ + 526, + 370 + ], + [ + 532, + 370 + ], + [ + 533, + 386 + ], + [ + 538, + 389 + ], + [ + 541, + 385 + ], + [ + 545, + 385 + ], + [ + 550, + 385 + ], + [ + 552, + 388 + ], + [ + 556, + 390 + ], + [ + 559, + 409 + ], + [ + 564, + 411 + ], + [ + 569, + 417 + ], + [ + 574, + 421 + ], + [ + 579, + 425 + ], + [ + 582, + 408 + ], + [ + 588, + 406 + ], + [ + 589, + 399 + ], + [ + 622, + 400 + ], + [ + 627, + 402 + ], + [ + 640, + 415 + ], + [ + 641, + 460 + ], + [ + 632, + 460 + ], + [ + 566, + 460 + ], + [ + 555, + 461 + ], + [ + 521, + 462 + ], + [ + 514, + 463 + ], + [ + 462, + 466 + ], + [ + 446, + 467 + ], + [ + 445, + 466 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 458, + 405 + ], + [ + 447, + 405 + ], + [ + 448, + 411 + ], + [ + 458, + 411 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 451, + 394 + ], + [ + 448, + 397 + ], + [ + 448, + 402 + ], + [ + 449, + 404 + ], + [ + 451, + 405 + ], + [ + 454, + 405 + ], + [ + 457, + 404 + ], + [ + 459, + 400 + ], + [ + 458, + 396 + ], + [ + 455, + 393 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 464, + 388 + ], + [ + 466, + 390 + ], + [ + 468, + 389 + ], + [ + 468, + 394 + ], + [ + 464, + 394 + ], + [ + 464, + 408 + ], + [ + 469, + 408 + ], + [ + 469, + 426 + ], + [ + 471, + 426 + ], + [ + 471, + 408 + ], + [ + 474, + 408 + ], + [ + 474, + 394 + ], + [ + 471, + 393 + ], + [ + 471, + 388 + ], + [ + 474, + 384 + ], + [ + 471, + 382 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 522, + 366 + ], + [ + 504, + 392 + ], + [ + 537, + 393 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 520, + 472 + ], + [ + 524, + 366 + ], + [ + 522, + 366 + ], + [ + 518, + 473 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 529, + 468 + ], + [ + 529, + 391 + ], + [ + 527, + 391 + ], + [ + 527, + 468 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 528, + 479 + ], + [ + 528, + 470 + ], + [ + 508, + 469 + ], + [ + 508, + 480 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 580, + 402 + ], + [ + 566, + 376 + ], + [ + 550, + 401 + ], + [ + 579, + 402 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 564, + 463 + ], + [ + 567, + 375 + ], + [ + 565, + 375 + ], + [ + 561, + 464 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 555, + 
465 + ], + [ + 557, + 460 + ], + [ + 567, + 459 + ], + [ + 570, + 466 + ], + [ + 569, + 471 + ], + [ + 555, + 471 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 596, + 381 + ], + [ + 590, + 381 + ], + [ + 590, + 394 + ], + [ + 596, + 394 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 591, + 369 + ], + [ + 591, + 399 + ], + [ + 593, + 399 + ], + [ + 593, + 369 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 639, + 379 + ], + [ + 638, + 414 + ], + [ + 640, + 416 + ], + [ + 641, + 379 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 644, + 387 + ], + [ + 641, + 385 + ], + [ + 637, + 385 + ], + [ + 638, + 404 + ], + [ + 647, + 403 + ], + [ + 647, + 400 + ], + [ + 643, + 398 + ], + [ + 643, + 398 + ], + [ + 647, + 397 + ], + [ + 648, + 393 + ], + [ + 643, + 392 + ], + [ + 648, + 390 + ], + [ + 648, + 387 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 424, + 434 + ], + [ + 424, + 428 + ], + [ + 419, + 427 + ], + [ + 418, + 421 + ], + [ + 415, + 419 + ], + [ + 412, + 419 + ], + [ + 411, + 422 + ], + [ + 411, + 427 + ], + [ + 407, + 428 + ], + [ + 406, + 431 + ], + [ + 402, + 430 + ], + [ + 397, + 431 + ], + [ + 392, + 434 + ], + [ + 389, + 497 + ], + [ + 406, + 498 + ], + [ + 413, + 503 + ], + [ + 401, + 506 + ], + [ + 398, + 510 + ], + [ + 397, + 513 + ], + [ + 398, + 514 + ], + [ + 425, + 513 + ], + [ + 435, + 511 + ], + [ + 434, + 508 + ], + [ + 419, + 505 + ], + [ + 419, + 503 + ], + [ + 429, + 497 + ], + [ + 429, + 491 + ], + [ + 431, + 490 + ], + [ + 429, + 435 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 380, + 397 + ], + [ + 381, + 382 + ], + [ + 407, + 357 + ], + [ + 402, + 244 + ], + [ + 339, + 244 + ], + [ + 341, + 277 + ], + [ + 308, + 277 + ], + [ + 315, + 289 + ], + [ + 325, + 289 + ], + [ + 324, + 297 + ], + [ + 307, + 298 + ], + [ + 306, + 344 + ], + [ + 342, + 346 + ], + [ + 342, + 360 + ], + [ + 345, + 363 + ], + [ + 346, + 396 + ], + [ + 355, + 397 + ], + [ + 354, + 370 + ], + [ + 369, + 382 + ], + [ + 369, + 397 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 98, + 363 + ], + [ + 0, + 362 + ], + [ + 0, + 539 + ], + [ + 93, + 546 + ], + [ + 229, + 530 + ], + [ + 306, + 518 + ], + [ + 342, + 512 + ], + [ + 391, + 512 + ], + [ + 431, + 498 + ], + [ + 427, + 435 + ], + [ + 397, + 435 + ], + [ + 400, + 393 + ], + [ + 274, + 385 + ], + [ + 217, + 379 + ], + [ + 100, + 372 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 78, + 445 + ], + [ + 80, + 439 + ], + [ + 79, + 435 + ], + [ + 74, + 433 + ], + [ + 69, + 433 + ], + [ + 66, + 435 + ], + [ + 65, + 439 + ], + [ + 65, + 443 + ], + [ + 65, + 446 + ], + [ + 58, + 446 + ], + [ + 57, + 453 + ], + [ + 31, + 452 + ], + [ + 0, + 452 + ], + [ + 0, + 544 + ], + [ + 69, + 548 + ], + [ + 40, + 557 + ], + [ + 31, + 562 + ], + [ + 29, + 565 + ], + [ + 106, + 561 + ], + [ + 116, + 561 + ], + [ + 122, + 556 + ], + [ + 95, + 546 + ], + [ + 98, + 455 + ], + [ + 87, + 453 + ], + [ + 86, + 447 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 219, + 425 + ], + [ + 218, + 420 + ], + [ + 221, + 415 + ], + [ + 228, + 415 + ], + [ + 231, + 418 + ], + [ + 231, + 421 + ], + [ + 231, + 425 + ], + [ + 235, + 426 + ], + [ + 238, + 428 + ], + [ + 238, + 434 + ], + [ + 237, + 437 + ], + [ + 241, + 437 + ], + [ + 247, + 522 + ], + [ + 237, + 529 + ], + [ + 248, + 531 + ], + [ + 255, + 537 + ], + [ + 255, + 542 + ], + [ + 193, + 548 + ], + [ + 194, + 541 + ], + [ + 201, + 537 + ], + [ + 222, + 531 + ], + [ + 222, + 528 + ], + [ + 217, + 524 + ], 
+ [ + 208, + 436 + ], + [ + 213, + 436 + ], + [ + 213, + 431 + ], + [ + 215, + 428 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 322, + 513 + ], + [ + 318, + 436 + ], + [ + 317, + 434 + ], + [ + 316, + 429 + ], + [ + 314, + 427 + ], + [ + 310, + 426 + ], + [ + 311, + 421 + ], + [ + 309, + 417 + ], + [ + 304, + 416 + ], + [ + 301, + 417 + ], + [ + 299, + 421 + ], + [ + 299, + 424 + ], + [ + 300, + 427 + ], + [ + 297, + 428 + ], + [ + 296, + 433 + ], + [ + 296, + 437 + ], + [ + 294, + 439 + ], + [ + 298, + 513 + ], + [ + 302, + 517 + ], + [ + 288, + 528 + ], + [ + 288, + 530 + ], + [ + 317, + 531 + ], + [ + 340, + 526 + ], + [ + 339, + 522 + ], + [ + 318, + 520 + ], + [ + 318, + 516 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 893, + 430 + ], + [ + 890, + 428 + ], + [ + 846, + 428 + ], + [ + 827, + 437 + ], + [ + 812, + 440 + ], + [ + 808, + 443 + ], + [ + 807, + 448 + ], + [ + 808, + 451 + ], + [ + 816, + 452 + ], + [ + 818, + 454 + ], + [ + 821, + 456 + ], + [ + 827, + 456 + ], + [ + 830, + 452 + ], + [ + 837, + 453 + ], + [ + 868, + 452 + ], + [ + 869, + 454 + ], + [ + 872, + 456 + ], + [ + 877, + 456 + ], + [ + 880, + 455 + ], + [ + 881, + 452 + ], + [ + 888, + 451 + ], + [ + 897, + 431 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 961, + 409 + ], + [ + 953, + 408 + ], + [ + 949, + 404 + ], + [ + 945, + 397 + ], + [ + 942, + 391 + ], + [ + 948, + 385 + ], + [ + 952, + 380 + ], + [ + 958, + 378 + ], + [ + 967, + 379 + ], + [ + 972, + 381 + ], + [ + 976, + 382 + ], + [ + 977, + 376 + ], + [ + 971, + 370 + ], + [ + 961, + 364 + ], + [ + 942, + 358 + ], + [ + 932, + 362 + ], + [ + 935, + 357 + ], + [ + 934, + 351 + ], + [ + 927, + 346 + ], + [ + 931, + 339 + ], + [ + 927, + 333 + ], + [ + 923, + 328 + ], + [ + 924, + 323 + ], + [ + 928, + 322 + ], + [ + 933, + 320 + ], + [ + 933, + 318 + ], + [ + 940, + 316 + ], + [ + 945, + 308 + ], + [ + 940, + 306 + ], + [ + 942, + 299 + ], + [ + 947, + 299 + ], + [ + 943, + 298 + ], + [ + 941, + 293 + ], + [ + 949, + 293 + ], + [ + 949, + 290 + ], + [ + 949, + 287 + ], + [ + 953, + 285 + ], + [ + 956, + 277 + ], + [ + 959, + 276 + ], + [ + 965, + 280 + ], + [ + 965, + 272 + ], + [ + 963, + 270 + ], + [ + 964, + 267 + ], + [ + 966, + 266 + ], + [ + 968, + 267 + ], + [ + 971, + 264 + ], + [ + 971, + 262 + ], + [ + 976, + 255 + ], + [ + 980, + 257 + ], + [ + 982, + 258 + ], + [ + 989, + 251 + ], + [ + 995, + 248 + ], + [ + 996, + 254 + ], + [ + 1000, + 251 + ], + [ + 1008, + 254 + ], + [ + 1009, + 258 + ], + [ + 1016, + 257 + ], + [ + 1016, + 259 + ], + [ + 1013, + 263 + ], + [ + 1014, + 264 + ], + [ + 1018, + 266 + ], + [ + 1019, + 270 + ], + [ + 1021, + 271 + ], + [ + 1023, + 268 + ], + [ + 1021, + 275 + ], + [ + 1014, + 281 + ], + [ + 1019, + 282 + ], + [ + 1017, + 287 + ], + [ + 1013, + 290 + ], + [ + 1012, + 292 + ], + [ + 1014, + 293 + ], + [ + 1017, + 293 + ], + [ + 1017, + 314 + ], + [ + 1000, + 392 + ], + [ + 998, + 402 + ], + [ + 997, + 408 + ], + [ + 999, + 435 + ], + [ + 973, + 435 + ], + [ + 970, + 427 + ], + [ + 969, + 423 + ], + [ + 959, + 419 + ], + [ + 959, + 418 + ], + [ + 963, + 415 + ], + [ + 966, + 414 + ], + [ + 967, + 411 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 953, + 425 + ], + [ + 968, + 424 + ], + [ + 971, + 418 + ], + [ + 971, + 407 + ], + [ + 974, + 404 + ], + [ + 978, + 405 + ], + [ + 977, + 398 + ], + [ + 977, + 385 + ], + [ + 978, + 380 + ], + [ + 979, + 313 + ], + [ + 1038, + 314 + ], + [ + 1037, + 388 + ], + [ + 1031, + 390 + ], + [ + 1032, + 394 + ], + 
[ + 1036, + 396 + ], + [ + 1046, + 399 + ], + [ + 1052, + 397 + ], + [ + 1052, + 348 + ], + [ + 1069, + 348 + ], + [ + 1083, + 351 + ], + [ + 1092, + 352 + ], + [ + 1093, + 371 + ], + [ + 1095, + 383 + ], + [ + 1072, + 385 + ], + [ + 1072, + 406 + ], + [ + 1073, + 428 + ], + [ + 1078, + 447 + ], + [ + 1077, + 458 + ], + [ + 1070, + 466 + ], + [ + 1053, + 465 + ], + [ + 972, + 463 + ], + [ + 969, + 461 + ], + [ + 969, + 456 + ], + [ + 959, + 455 + ], + [ + 949, + 428 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 946, + 370 + ], + [ + 946, + 385 + ], + [ + 952, + 385 + ], + [ + 952, + 370 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 982, + 381 + ], + [ + 976, + 381 + ], + [ + 976, + 397 + ], + [ + 982, + 397 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 984, + 393 + ], + [ + 991, + 400 + ], + [ + 996, + 393 + ], + [ + 990, + 386 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1020, + 400 + ], + [ + 1020, + 387 + ], + [ + 1031, + 387 + ], + [ + 1031, + 400 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1074, + 371 + ], + [ + 1073, + 351 + ], + [ + 1090, + 351 + ], + [ + 1092, + 372 + ], + [ + 1075, + 372 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1057, + 362 + ], + [ + 1057, + 385 + ], + [ + 1071, + 384 + ], + [ + 1071, + 362 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 966, + 322 + ], + [ + 915, + 311 + ], + [ + 901, + 310 + ], + [ + 901, + 309 + ], + [ + 916, + 309 + ], + [ + 965, + 319 + ], + [ + 971, + 321 + ], + [ + 975, + 325 + ], + [ + 980, + 332 + ], + [ + 980, + 336 + ], + [ + 976, + 331 + ], + [ + 971, + 325 + ], + [ + 968, + 323 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1056, + 403 + ], + [ + 1056, + 384 + ], + [ + 1071, + 384 + ], + [ + 1071, + 403 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 890, + 302 + ], + [ + 889, + 328 + ], + [ + 903, + 328 + ], + [ + 903, + 302 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 914, + 420 + ], + [ + 898, + 420 + ], + [ + 894, + 422 + ], + [ + 891, + 432 + ], + [ + 884, + 433 + ], + [ + 883, + 434 + ], + [ + 883, + 436 + ], + [ + 888, + 439 + ], + [ + 886, + 448 + ], + [ + 885, + 468 + ], + [ + 887, + 470 + ], + [ + 894, + 469 + ], + [ + 894, + 464 + ], + [ + 900, + 464 + ], + [ + 916, + 422 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 897, + 474 + ], + [ + 897, + 461 + ], + [ + 898, + 448 + ], + [ + 900, + 444 + ], + [ + 895, + 441 + ], + [ + 895, + 438 + ], + [ + 898, + 438 + ], + [ + 901, + 438 + ], + [ + 906, + 428 + ], + [ + 909, + 422 + ], + [ + 912, + 421 + ], + [ + 924, + 419 + ], + [ + 942, + 420 + ], + [ + 952, + 421 + ], + [ + 956, + 424 + ], + [ + 959, + 432 + ], + [ + 960, + 437 + ], + [ + 963, + 439 + ], + [ + 965, + 441 + ], + [ + 963, + 444 + ], + [ + 961, + 445 + ], + [ + 962, + 454 + ], + [ + 962, + 463 + ], + [ + 962, + 477 + ], + [ + 949, + 476 + ], + [ + 949, + 472 + ], + [ + 909, + 470 + ], + [ + 909, + 476 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1108, + 429 + ], + [ + 1104, + 389 + ], + [ + 1102, + 355 + ], + [ + 1098, + 350 + ], + [ + 1090, + 340 + ], + [ + 1085, + 334 + ], + [ + 1072, + 341 + ], + [ + 1062, + 336 + ], + [ + 1066, + 314 + ], + [ + 1043, + 323 + ], + [ + 1036, + 320 + ], + [ + 1030, + 308 + ], + [ + 1038, + 298 + ], + [ + 1047, + 287 + ], + [ + 1053, + 280 + ], + [ + 1067, + 255 + ], + [ + 1059, + 252 + ], + [ + 1065, + 243 + ], + [ + 1071, + 231 + ], + [ + 1062, + 218 + ], + [ + 1048, + 223 + ], + [ 
+ 1042, + 229 + ], + [ + 1040, + 228 + ], + [ + 1046, + 217 + ], + [ + 1055, + 214 + ], + [ + 1049, + 207 + ], + [ + 1041, + 213 + ], + [ + 1028, + 213 + ], + [ + 1040, + 200 + ], + [ + 1048, + 199 + ], + [ + 1040, + 187 + ], + [ + 1019, + 176 + ], + [ + 1003, + 176 + ], + [ + 1001, + 181 + ], + [ + 991, + 184 + ], + [ + 994, + 166 + ], + [ + 988, + 167 + ], + [ + 986, + 160 + ], + [ + 991, + 156 + ], + [ + 981, + 153 + ], + [ + 971, + 158 + ], + [ + 970, + 168 + ], + [ + 973, + 175 + ], + [ + 962, + 171 + ], + [ + 955, + 153 + ], + [ + 959, + 140 + ], + [ + 928, + 140 + ], + [ + 920, + 151 + ], + [ + 911, + 160 + ], + [ + 907, + 173 + ], + [ + 910, + 186 + ], + [ + 901, + 193 + ], + [ + 879, + 205 + ], + [ + 844, + 208 + ], + [ + 848, + 192 + ], + [ + 835, + 193 + ], + [ + 809, + 178 + ], + [ + 809, + 167 + ], + [ + 826, + 155 + ], + [ + 830, + 135 + ], + [ + 826, + 124 + ], + [ + 843, + 118 + ], + [ + 836, + 110 + ], + [ + 822, + 106 + ], + [ + 806, + 95 + ], + [ + 814, + 91 + ], + [ + 834, + 91 + ], + [ + 830, + 84 + ], + [ + 820, + 62 + ], + [ + 837, + 55 + ], + [ + 843, + 59 + ], + [ + 860, + 57 + ], + [ + 858, + 36 + ], + [ + 854, + 21 + ], + [ + 858, + 22 + ], + [ + 862, + 33 + ], + [ + 872, + 39 + ], + [ + 880, + 37 + ], + [ + 879, + 49 + ], + [ + 882, + 49 + ], + [ + 885, + 40 + ], + [ + 899, + 49 + ], + [ + 907, + 59 + ], + [ + 912, + 61 + ], + [ + 912, + 58 + ], + [ + 907, + 48 + ], + [ + 923, + 33 + ], + [ + 920, + 30 + ], + [ + 914, + 27 + ], + [ + 910, + 34 + ], + [ + 893, + 38 + ], + [ + 894, + 36 + ], + [ + 903, + 26 + ], + [ + 895, + 24 + ], + [ + 890, + 24 + ], + [ + 886, + 19 + ], + [ + 874, + 13 + ], + [ + 890, + 12 + ], + [ + 901, + 10 + ], + [ + 889, + 4 + ], + [ + 890, + 0 + ], + [ + 1178, + 0 + ], + [ + 1200, + 8 + ], + [ + 1208, + 15 + ], + [ + 1212, + 17 + ], + [ + 1227, + 21 + ], + [ + 1227, + 25 + ], + [ + 1215, + 28 + ], + [ + 1202, + 26 + ], + [ + 1201, + 42 + ], + [ + 1195, + 44 + ], + [ + 1179, + 31 + ], + [ + 1187, + 25 + ], + [ + 1156, + 18 + ], + [ + 1139, + 16 + ], + [ + 1121, + 30 + ], + [ + 1129, + 36 + ], + [ + 1141, + 35 + ], + [ + 1154, + 45 + ], + [ + 1147, + 55 + ], + [ + 1165, + 58 + ], + [ + 1185, + 79 + ], + [ + 1178, + 84 + ], + [ + 1161, + 71 + ], + [ + 1155, + 78 + ], + [ + 1133, + 73 + ], + [ + 1133, + 83 + ], + [ + 1155, + 102 + ], + [ + 1160, + 116 + ], + [ + 1125, + 113 + ], + [ + 1118, + 118 + ], + [ + 1110, + 132 + ], + [ + 1135, + 137 + ], + [ + 1163, + 128 + ], + [ + 1163, + 132 + ], + [ + 1146, + 149 + ], + [ + 1129, + 153 + ], + [ + 1116, + 153 + ], + [ + 1101, + 161 + ], + [ + 1099, + 166 + ], + [ + 1118, + 179 + ], + [ + 1102, + 181 + ], + [ + 1102, + 185 + ], + [ + 1100, + 190 + ], + [ + 1099, + 197 + ], + [ + 1101, + 208 + ], + [ + 1101, + 217 + ], + [ + 1102, + 233 + ], + [ + 1105, + 239 + ], + [ + 1105, + 244 + ], + [ + 1104, + 247 + ], + [ + 1106, + 253 + ], + [ + 1106, + 266 + ], + [ + 1110, + 280 + ], + [ + 1110, + 292 + ], + [ + 1122, + 284 + ], + [ + 1127, + 284 + ], + [ + 1137, + 290 + ], + [ + 1143, + 298 + ], + [ + 1141, + 300 + ], + [ + 1128, + 290 + ], + [ + 1123, + 293 + ], + [ + 1120, + 297 + ], + [ + 1110, + 300 + ], + [ + 1113, + 321 + ], + [ + 1115, + 337 + ], + [ + 1117, + 345 + ], + [ + 1116, + 356 + ], + [ + 1120, + 396 + ], + [ + 1123, + 431 + ], + [ + 1121, + 438 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1150, + 425 + ], + [ + 1149, + 417 + ], + [ + 1147, + 414 + ], + [ + 1147, + 408 + ], + [ + 1144, + 405 + ], + [ + 1140, + 406 + ], + [ + 1138, + 411 + ], + [ + 1137, + 414 + ], + [ + 
1133, + 417 + ], + [ + 1131, + 421 + ], + [ + 1130, + 427 + ], + [ + 1146, + 431 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1208, + 424 + ], + [ + 1202, + 411 + ], + [ + 1200, + 407 + ], + [ + 1200, + 402 + ], + [ + 1199, + 398 + ], + [ + 1195, + 396 + ], + [ + 1192, + 399 + ], + [ + 1189, + 402 + ], + [ + 1187, + 405 + ], + [ + 1183, + 408 + ], + [ + 1179, + 411 + ], + [ + 1177, + 416 + ], + [ + 1174, + 422 + ], + [ + 1192, + 428 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1219, + 405 + ], + [ + 1214, + 404 + ], + [ + 1210, + 406 + ], + [ + 1209, + 414 + ], + [ + 1204, + 417 + ], + [ + 1199, + 425 + ], + [ + 1213, + 439 + ], + [ + 1233, + 431 + ], + [ + 1231, + 414 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1166, + 384 + ], + [ + 1166, + 423 + ], + [ + 1168, + 423 + ], + [ + 1168, + 383 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1095, + 488 + ], + [ + 1085, + 495 + ], + [ + 1070, + 496 + ], + [ + 1120, + 527 + ], + [ + 1126, + 526 + ], + [ + 1129, + 517 + ], + [ + 1160, + 511 + ], + [ + 1155, + 497 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1153, + 366 + ], + [ + 1153, + 386 + ], + [ + 1176, + 386 + ], + [ + 1176, + 365 + ], + [ + 1156, + 362 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1153, + 331 + ], + [ + 1153, + 367 + ], + [ + 1178, + 366 + ], + [ + 1176, + 331 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1182, + 318 + ], + [ + 1170, + 323 + ], + [ + 1165, + 327 + ], + [ + 1162, + 338 + ], + [ + 1163, + 348 + ], + [ + 1167, + 356 + ], + [ + 1175, + 360 + ], + [ + 1183, + 363 + ], + [ + 1193, + 361 + ], + [ + 1200, + 354 + ], + [ + 1203, + 347 + ], + [ + 1203, + 334 + ], + [ + 1199, + 327 + ], + [ + 1190, + 320 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1241, + 325 + ], + [ + 1238, + 333 + ], + [ + 1233, + 338 + ], + [ + 1226, + 341 + ], + [ + 1218, + 338 + ], + [ + 1216, + 335 + ], + [ + 1213, + 322 + ], + [ + 1242, + 321 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1224, + 353 + ], + [ + 1224, + 399 + ], + [ + 1229, + 399 + ], + [ + 1229, + 351 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1214, + 371 + ], + [ + 1214, + 384 + ], + [ + 1230, + 385 + ], + [ + 1230, + 371 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1211, + 339 + ], + [ + 1212, + 356 + ], + [ + 1241, + 355 + ], + [ + 1240, + 338 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1087, + 482 + ], + [ + 1072, + 485 + ], + [ + 1068, + 490 + ], + [ + 1067, + 497 + ], + [ + 1089, + 496 + ], + [ + 1090, + 483 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1102, + 426 + ], + [ + 1095, + 437 + ], + [ + 1092, + 448 + ], + [ + 1087, + 446 + ], + [ + 1080, + 447 + ], + [ + 1078, + 450 + ], + [ + 1080, + 454 + ], + [ + 1087, + 454 + ], + [ + 1091, + 455 + ], + [ + 1085, + 462 + ], + [ + 1084, + 473 + ], + [ + 1086, + 485 + ], + [ + 1086, + 494 + ], + [ + 1086, + 500 + ], + [ + 1091, + 500 + ], + [ + 1098, + 500 + ], + [ + 1099, + 503 + ], + [ + 1101, + 505 + ], + [ + 1107, + 504 + ], + [ + 1140, + 424 + ], + [ + 1124, + 424 + ], + [ + 1108, + 424 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1162, + 419 + ], + [ + 1136, + 422 + ], + [ + 1125, + 427 + ], + [ + 1121, + 433 + ], + [ + 1114, + 443 + ], + [ + 1112, + 448 + ], + [ + 1107, + 448 + ], + [ + 1102, + 449 + ], + [ + 1101, + 453 + ], + [ + 1106, + 458 + ], + [ + 1108, + 458 + ], + [ + 1103, + 475 + ], + [ + 1102, + 494 + ], + [ + 1103, + 507 + ], + [ + 1104, + 512 + 
], + [ + 1106, + 513 + ], + [ + 1115, + 513 + ], + [ + 1116, + 516 + ], + [ + 1119, + 519 + ], + [ + 1126, + 519 + ], + [ + 1132, + 518 + ], + [ + 1134, + 515 + ], + [ + 1135, + 507 + ], + [ + 1156, + 504 + ], + [ + 1160, + 502 + ], + [ + 1208, + 437 + ], + [ + 1206, + 419 + ], + [ + 1184, + 418 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1156, + 515 + ], + [ + 1155, + 491 + ], + [ + 1155, + 489 + ], + [ + 1155, + 420 + ], + [ + 1208, + 419 + ], + [ + 1211, + 419 + ], + [ + 1212, + 442 + ], + [ + 1211, + 468 + ], + [ + 1194, + 506 + ], + [ + 1169, + 517 + ], + [ + 1156, + 517 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1182, + 278 + ], + [ + 1184, + 478 + ], + [ + 1190, + 473 + ], + [ + 1188, + 278 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1174, + 280 + ], + [ + 1174, + 298 + ], + [ + 1183, + 298 + ], + [ + 1183, + 278 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1243, + 307 + ], + [ + 1188, + 307 + ], + [ + 1188, + 327 + ], + [ + 1206, + 327 + ], + [ + 1206, + 336 + ], + [ + 1231, + 336 + ], + [ + 1231, + 326 + ], + [ + 1244, + 326 + ], + [ + 1245, + 325 + ], + [ + 1248, + 322 + ], + [ + 1248, + 310 + ], + [ + 1247, + 309 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 1166, + 527 + ], + [ + 1169, + 515 + ], + [ + 1172, + 509 + ], + [ + 1171, + 503 + ], + [ + 1172, + 490 + ], + [ + 1183, + 475 + ], + [ + 1189, + 466 + ], + [ + 1189, + 460 + ], + [ + 1186, + 443 + ], + [ + 1180, + 436 + ], + [ + 1183, + 434 + ], + [ + 1191, + 442 + ], + [ + 1199, + 445 + ], + [ + 1212, + 454 + ], + [ + 1219, + 469 + ], + [ + 1218, + 473 + ], + [ + 1216, + 480 + ], + [ + 1218, + 483 + ], + [ + 1264, + 472 + ], + [ + 1273, + 471 + ], + [ + 1286, + 473 + ], + [ + 1293, + 473 + ], + [ + 1296, + 476 + ], + [ + 1297, + 479 + ], + [ + 1293, + 485 + ], + [ + 1291, + 487 + ], + [ + 1290, + 489 + ], + [ + 1296, + 500 + ], + [ + 1305, + 512 + ], + [ + 1294, + 510 + ], + [ + 1294, + 512 + ], + [ + 1298, + 520 + ], + [ + 1299, + 531 + ], + [ + 1296, + 539 + ], + [ + 1290, + 547 + ], + [ + 1283, + 548 + ], + [ + 1272, + 547 + ], + [ + 1264, + 540 + ], + [ + 1256, + 538 + ], + [ + 1248, + 535 + ], + [ + 1244, + 531 + ], + [ + 1234, + 532 + ], + [ + 1226, + 529 + ], + [ + 1215, + 530 + ], + [ + 1208, + 530 + ], + [ + 1203, + 526 + ], + [ + 1202, + 533 + ], + [ + 1199, + 539 + ], + [ + 1194, + 543 + ], + [ + 1187, + 545 + ], + [ + 1180, + 544 + ], + [ + 1172, + 539 + ], + [ + 1168, + 533 + ] + ] + }, + { + "label": "rider", + "polygon": [ + [ + 1238, + 396 + ], + [ + 1231, + 394 + ], + [ + 1225, + 394 + ], + [ + 1220, + 399 + ], + [ + 1217, + 405 + ], + [ + 1216, + 412 + ], + [ + 1216, + 414 + ], + [ + 1220, + 415 + ], + [ + 1223, + 421 + ], + [ + 1223, + 425 + ], + [ + 1224, + 428 + ], + [ + 1218, + 434 + ], + [ + 1210, + 441 + ], + [ + 1198, + 445 + ], + [ + 1186, + 447 + ], + [ + 1185, + 451 + ], + [ + 1185, + 456 + ], + [ + 1188, + 457 + ], + [ + 1193, + 458 + ], + [ + 1212, + 455 + ], + [ + 1223, + 451 + ], + [ + 1223, + 461 + ], + [ + 1224, + 468 + ], + [ + 1224, + 473 + ], + [ + 1216, + 482 + ], + [ + 1207, + 492 + ], + [ + 1206, + 496 + ], + [ + 1206, + 502 + ], + [ + 1210, + 512 + ], + [ + 1215, + 527 + ], + [ + 1218, + 534 + ], + [ + 1214, + 539 + ], + [ + 1209, + 543 + ], + [ + 1207, + 547 + ], + [ + 1208, + 549 + ], + [ + 1215, + 548 + ], + [ + 1233, + 542 + ], + [ + 1231, + 537 + ], + [ + 1226, + 529 + ], + [ + 1226, + 521 + ], + [ + 1221, + 504 + ], + [ + 1219, + 499 + ], + [ + 1231, + 492 + ], + [ + 1248, + 480 + ], + [ + 1256, + 
477 + ], + [ + 1261, + 474 + ], + [ + 1264, + 466 + ], + [ + 1262, + 442 + ], + [ + 1255, + 421 + ], + [ + 1250, + 417 + ], + [ + 1246, + 416 + ], + [ + 1242, + 409 + ], + [ + 1242, + 402 + ], + [ + 1240, + 398 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1658, + 529 + ], + [ + 1663, + 225 + ], + [ + 1670, + 227 + ], + [ + 1665, + 474 + ], + [ + 1666, + 532 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1402, + 14 + ], + [ + 1402, + 68 + ], + [ + 1464, + 68 + ], + [ + 1462, + 15 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1402, + 81 + ], + [ + 1403, + 134 + ], + [ + 1465, + 133 + ], + [ + 1463, + 81 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1479, + 161 + ], + [ + 1384, + 161 + ], + [ + 1384, + 219 + ], + [ + 1479, + 220 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1794, + 181 + ], + [ + 1781, + 179 + ], + [ + 1767, + 159 + ], + [ + 1759, + 147 + ], + [ + 1814, + 1 + ], + [ + 1596, + 1 + ], + [ + 1600, + 6 + ], + [ + 1637, + 13 + ], + [ + 1664, + 7 + ], + [ + 1649, + 20 + ], + [ + 1646, + 32 + ], + [ + 1658, + 41 + ], + [ + 1667, + 34 + ], + [ + 1673, + 23 + ], + [ + 1680, + 31 + ], + [ + 1694, + 27 + ], + [ + 1694, + 28 + ], + [ + 1679, + 45 + ], + [ + 1659, + 57 + ], + [ + 1644, + 65 + ], + [ + 1627, + 78 + ], + [ + 1632, + 91 + ], + [ + 1640, + 94 + ], + [ + 1640, + 105 + ], + [ + 1646, + 113 + ], + [ + 1657, + 260 + ], + [ + 1655, + 274 + ], + [ + 1619, + 288 + ], + [ + 1600, + 301 + ], + [ + 1597, + 306 + ], + [ + 1604, + 309 + ], + [ + 1624, + 307 + ], + [ + 1652, + 306 + ], + [ + 1680, + 301 + ], + [ + 1693, + 291 + ], + [ + 1690, + 307 + ], + [ + 1694, + 310 + ], + [ + 1697, + 313 + ], + [ + 1701, + 322 + ], + [ + 1695, + 332 + ], + [ + 1697, + 347 + ], + [ + 1706, + 351 + ], + [ + 1725, + 363 + ], + [ + 1732, + 367 + ], + [ + 1716, + 376 + ], + [ + 1705, + 386 + ], + [ + 1694, + 377 + ], + [ + 1685, + 386 + ], + [ + 1667, + 393 + ], + [ + 1647, + 405 + ], + [ + 1655, + 413 + ], + [ + 1670, + 426 + ], + [ + 1675, + 432 + ], + [ + 1684, + 425 + ], + [ + 1697, + 411 + ], + [ + 1720, + 408 + ], + [ + 1737, + 413 + ], + [ + 1737, + 423 + ], + [ + 1738, + 431 + ], + [ + 1716, + 437 + ], + [ + 1695, + 433 + ], + [ + 1682, + 442 + ], + [ + 1667, + 453 + ], + [ + 1659, + 483 + ], + [ + 1674, + 504 + ], + [ + 1702, + 512 + ], + [ + 1702, + 520 + ], + [ + 1682, + 528 + ], + [ + 1688, + 538 + ], + [ + 1893, + 544 + ], + [ + 1987, + 538 + ], + [ + 2004, + 468 + ], + [ + 2000, + 125 + ], + [ + 1920, + 144 + ], + [ + 1910, + 213 + ], + [ + 1888, + 230 + ], + [ + 1856, + 242 + ], + [ + 1829, + 261 + ], + [ + 1818, + 268 + ], + [ + 1817, + 262 + ], + [ + 1807, + 261 + ], + [ + 1801, + 269 + ], + [ + 1793, + 263 + ], + [ + 1792, + 248 + ], + [ + 1801, + 242 + ], + [ + 1810, + 231 + ], + [ + 1809, + 219 + ], + [ + 1802, + 200 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1646, + 533 + ], + [ + 1653, + 478 + ], + [ + 1657, + 471 + ], + [ + 1663, + 471 + ], + [ + 1668, + 476 + ], + [ + 1672, + 533 + ], + [ + 1666, + 533 + ], + [ + 1664, + 487 + ], + [ + 1662, + 478 + ], + [ + 1658, + 480 + ], + [ + 1656, + 490 + ], + [ + 1652, + 534 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1607, + 535 + ], + [ + 1634, + 544 + ], + [ + 1648, + 550 + ], + [ + 1741, + 551 + ], + [ + 1884, + 549 + ], + [ + 1995, + 547 + ], + [ + 2000, + 538 + ], + [ + 1987, + 533 + ], + [ + 1903, + 532 + ], + [ + 1813, + 536 + ], + [ + 1781, + 539 + ], + [ + 1759, + 536 + ], + [ + 1741, + 527 + ], + [ + 1702, + 521 + ], + [ + 1683, + 
524 + ], + [ + 1664, + 530 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1643, + 600 + ], + [ + 1641, + 255 + ], + [ + 1632, + 255 + ], + [ + 1633, + 601 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1598, + 203 + ], + [ + 1598, + 118 + ], + [ + 1667, + 111 + ], + [ + 1667, + 197 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1609, + 234 + ], + [ + 1606, + 197 + ], + [ + 1656, + 194 + ], + [ + 1658, + 231 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1612, + 266 + ], + [ + 1609, + 234 + ], + [ + 1658, + 230 + ], + [ + 1659, + 261 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1567, + 660 + ], + [ + 1558, + 183 + ], + [ + 1552, + 163 + ], + [ + 1547, + 190 + ], + [ + 1552, + 660 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1494, + 136 + ], + [ + 1553, + 194 + ], + [ + 1615, + 133 + ], + [ + 1557, + 74 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1939, + 575 + ], + [ + 1955, + 491 + ], + [ + 1958, + 483 + ], + [ + 1967, + 481 + ], + [ + 1979, + 483 + ], + [ + 1985, + 490 + ], + [ + 1993, + 579 + ], + [ + 1981, + 580 + ], + [ + 1977, + 511 + ], + [ + 1973, + 495 + ], + [ + 1967, + 492 + ], + [ + 1964, + 495 + ], + [ + 1952, + 577 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1659, + 88 + ], + [ + 1662, + 120 + ], + [ + 1665, + 141 + ], + [ + 1707, + 150 + ], + [ + 1871, + 151 + ], + [ + 1981, + 152 + ], + [ + 1985, + 602 + ], + [ + 2048, + 606 + ], + [ + 2048, + 1 + ], + [ + 1807, + 1 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1857, + 213 + ], + [ + 1861, + 109 + ], + [ + 1969, + 113 + ], + [ + 1967, + 213 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 78, + 445 + ], + [ + 80, + 439 + ], + [ + 79, + 435 + ], + [ + 74, + 433 + ], + [ + 69, + 433 + ], + [ + 66, + 435 + ], + [ + 65, + 439 + ], + [ + 65, + 443 + ], + [ + 65, + 446 + ], + [ + 58, + 446 + ], + [ + 57, + 453 + ], + [ + 31, + 452 + ], + [ + 0, + 452 + ], + [ + 0, + 544 + ], + [ + 69, + 548 + ], + [ + 40, + 557 + ], + [ + 31, + 562 + ], + [ + 29, + 565 + ], + [ + 106, + 561 + ], + [ + 116, + 561 + ], + [ + 122, + 556 + ], + [ + 95, + 546 + ], + [ + 98, + 455 + ], + [ + 87, + 453 + ], + [ + 86, + 447 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 219, + 425 + ], + [ + 218, + 420 + ], + [ + 221, + 415 + ], + [ + 228, + 415 + ], + [ + 231, + 418 + ], + [ + 231, + 421 + ], + [ + 231, + 425 + ], + [ + 235, + 426 + ], + [ + 238, + 428 + ], + [ + 238, + 434 + ], + [ + 237, + 437 + ], + [ + 241, + 437 + ], + [ + 247, + 522 + ], + [ + 237, + 529 + ], + [ + 248, + 531 + ], + [ + 255, + 537 + ], + [ + 255, + 542 + ], + [ + 193, + 548 + ], + [ + 194, + 541 + ], + [ + 201, + 537 + ], + [ + 222, + 531 + ], + [ + 222, + 528 + ], + [ + 217, + 524 + ], + [ + 208, + 436 + ], + [ + 213, + 436 + ], + [ + 213, + 431 + ], + [ + 215, + 428 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 353, + 505 + ], + [ + 352, + 437 + ], + [ + 353, + 431 + ], + [ + 356, + 427 + ], + [ + 357, + 422 + ], + [ + 359, + 418 + ], + [ + 364, + 418 + ], + [ + 366, + 419 + ], + [ + 367, + 424 + ], + [ + 368, + 426 + ], + [ + 370, + 428 + ], + [ + 373, + 434 + ], + [ + 374, + 439 + ], + [ + 378, + 504 + ], + [ + 369, + 511 + ], + [ + 379, + 512 + ], + [ + 383, + 518 + ], + [ + 379, + 521 + ], + [ + 346, + 524 + ], + [ + 326, + 521 + ], + [ + 329, + 517 + ], + [ + 338, + 513 + ], + [ + 356, + 510 + ], + [ + 358, + 510 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 322, + 513 + ], + [ + 318, + 
436 + ], + [ + 317, + 434 + ], + [ + 316, + 429 + ], + [ + 314, + 427 + ], + [ + 310, + 426 + ], + [ + 311, + 421 + ], + [ + 309, + 417 + ], + [ + 304, + 416 + ], + [ + 301, + 417 + ], + [ + 299, + 421 + ], + [ + 299, + 424 + ], + [ + 300, + 427 + ], + [ + 297, + 428 + ], + [ + 296, + 433 + ], + [ + 296, + 437 + ], + [ + 294, + 439 + ], + [ + 298, + 513 + ], + [ + 302, + 517 + ], + [ + 288, + 528 + ], + [ + 288, + 530 + ], + [ + 317, + 531 + ], + [ + 340, + 526 + ], + [ + 339, + 522 + ], + [ + 318, + 520 + ], + [ + 318, + 516 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 681, + 426 + ], + [ + 675, + 432 + ], + [ + 672, + 442 + ], + [ + 674, + 454 + ], + [ + 675, + 459 + ], + [ + 677, + 462 + ], + [ + 680, + 463 + ], + [ + 683, + 462 + ], + [ + 684, + 460 + ], + [ + 685, + 457 + ], + [ + 705, + 458 + ], + [ + 705, + 461 + ], + [ + 708, + 462 + ], + [ + 711, + 462 + ], + [ + 713, + 460 + ], + [ + 714, + 457 + ], + [ + 735, + 458 + ], + [ + 735, + 460 + ], + [ + 737, + 461 + ], + [ + 740, + 461 + ], + [ + 746, + 455 + ], + [ + 747, + 448 + ], + [ + 738, + 438 + ], + [ + 724, + 428 + ], + [ + 721, + 427 + ], + [ + 701, + 424 + ], + [ + 688, + 425 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 739, + 422 + ], + [ + 738, + 427 + ], + [ + 739, + 432 + ], + [ + 739, + 439 + ], + [ + 736, + 446 + ], + [ + 736, + 449 + ], + [ + 736, + 450 + ], + [ + 737, + 450 + ], + [ + 738, + 449 + ], + [ + 739, + 446 + ], + [ + 741, + 444 + ], + [ + 743, + 447 + ], + [ + 742, + 453 + ], + [ + 743, + 459 + ], + [ + 741, + 462 + ], + [ + 743, + 463 + ], + [ + 746, + 462 + ], + [ + 747, + 461 + ], + [ + 747, + 456 + ], + [ + 747, + 450 + ], + [ + 750, + 453 + ], + [ + 755, + 457 + ], + [ + 755, + 458 + ], + [ + 757, + 460 + ], + [ + 759, + 460 + ], + [ + 760, + 458 + ], + [ + 760, + 455 + ], + [ + 751, + 447 + ], + [ + 753, + 440 + ], + [ + 753, + 433 + ], + [ + 749, + 429 + ], + [ + 746, + 426 + ], + [ + 743, + 422 + ], + [ + 742, + 422 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 566, + 463 + ], + [ + 565, + 451 + ], + [ + 566, + 436 + ], + [ + 571, + 430 + ], + [ + 577, + 413 + ], + [ + 580, + 413 + ], + [ + 582, + 407 + ], + [ + 619, + 409 + ], + [ + 625, + 410 + ], + [ + 627, + 415 + ], + [ + 631, + 420 + ], + [ + 633, + 455 + ], + [ + 632, + 463 + ], + [ + 627, + 466 + ], + [ + 623, + 466 + ], + [ + 622, + 462 + ], + [ + 621, + 467 + ], + [ + 614, + 467 + ], + [ + 611, + 462 + ], + [ + 586, + 461 + ], + [ + 586, + 465 + ], + [ + 584, + 466 + ], + [ + 580, + 466 + ], + [ + 580, + 460 + ], + [ + 576, + 460 + ], + [ + 575, + 465 + ], + [ + 570, + 466 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 398, + 513 + ], + [ + 427, + 513 + ], + [ + 431, + 510 + ], + [ + 422, + 505 + ], + [ + 429, + 499 + ], + [ + 427, + 436 + ], + [ + 423, + 435 + ], + [ + 424, + 428 + ], + [ + 418, + 427 + ], + [ + 418, + 424 + ], + [ + 417, + 420 + ], + [ + 413, + 419 + ], + [ + 411, + 420 + ], + [ + 410, + 424 + ], + [ + 411, + 427 + ], + [ + 407, + 427 + ], + [ + 407, + 435 + ], + [ + 405, + 435 + ], + [ + 406, + 497 + ], + [ + 412, + 502 + ], + [ + 403, + 502 + ], + [ + 399, + 508 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 353, + 505 + ], + [ + 352, + 437 + ], + [ + 353, + 431 + ], + [ + 356, + 427 + ], + [ + 357, + 422 + ], + [ + 359, + 418 + ], + [ + 364, + 418 + ], + [ + 366, + 419 + ], + [ + 367, + 424 + ], + [ + 368, + 426 + ], + [ + 370, + 428 + ], + [ + 373, + 434 + ], + [ + 374, + 439 + ], + [ + 378, + 504 + ], + [ + 369, + 511 + ], + [ + 379, + 512 + ], 
+ [ + 383, + 518 + ], + [ + 379, + 521 + ], + [ + 346, + 524 + ], + [ + 326, + 521 + ], + [ + 329, + 517 + ], + [ + 338, + 513 + ], + [ + 356, + 510 + ], + [ + 358, + 510 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 322, + 513 + ], + [ + 318, + 436 + ], + [ + 317, + 434 + ], + [ + 316, + 429 + ], + [ + 314, + 427 + ], + [ + 310, + 426 + ], + [ + 311, + 421 + ], + [ + 309, + 417 + ], + [ + 304, + 416 + ], + [ + 301, + 417 + ], + [ + 299, + 421 + ], + [ + 299, + 424 + ], + [ + 300, + 427 + ], + [ + 297, + 428 + ], + [ + 296, + 433 + ], + [ + 296, + 437 + ], + [ + 294, + 439 + ], + [ + 298, + 513 + ], + [ + 302, + 517 + ], + [ + 288, + 528 + ], + [ + 288, + 530 + ], + [ + 317, + 531 + ], + [ + 340, + 526 + ], + [ + 339, + 522 + ], + [ + 318, + 520 + ], + [ + 318, + 516 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 219, + 425 + ], + [ + 218, + 420 + ], + [ + 221, + 415 + ], + [ + 228, + 415 + ], + [ + 231, + 418 + ], + [ + 231, + 421 + ], + [ + 231, + 425 + ], + [ + 235, + 426 + ], + [ + 238, + 428 + ], + [ + 238, + 434 + ], + [ + 237, + 437 + ], + [ + 241, + 437 + ], + [ + 247, + 522 + ], + [ + 237, + 529 + ], + [ + 248, + 531 + ], + [ + 255, + 537 + ], + [ + 255, + 542 + ], + [ + 193, + 548 + ], + [ + 194, + 541 + ], + [ + 201, + 537 + ], + [ + 222, + 531 + ], + [ + 222, + 528 + ], + [ + 217, + 524 + ], + [ + 208, + 436 + ], + [ + 213, + 436 + ], + [ + 213, + 431 + ], + [ + 215, + 428 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000081_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000081_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..6aab6979e2218a89526bfe093da4e0c029014f68 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000081_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000081_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000081_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..72729b4bd4cf80d99adfa69478e2dfb2e5545e6c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000081_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000082_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000082_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..3dc5e243d2f9c8b80fe608fb9ce225d709b0e6f0 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000082_000019_gtFine_polygons.json @@ -0,0 +1,3887 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1217, + 0 + ], 
+ [ + 716, + 0 + ], + [ + 880, + 262 + ], + [ + 1010, + 392 + ], + [ + 1071, + 391 + ], + [ + 1212, + 320 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 116, + 1025 + ], + [ + 992, + 480 + ], + [ + 1211, + 427 + ], + [ + 1446, + 430 + ], + [ + 2047, + 532 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1019, + 385 + ], + [ + 1018, + 370 + ], + [ + 1013, + 370 + ], + [ + 1011, + 299 + ], + [ + 1005, + 293 + ], + [ + 1004, + 278 + ], + [ + 1002, + 273 + ], + [ + 1002, + 241 + ], + [ + 991, + 225 + ], + [ + 975, + 200 + ], + [ + 948, + 191 + ], + [ + 923, + 172 + ], + [ + 908, + 169 + ], + [ + 895, + 167 + ], + [ + 891, + 160 + ], + [ + 873, + 159 + ], + [ + 859, + 131 + ], + [ + 812, + 76 + ], + [ + 768, + 25 + ], + [ + 759, + 13 + ], + [ + 754, + 1 + ], + [ + 0, + 1 + ], + [ + 0, + 625 + ], + [ + 1015, + 433 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1050, + 372 + ], + [ + 1032, + 372 + ], + [ + 1022, + 376 + ], + [ + 1000, + 376 + ], + [ + 999, + 344 + ], + [ + 981, + 344 + ], + [ + 982, + 371 + ], + [ + 976, + 371 + ], + [ + 967, + 351 + ], + [ + 951, + 348 + ], + [ + 942, + 366 + ], + [ + 942, + 437 + ], + [ + 999, + 498 + ], + [ + 1054, + 387 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1107, + 326 + ], + [ + 1107, + 302 + ], + [ + 1179, + 301 + ], + [ + 1174, + 14 + ], + [ + 1183, + 1 + ], + [ + 2048, + 1 + ], + [ + 2048, + 565 + ], + [ + 1578, + 488 + ], + [ + 1280, + 442 + ], + [ + 1189, + 439 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2048, + 593 + ], + [ + 1986, + 583 + ], + [ + 1991, + 488 + ], + [ + 2048, + 491 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2006, + 63 + ], + [ + 2007, + 139 + ], + [ + 2012, + 144 + ], + [ + 2014, + 475 + ], + [ + 2014, + 553 + ], + [ + 2027, + 555 + ], + [ + 2024, + 142 + ], + [ + 2029, + 140 + ], + [ + 2034, + 62 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1544, + 252 + ], + [ + 1544, + 290 + ], + [ + 1547, + 295 + ], + [ + 1547, + 405 + ], + [ + 1553, + 408 + ], + [ + 1552, + 295 + ], + [ + 1556, + 288 + ], + [ + 1556, + 253 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1625, + 286 + ], + [ + 1618, + 287 + ], + [ + 1589, + 311 + ], + [ + 1585, + 326 + ], + [ + 1587, + 423 + ], + [ + 1592, + 427 + ], + [ + 1588, + 325 + ], + [ + 1591, + 312 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1624, + 307 + ], + [ + 1624, + 281 + ], + [ + 1637, + 280 + ], + [ + 1638, + 308 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1605, + 373 + ], + [ + 1590, + 373 + ], + [ + 1590, + 399 + ], + [ + 1605, + 397 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1572, + 374 + ], + [ + 1572, + 398 + ], + [ + 1587, + 398 + ], + [ + 1586, + 373 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1640, + 419 + ], + [ + 1640, + 353 + ], + [ + 1646, + 351 + ], + [ + 1646, + 341 + ], + [ + 1630, + 339 + ], + [ + 1629, + 351 + ], + [ + 1634, + 352 + ], + [ + 1634, + 411 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1672, + 417 + ], + [ + 1670, + 352 + ], + [ + 1662, + 353 + ], + [ + 1665, + 418 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1667, + 302 + ], + [ + 1641, + 307 + ], + [ + 1641, + 314 + ], + [ + 1654, + 318 + ], + [ + 1655, + 322 + ], + [ + 1641, + 325 + ], + [ + 1642, + 330 + ], + [ + 1656, + 335 + ], + [ + 1654, + 340 + ], + [ + 1642, + 343 + ], + [ + 1642, + 349 + ], + [ + 1655, + 352 + ], + [ + 1655, + 356 + ], + [ + 1665, + 355 + ] + ] + }, 
+ { + "label": "traffic light", + "polygon": [ + [ + 1680, + 353 + ], + [ + 1682, + 298 + ], + [ + 1659, + 298 + ], + [ + 1661, + 359 + ], + [ + 1675, + 358 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1643, + 229 + ], + [ + 1654, + 227 + ], + [ + 1665, + 229 + ], + [ + 1674, + 237 + ], + [ + 1676, + 245 + ], + [ + 1676, + 256 + ], + [ + 1669, + 267 + ], + [ + 1654, + 272 + ], + [ + 1642, + 269 + ], + [ + 1637, + 263 + ], + [ + 1633, + 252 + ], + [ + 1635, + 238 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1674, + 281 + ], + [ + 1654, + 264 + ], + [ + 1635, + 283 + ], + [ + 1654, + 304 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1601, + 418 + ], + [ + 1585, + 416 + ], + [ + 1563, + 416 + ], + [ + 1564, + 433 + ], + [ + 1583, + 450 + ], + [ + 1600, + 431 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1733, + 412 + ], + [ + 1679, + 408 + ], + [ + 1624, + 408 + ], + [ + 1606, + 411 + ], + [ + 1596, + 418 + ], + [ + 1577, + 435 + ], + [ + 1604, + 541 + ], + [ + 1618, + 537 + ], + [ + 1626, + 530 + ], + [ + 1649, + 528 + ], + [ + 1730, + 419 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1304, + 373 + ], + [ + 1310, + 353 + ], + [ + 1307, + 340 + ], + [ + 1295, + 319 + ], + [ + 1288, + 302 + ], + [ + 1280, + 277 + ], + [ + 1268, + 278 + ], + [ + 1256, + 255 + ], + [ + 1230, + 253 + ], + [ + 1203, + 262 + ], + [ + 1190, + 286 + ], + [ + 1174, + 296 + ], + [ + 1183, + 310 + ], + [ + 1186, + 326 + ], + [ + 1204, + 443 + ], + [ + 1248, + 438 + ], + [ + 1291, + 433 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1234, + 425 + ], + [ + 1227, + 430 + ], + [ + 1224, + 450 + ], + [ + 1236, + 454 + ], + [ + 1257, + 459 + ], + [ + 1283, + 460 + ], + [ + 1293, + 417 + ], + [ + 1252, + 421 + ], + [ + 1248, + 426 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1230, + 459 + ], + [ + 1233, + 437 + ], + [ + 1230, + 426 + ], + [ + 1228, + 416 + ], + [ + 1221, + 418 + ], + [ + 1218, + 429 + ], + [ + 1219, + 448 + ], + [ + 1223, + 462 + ], + [ + 1227, + 463 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1816, + 396 + ], + [ + 1804, + 392 + ], + [ + 1774, + 390 + ], + [ + 1746, + 395 + ], + [ + 1732, + 407 + ], + [ + 1714, + 426 + ], + [ + 1694, + 450 + ], + [ + 1677, + 449 + ], + [ + 1665, + 451 + ], + [ + 1664, + 458 + ], + [ + 1675, + 465 + ], + [ + 1656, + 476 + ], + [ + 1648, + 489 + ], + [ + 1642, + 514 + ], + [ + 1641, + 546 + ], + [ + 1644, + 560 + ], + [ + 1649, + 566 + ], + [ + 1658, + 570 + ], + [ + 1673, + 570 + ], + [ + 1681, + 562 + ], + [ + 1681, + 554 + ], + [ + 1727, + 561 + ], + [ + 1728, + 578 + ], + [ + 1733, + 588 + ], + [ + 1738, + 592 + ], + [ + 1762, + 592 + ], + [ + 1772, + 584 + ], + [ + 1777, + 566 + ], + [ + 1872, + 572 + ], + [ + 1894, + 576 + ], + [ + 1904, + 573 + ], + [ + 1938, + 576 + ], + [ + 1942, + 590 + ], + [ + 1958, + 597 + ], + [ + 1977, + 597 + ], + [ + 1991, + 590 + ], + [ + 1999, + 571 + ], + [ + 2011, + 565 + ], + [ + 2020, + 553 + ], + [ + 2028, + 506 + ], + [ + 2027, + 489 + ], + [ + 2014, + 467 + ], + [ + 1969, + 407 + ], + [ + 1944, + 395 + ], + [ + 1924, + 393 + ], + [ + 1895, + 393 + ], + [ + 1886, + 397 + ], + [ + 1846, + 394 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1872, + 508 + ], + [ + 1874, + 489 + ], + [ + 1950, + 491 + ], + [ + 1950, + 510 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 62, + 148 + ], + [ + 78, + 129 + ], + [ + 94, + 118 + ], + [ + 127, + 126 + ], + [ + 121, + 88 + ], + [ + 129, + 53 + ], + [ + 
176, + 53 + ], + [ + 203, + 83 + ], + [ + 218, + 75 + ], + [ + 217, + 47 + ], + [ + 247, + 66 + ], + [ + 260, + 64 + ], + [ + 249, + 42 + ], + [ + 278, + 53 + ], + [ + 307, + 63 + ], + [ + 309, + 47 + ], + [ + 316, + 25 + ], + [ + 325, + 1 + ], + [ + 60, + 1 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 148, + 296 + ], + [ + 138, + 0 + ], + [ + 153, + 0 + ], + [ + 164, + 246 + ], + [ + 171, + 427 + ], + [ + 151, + 428 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 277, + 258 + ], + [ + 277, + 232 + ], + [ + 283, + 232 + ], + [ + 285, + 228 + ], + [ + 294, + 230 + ], + [ + 292, + 262 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 354, + 301 + ], + [ + 342, + 301 + ], + [ + 339, + 302 + ], + [ + 339, + 311 + ], + [ + 332, + 315 + ], + [ + 332, + 318 + ], + [ + 340, + 319 + ], + [ + 340, + 322 + ], + [ + 334, + 325 + ], + [ + 333, + 327 + ], + [ + 340, + 329 + ], + [ + 340, + 334 + ], + [ + 335, + 336 + ], + [ + 335, + 339 + ], + [ + 342, + 341 + ], + [ + 345, + 345 + ], + [ + 356, + 344 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 353, + 424 + ], + [ + 348, + 284 + ], + [ + 346, + 273 + ], + [ + 340, + 265 + ], + [ + 303, + 235 + ], + [ + 293, + 233 + ], + [ + 292, + 238 + ], + [ + 302, + 239 + ], + [ + 340, + 270 + ], + [ + 344, + 279 + ], + [ + 346, + 306 + ], + [ + 350, + 427 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 356, + 400 + ], + [ + 355, + 365 + ], + [ + 339, + 365 + ], + [ + 339, + 400 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 287, + 156 + ], + [ + 296, + 427 + ], + [ + 307, + 428 + ], + [ + 295, + 157 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 506, + 298 + ], + [ + 505, + 336 + ], + [ + 513, + 339 + ], + [ + 515, + 430 + ], + [ + 520, + 429 + ], + [ + 516, + 298 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 584, + 131 + ], + [ + 589, + 422 + ], + [ + 588, + 438 + ], + [ + 598, + 429 + ], + [ + 592, + 127 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 777, + 367 + ], + [ + 773, + 229 + ], + [ + 768, + 229 + ], + [ + 769, + 367 + ], + [ + 771, + 391 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 837, + 436 + ], + [ + 835, + 379 + ], + [ + 837, + 371 + ], + [ + 836, + 285 + ], + [ + 869, + 285 + ], + [ + 923, + 302 + ], + [ + 928, + 436 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 783, + 370 + ], + [ + 769, + 356 + ], + [ + 758, + 353 + ], + [ + 754, + 355 + ], + [ + 750, + 366 + ], + [ + 748, + 378 + ], + [ + 745, + 388 + ], + [ + 737, + 402 + ], + [ + 731, + 419 + ], + [ + 730, + 433 + ], + [ + 794, + 432 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 902, + 446 + ], + [ + 899, + 410 + ], + [ + 896, + 391 + ], + [ + 893, + 375 + ], + [ + 890, + 369 + ], + [ + 879, + 366 + ], + [ + 868, + 371 + ], + [ + 861, + 385 + ], + [ + 857, + 397 + ], + [ + 846, + 413 + ], + [ + 842, + 426 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 289, + 426 + ], + [ + 285, + 403 + ], + [ + 296, + 381 + ], + [ + 294, + 353 + ], + [ + 285, + 330 + ], + [ + 271, + 318 + ], + [ + 279, + 308 + ], + [ + 277, + 290 + ], + [ + 256, + 253 + ], + [ + 228, + 232 + ], + [ + 215, + 223 + ], + [ + 194, + 223 + ], + [ + 182, + 227 + ], + [ + 170, + 237 + ], + [ + 163, + 248 + ], + [ + 158, + 267 + ], + [ + 157, + 282 + ], + [ + 141, + 297 + ], + [ + 120, + 333 + ], + [ + 122, + 349 + ], + [ + 132, + 357 + ], + [ + 149, + 366 + ], + [ + 171, + 390 + ], + [ + 171, + 406 + ], + [ + 156, + 393 + ], + [ + 141, + 391 + ], + [ + 131, + 397 
+ ], + [ + 129, + 410 + ], + [ + 128, + 423 + ], + [ + 201, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 974, + 429 + ], + [ + 972, + 334 + ], + [ + 966, + 337 + ], + [ + 967, + 426 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 939, + 289 + ], + [ + 937, + 260 + ], + [ + 944, + 250 + ], + [ + 942, + 176 + ], + [ + 943, + 158 + ], + [ + 949, + 142 + ], + [ + 955, + 132 + ], + [ + 966, + 120 + ], + [ + 974, + 115 + ], + [ + 986, + 109 + ], + [ + 1132, + 72 + ], + [ + 1171, + 69 + ], + [ + 1175, + 68 + ], + [ + 1174, + 73 + ], + [ + 1134, + 76 + ], + [ + 1061, + 95 + ], + [ + 993, + 113 + ], + [ + 976, + 120 + ], + [ + 968, + 127 + ], + [ + 960, + 136 + ], + [ + 953, + 147 + ], + [ + 949, + 163 + ], + [ + 948, + 178 + ], + [ + 948, + 196 + ], + [ + 951, + 265 + ], + [ + 952, + 272 + ], + [ + 954, + 281 + ], + [ + 956, + 450 + ], + [ + 934, + 445 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 989, + 330 + ], + [ + 976, + 329 + ], + [ + 979, + 323 + ], + [ + 990, + 320 + ], + [ + 989, + 312 + ], + [ + 980, + 312 + ], + [ + 981, + 305 + ], + [ + 988, + 304 + ], + [ + 987, + 297 + ], + [ + 961, + 296 + ], + [ + 962, + 342 + ], + [ + 968, + 348 + ], + [ + 973, + 347 + ], + [ + 977, + 341 + ], + [ + 989, + 336 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 952, + 341 + ], + [ + 943, + 291 + ], + [ + 959, + 292 + ], + [ + 961, + 292 + ], + [ + 962, + 341 + ], + [ + 960, + 343 + ], + [ + 960, + 350 + ], + [ + 952, + 350 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 978, + 198 + ], + [ + 970, + 201 + ], + [ + 965, + 206 + ], + [ + 962, + 231 + ], + [ + 966, + 237 + ], + [ + 972, + 241 + ], + [ + 984, + 241 + ], + [ + 990, + 237 + ], + [ + 994, + 229 + ], + [ + 996, + 218 + ], + [ + 992, + 206 + ], + [ + 988, + 201 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 996, + 262 + ], + [ + 995, + 237 + ], + [ + 966, + 239 + ], + [ + 967, + 263 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 995, + 277 + ], + [ + 956, + 278 + ], + [ + 976, + 331 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 920, + 178 + ], + [ + 920, + 183 + ], + [ + 919, + 186 + ], + [ + 921, + 191 + ], + [ + 920, + 198 + ], + [ + 916, + 202 + ], + [ + 915, + 207 + ], + [ + 918, + 210 + ], + [ + 920, + 212 + ], + [ + 920, + 218 + ], + [ + 915, + 222 + ], + [ + 915, + 226 + ], + [ + 917, + 229 + ], + [ + 920, + 232 + ], + [ + 920, + 239 + ], + [ + 916, + 240 + ], + [ + 916, + 247 + ], + [ + 920, + 251 + ], + [ + 921, + 255 + ], + [ + 927, + 255 + ], + [ + 929, + 259 + ], + [ + 940, + 258 + ], + [ + 939, + 185 + ], + [ + 943, + 185 + ], + [ + 942, + 174 + ], + [ + 926, + 174 + ], + [ + 925, + 179 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 966, + 194 + ], + [ + 962, + 191 + ], + [ + 954, + 192 + ], + [ + 945, + 197 + ], + [ + 944, + 202 + ], + [ + 944, + 207 + ], + [ + 946, + 209 + ], + [ + 946, + 215 + ], + [ + 942, + 219 + ], + [ + 942, + 224 + ], + [ + 944, + 227 + ], + [ + 947, + 230 + ], + [ + 946, + 235 + ], + [ + 943, + 238 + ], + [ + 941, + 241 + ], + [ + 942, + 246 + ], + [ + 944, + 249 + ], + [ + 944, + 255 + ], + [ + 965, + 256 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 858, + 301 + ], + [ + 799, + 275 + ], + [ + 800, + 262 + ], + [ + 930, + 261 + ], + [ + 938, + 267 + ], + [ + 938, + 272 + ], + [ + 927, + 272 + ], + [ + 927, + 278 + ], + [ + 946, + 293 + ], + [ + 946, + 422 + ], + [ + 903, + 426 + ], + [ + 901, + 301 + ] + ] + }, + { + "label": "pole", + 
"polygon": [ + [ + 816, + 354 + ], + [ + 815, + 231 + ], + [ + 803, + 225 + ], + [ + 803, + 236 + ], + [ + 806, + 347 + ], + [ + 810, + 365 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 754, + 77 + ], + [ + 757, + 238 + ], + [ + 803, + 236 + ], + [ + 860, + 236 + ], + [ + 856, + 75 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 771, + 440 + ], + [ + 763, + 418 + ], + [ + 764, + 392 + ], + [ + 760, + 387 + ], + [ + 753, + 383 + ], + [ + 749, + 378 + ], + [ + 750, + 370 + ], + [ + 752, + 363 + ], + [ + 755, + 361 + ], + [ + 762, + 362 + ], + [ + 768, + 364 + ], + [ + 770, + 367 + ], + [ + 772, + 358 + ], + [ + 777, + 352 + ], + [ + 779, + 347 + ], + [ + 772, + 342 + ], + [ + 768, + 332 + ], + [ + 763, + 322 + ], + [ + 763, + 315 + ], + [ + 768, + 309 + ], + [ + 778, + 303 + ], + [ + 785, + 303 + ], + [ + 790, + 306 + ], + [ + 795, + 311 + ], + [ + 798, + 319 + ], + [ + 797, + 327 + ], + [ + 798, + 331 + ], + [ + 804, + 338 + ], + [ + 811, + 344 + ], + [ + 816, + 348 + ], + [ + 821, + 364 + ], + [ + 826, + 391 + ], + [ + 831, + 411 + ], + [ + 832, + 427 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 996, + 524 + ], + [ + 868, + 616 + ], + [ + 615, + 809 + ], + [ + 324, + 1023 + ], + [ + 0, + 1023 + ], + [ + 1, + 936 + ], + [ + 985, + 492 + ], + [ + 1004, + 500 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 477, + 60 + ], + [ + 480, + 131 + ], + [ + 685, + 127 + ], + [ + 692, + 93 + ], + [ + 685, + 58 + ], + [ + 671, + 55 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 705, + 427 + ], + [ + 697, + 42 + ], + [ + 681, + 41 + ], + [ + 689, + 428 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 341, + 418 + ], + [ + 104, + 416 + ], + [ + 101, + 403 + ], + [ + 31, + 405 + ], + [ + 31, + 414 + ], + [ + 0, + 414 + ], + [ + 0, + 1023 + ], + [ + 64, + 1023 + ], + [ + 88, + 1001 + ], + [ + 90, + 975 + ], + [ + 408, + 805 + ], + [ + 408, + 811 + ], + [ + 437, + 809 + ], + [ + 436, + 797 + ], + [ + 445, + 784 + ], + [ + 591, + 708 + ], + [ + 592, + 713 + ], + [ + 611, + 712 + ], + [ + 611, + 702 + ], + [ + 619, + 699 + ], + [ + 619, + 693 + ], + [ + 707, + 647 + ], + [ + 708, + 655 + ], + [ + 715, + 652 + ], + [ + 717, + 641 + ], + [ + 771, + 609 + ], + [ + 772, + 614 + ], + [ + 781, + 612 + ], + [ + 781, + 606 + ], + [ + 786, + 605 + ], + [ + 786, + 601 + ], + [ + 823, + 584 + ], + [ + 824, + 588 + ], + [ + 833, + 586 + ], + [ + 835, + 581 + ], + [ + 837, + 576 + ], + [ + 860, + 566 + ], + [ + 862, + 568 + ], + [ + 871, + 567 + ], + [ + 874, + 558 + ], + [ + 893, + 549 + ], + [ + 895, + 553 + ], + [ + 901, + 550 + ], + [ + 902, + 543 + ], + [ + 915, + 537 + ], + [ + 916, + 539 + ], + [ + 920, + 539 + ], + [ + 923, + 533 + ], + [ + 930, + 529 + ], + [ + 944, + 524 + ], + [ + 967, + 514 + ], + [ + 980, + 511 + ], + [ + 993, + 506 + ], + [ + 992, + 417 + ], + [ + 943, + 419 + ], + [ + 855, + 419 + ], + [ + 835, + 419 + ], + [ + 833, + 416 + ], + [ + 819, + 416 + ], + [ + 819, + 418 + ], + [ + 782, + 417 + ], + [ + 782, + 413 + ], + [ + 766, + 413 + ], + [ + 766, + 417 + ], + [ + 715, + 417 + ], + [ + 715, + 413 + ], + [ + 694, + 414 + ], + [ + 694, + 417 + ], + [ + 616, + 418 + ], + [ + 615, + 413 + ], + [ + 584, + 413 + ], + [ + 584, + 418 + ], + [ + 440, + 418 + ], + [ + 440, + 411 + ], + [ + 402, + 411 + ], + [ + 402, + 417 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1171, + 117 + ], + [ + 1170, + 53 + ], + [ + 1171, + 50 + ], + [ + 1174, + 49 + ], + [ + 1198, + 49 + ], + [ + 1202, + 50 + ], + [ + 1203, + 54 + 
], + [ + 1203, + 116 + ], + [ + 1202, + 119 + ], + [ + 1198, + 120 + ], + [ + 1175, + 121 + ], + [ + 1173, + 120 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1031, + 409 + ], + [ + 1030, + 399 + ], + [ + 1030, + 395 + ], + [ + 1032, + 394 + ], + [ + 1037, + 393 + ], + [ + 1041, + 364 + ], + [ + 1048, + 326 + ], + [ + 1050, + 322 + ], + [ + 1055, + 319 + ], + [ + 1063, + 318 + ], + [ + 1119, + 317 + ], + [ + 1165, + 318 + ], + [ + 1192, + 321 + ], + [ + 1197, + 323 + ], + [ + 1201, + 326 + ], + [ + 1207, + 370 + ], + [ + 1209, + 396 + ], + [ + 1214, + 398 + ], + [ + 1214, + 419 + ], + [ + 1210, + 439 + ], + [ + 1208, + 451 + ], + [ + 1204, + 461 + ], + [ + 1041, + 434 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 995, + 533 + ], + [ + 992, + 510 + ], + [ + 993, + 487 + ], + [ + 997, + 472 + ], + [ + 1001, + 458 + ], + [ + 995, + 457 + ], + [ + 992, + 455 + ], + [ + 991, + 451 + ], + [ + 991, + 446 + ], + [ + 995, + 442 + ], + [ + 1003, + 441 + ], + [ + 1011, + 441 + ], + [ + 1021, + 418 + ], + [ + 1032, + 401 + ], + [ + 1037, + 396 + ], + [ + 1043, + 395 + ], + [ + 1085, + 393 + ], + [ + 1116, + 393 + ], + [ + 1153, + 395 + ], + [ + 1176, + 397 + ], + [ + 1179, + 402 + ], + [ + 1194, + 429 + ], + [ + 1203, + 447 + ], + [ + 1207, + 445 + ], + [ + 1220, + 445 + ], + [ + 1224, + 448 + ], + [ + 1225, + 453 + ], + [ + 1224, + 457 + ], + [ + 1222, + 459 + ], + [ + 1210, + 460 + ], + [ + 1212, + 463 + ], + [ + 1213, + 468 + ], + [ + 1213, + 474 + ], + [ + 1217, + 489 + ], + [ + 1217, + 512 + ], + [ + 1215, + 527 + ], + [ + 1214, + 549 + ], + [ + 1214, + 572 + ], + [ + 1209, + 577 + ], + [ + 1190, + 578 + ], + [ + 1185, + 576 + ], + [ + 1183, + 572 + ], + [ + 1182, + 551 + ], + [ + 1067, + 549 + ], + [ + 1028, + 552 + ], + [ + 1028, + 571 + ], + [ + 1024, + 577 + ], + [ + 1014, + 578 + ], + [ + 998, + 577 + ], + [ + 993, + 571 + ], + [ + 994, + 549 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1066, + 488 + ], + [ + 1067, + 470 + ], + [ + 1138, + 471 + ], + [ + 1138, + 489 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1273, + 500 + ], + [ + 1273, + 484 + ], + [ + 1272, + 467 + ], + [ + 1272, + 460 + ], + [ + 1275, + 455 + ], + [ + 1276, + 448 + ], + [ + 1281, + 432 + ], + [ + 1282, + 428 + ], + [ + 1266, + 427 + ], + [ + 1263, + 426 + ], + [ + 1263, + 421 + ], + [ + 1265, + 416 + ], + [ + 1266, + 414 + ], + [ + 1279, + 414 + ], + [ + 1281, + 418 + ], + [ + 1287, + 417 + ], + [ + 1293, + 393 + ], + [ + 1299, + 374 + ], + [ + 1304, + 369 + ], + [ + 1310, + 364 + ], + [ + 1315, + 362 + ], + [ + 1320, + 358 + ], + [ + 1318, + 356 + ], + [ + 1327, + 351 + ], + [ + 1404, + 352 + ], + [ + 1451, + 354 + ], + [ + 1454, + 365 + ], + [ + 1458, + 367 + ], + [ + 1465, + 376 + ], + [ + 1470, + 394 + ], + [ + 1307, + 536 + ], + [ + 1293, + 533 + ], + [ + 1289, + 524 + ], + [ + 1274, + 522 + ], + [ + 1272, + 519 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1309, + 565 + ], + [ + 1305, + 557 + ], + [ + 1306, + 526 + ], + [ + 1306, + 508 + ], + [ + 1306, + 491 + ], + [ + 1306, + 481 + ], + [ + 1309, + 477 + ], + [ + 1326, + 457 + ], + [ + 1324, + 455 + ], + [ + 1306, + 453 + ], + [ + 1304, + 451 + ], + [ + 1305, + 442 + ], + [ + 1308, + 440 + ], + [ + 1313, + 439 + ], + [ + 1329, + 437 + ], + [ + 1330, + 442 + ], + [ + 1328, + 452 + ], + [ + 1332, + 451 + ], + [ + 1335, + 440 + ], + [ + 1345, + 419 + ], + [ + 1358, + 401 + ], + [ + 1368, + 393 + ], + [ + 1376, + 390 + ], + [ + 1381, + 389 + ], + [ + 1381, + 379 + ], + [ + 1381, + 371 + ], + [ + 1386, + 
366 + ], + [ + 1413, + 365 + ], + [ + 1426, + 367 + ], + [ + 1430, + 369 + ], + [ + 1430, + 380 + ], + [ + 1430, + 387 + ], + [ + 1468, + 387 + ], + [ + 1507, + 389 + ], + [ + 1533, + 392 + ], + [ + 1544, + 393 + ], + [ + 1554, + 397 + ], + [ + 1558, + 402 + ], + [ + 1583, + 433 + ], + [ + 1600, + 461 + ], + [ + 1607, + 472 + ], + [ + 1610, + 482 + ], + [ + 1611, + 492 + ], + [ + 1611, + 500 + ], + [ + 1618, + 514 + ], + [ + 1618, + 522 + ], + [ + 1609, + 550 + ], + [ + 1608, + 570 + ], + [ + 1606, + 586 + ], + [ + 1602, + 595 + ], + [ + 1592, + 600 + ], + [ + 1578, + 600 + ], + [ + 1568, + 594 + ], + [ + 1565, + 587 + ], + [ + 1561, + 574 + ], + [ + 1504, + 568 + ], + [ + 1387, + 567 + ], + [ + 1384, + 580 + ], + [ + 1374, + 593 + ], + [ + 1360, + 596 + ], + [ + 1342, + 593 + ], + [ + 1338, + 585 + ], + [ + 1334, + 569 + ], + [ + 1314, + 567 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1455, + 492 + ], + [ + 1455, + 472 + ], + [ + 1537, + 474 + ], + [ + 1536, + 495 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000083_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000083_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..8cac184e0cbfa7a3ce612fc4cf18a15f301b4f7d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000083_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000084_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000084_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..d982551551a674ff59e987f33be01e254569fdba --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000084_000019_gtFine_polygons.json @@ -0,0 +1,6522 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1148, + 240 + ], + [ + 1025, + 404 + ], + [ + 970, + 421 + ], + [ + 692, + 339 + ], + [ + 619, + 0 + ], + [ + 1131, + 0 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 1452, + 0 + ], + [ + 1458, + 146 + ], + [ + 1552, + 213 + ], + [ + 1828, + 216 + ], + [ + 2048, + 190 + ], + [ + 2046, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2048, + 401 + ], + [ + 973, + 421 + ], + [ + 650, + 423 + ], + [ + 0, + 421 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 991, + 259 + ], + [ + 992, + 401 + ], + [ + 1053, + 365 + ], + [ + 1079, + 256 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1110, + 235 + ], + [ + 1110, + 1 + ], + [ + 1498, + 2 + ], + [ + 1497, + 159 + ], + [ + 1557, + 179 + ], + [ + 1787, + 181 + ], + [ + 1898, + 177 + ], + [ + 2001, 
+ 152 + ], + [ + 2048, + 141 + ], + [ + 2047, + 498 + ], + [ + 1602, + 498 + ], + [ + 1377, + 471 + ], + [ + 1336, + 466 + ], + [ + 1209, + 452 + ], + [ + 1129, + 407 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 782, + 450 + ], + [ + 737, + 462 + ], + [ + 696, + 464 + ], + [ + 609, + 471 + ], + [ + 485, + 483 + ], + [ + 382, + 492 + ], + [ + 337, + 498 + ], + [ + 229, + 505 + ], + [ + 29, + 510 + ], + [ + 0, + 511 + ], + [ + 0, + 431 + ], + [ + 726, + 400 + ], + [ + 815, + 424 + ], + [ + 824, + 430 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 720, + 73 + ], + [ + 706, + 71 + ], + [ + 704, + 32 + ], + [ + 700, + 20 + ], + [ + 700, + 1 + ], + [ + 696, + 32 + ], + [ + 696, + 45 + ], + [ + 672, + 19 + ], + [ + 674, + 7 + ], + [ + 669, + 0 + ], + [ + 0, + 1 + ], + [ + 0, + 485 + ], + [ + 128, + 483 + ], + [ + 232, + 479 + ], + [ + 401, + 471 + ], + [ + 455, + 465 + ], + [ + 497, + 465 + ], + [ + 634, + 458 + ], + [ + 688, + 454 + ], + [ + 727, + 454 + ], + [ + 755, + 451 + ], + [ + 788, + 452 + ], + [ + 868, + 406 + ], + [ + 848, + 335 + ], + [ + 773, + 336 + ], + [ + 773, + 313 + ], + [ + 713, + 313 + ], + [ + 708, + 150 + ], + [ + 733, + 106 + ], + [ + 732, + 103 + ], + [ + 719, + 100 + ], + [ + 717, + 96 + ], + [ + 726, + 94 + ], + [ + 729, + 90 + ], + [ + 724, + 87 + ], + [ + 708, + 86 + ], + [ + 708, + 80 + ], + [ + 721, + 79 + ], + [ + 724, + 76 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 959, + 381 + ], + [ + 948, + 381 + ], + [ + 947, + 379 + ], + [ + 921, + 379 + ], + [ + 911, + 383 + ], + [ + 907, + 400 + ], + [ + 989, + 466 + ], + [ + 1078, + 462 + ], + [ + 1154, + 408 + ], + [ + 1130, + 371 + ], + [ + 1024, + 369 + ], + [ + 1009, + 372 + ], + [ + 1002, + 377 + ], + [ + 998, + 388 + ], + [ + 993, + 390 + ], + [ + 988, + 396 + ], + [ + 981, + 399 + ], + [ + 969, + 400 + ], + [ + 968, + 391 + ], + [ + 963, + 385 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 693, + 362 + ], + [ + 702, + 359 + ], + [ + 707, + 354 + ], + [ + 712, + 351 + ], + [ + 711, + 343 + ], + [ + 715, + 335 + ], + [ + 724, + 336 + ], + [ + 732, + 337 + ], + [ + 735, + 345 + ], + [ + 739, + 349 + ], + [ + 747, + 356 + ], + [ + 752, + 359 + ], + [ + 762, + 344 + ], + [ + 767, + 330 + ], + [ + 770, + 326 + ], + [ + 776, + 332 + ], + [ + 779, + 336 + ], + [ + 797, + 350 + ], + [ + 809, + 350 + ], + [ + 813, + 342 + ], + [ + 818, + 340 + ], + [ + 815, + 332 + ], + [ + 819, + 331 + ], + [ + 826, + 320 + ], + [ + 830, + 316 + ], + [ + 830, + 302 + ], + [ + 836, + 309 + ], + [ + 842, + 315 + ], + [ + 844, + 324 + ], + [ + 849, + 317 + ], + [ + 850, + 312 + ], + [ + 846, + 309 + ], + [ + 841, + 301 + ], + [ + 844, + 298 + ], + [ + 845, + 292 + ], + [ + 840, + 286 + ], + [ + 851, + 288 + ], + [ + 851, + 282 + ], + [ + 844, + 276 + ], + [ + 842, + 263 + ], + [ + 843, + 251 + ], + [ + 850, + 246 + ], + [ + 861, + 245 + ], + [ + 865, + 244 + ], + [ + 870, + 246 + ], + [ + 873, + 253 + ], + [ + 869, + 257 + ], + [ + 868, + 265 + ], + [ + 870, + 271 + ], + [ + 867, + 275 + ], + [ + 868, + 281 + ], + [ + 866, + 284 + ], + [ + 870, + 292 + ], + [ + 874, + 287 + ], + [ + 877, + 288 + ], + [ + 884, + 296 + ], + [ + 888, + 306 + ], + [ + 884, + 316 + ], + [ + 885, + 320 + ], + [ + 891, + 318 + ], + [ + 900, + 320 + ], + [ + 894, + 323 + ], + [ + 894, + 326 + ], + [ + 907, + 332 + ], + [ + 908, + 341 + ], + [ + 908, + 347 + ], + [ + 911, + 350 + ], + [ + 911, + 358 + ], + [ + 909, + 361 + ], + [ + 914, + 361 + ], + [ + 921, + 363 + ], + [ + 921, + 368 + ], + [ + 921, + 373 + 
], + [ + 926, + 372 + ], + [ + 930, + 375 + ], + [ + 932, + 379 + ], + [ + 934, + 387 + ], + [ + 929, + 397 + ], + [ + 922, + 406 + ], + [ + 787, + 437 + ], + [ + 700, + 381 + ], + [ + 699, + 370 + ], + [ + 693, + 366 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 827, + 439 + ], + [ + 800, + 428 + ], + [ + 790, + 429 + ], + [ + 791, + 415 + ], + [ + 792, + 400 + ], + [ + 778, + 400 + ], + [ + 772, + 403 + ], + [ + 770, + 391 + ], + [ + 767, + 392 + ], + [ + 768, + 403 + ], + [ + 760, + 399 + ], + [ + 761, + 385 + ], + [ + 741, + 385 + ], + [ + 741, + 380 + ], + [ + 735, + 376 + ], + [ + 728, + 374 + ], + [ + 706, + 373 + ], + [ + 700, + 373 + ], + [ + 699, + 390 + ], + [ + 686, + 397 + ], + [ + 686, + 455 + ], + [ + 747, + 455 + ], + [ + 780, + 454 + ], + [ + 808, + 447 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1031, + 481 + ], + [ + 995, + 480 + ], + [ + 991, + 453 + ], + [ + 1002, + 453 + ], + [ + 1027, + 456 + ], + [ + 1046, + 463 + ], + [ + 1035, + 480 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 781, + 467 + ], + [ + 760, + 473 + ], + [ + 739, + 479 + ], + [ + 733, + 484 + ], + [ + 727, + 488 + ], + [ + 727, + 492 + ], + [ + 779, + 491 + ], + [ + 788, + 476 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 716, + 322 + ], + [ + 718, + 432 + ], + [ + 718, + 455 + ], + [ + 721, + 455 + ], + [ + 722, + 322 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 709, + 431 + ], + [ + 709, + 455 + ], + [ + 719, + 455 + ], + [ + 719, + 431 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 728, + 284 + ], + [ + 731, + 455 + ], + [ + 736, + 455 + ], + [ + 734, + 284 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 738, + 288 + ], + [ + 737, + 289 + ], + [ + 726, + 289 + ], + [ + 724, + 287 + ], + [ + 715, + 287 + ], + [ + 715, + 289 + ], + [ + 715, + 290 + ], + [ + 725, + 291 + ], + [ + 726, + 290 + ], + [ + 737, + 290 + ], + [ + 738, + 291 + ], + [ + 748, + 290 + ], + [ + 748, + 290 + ], + [ + 746, + 287 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 752, + 305 + ], + [ + 753, + 455 + ], + [ + 758, + 455 + ], + [ + 757, + 305 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 783, + 320 + ], + [ + 788, + 323 + ], + [ + 793, + 324 + ], + [ + 796, + 329 + ], + [ + 795, + 331 + ], + [ + 793, + 332 + ], + [ + 793, + 337 + ], + [ + 794, + 339 + ], + [ + 796, + 341 + ], + [ + 796, + 344 + ], + [ + 793, + 345 + ], + [ + 794, + 368 + ], + [ + 780, + 370 + ], + [ + 780, + 320 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 767, + 336 + ], + [ + 756, + 337 + ], + [ + 756, + 342 + ], + [ + 764, + 344 + ], + [ + 764, + 349 + ], + [ + 756, + 350 + ], + [ + 756, + 356 + ], + [ + 764, + 356 + ], + [ + 765, + 362 + ], + [ + 757, + 364 + ], + [ + 756, + 368 + ], + [ + 765, + 371 + ], + [ + 766, + 374 + ], + [ + 780, + 379 + ], + [ + 779, + 334 + ], + [ + 774, + 334 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 805, + 357 + ], + [ + 805, + 349 + ], + [ + 791, + 349 + ], + [ + 782, + 348 + ], + [ + 782, + 377 + ], + [ + 797, + 371 + ], + [ + 805, + 369 + ], + [ + 805, + 363 + ], + [ + 795, + 363 + ], + [ + 795, + 358 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1041, + 255 + ], + [ + 1044, + 452 + ], + [ + 1050, + 449 + ], + [ + 1050, + 436 + ], + [ + 1046, + 254 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1022, + 361 + ], + [ + 1024, + 357 + ], + [ + 1025, + 345 + ], + [ + 1021, + 342 + ], + [ + 1022, + 340 + ], + [ + 1028, + 332 + ], + [ + 
1029, + 328 + ], + [ + 1035, + 325 + ], + [ + 1032, + 305 + ], + [ + 1038, + 294 + ], + [ + 1048, + 287 + ], + [ + 1053, + 275 + ], + [ + 1059, + 272 + ], + [ + 1058, + 257 + ], + [ + 1050, + 260 + ], + [ + 1046, + 252 + ], + [ + 1045, + 239 + ], + [ + 1053, + 240 + ], + [ + 1054, + 232 + ], + [ + 1051, + 226 + ], + [ + 1062, + 225 + ], + [ + 1069, + 210 + ], + [ + 1079, + 212 + ], + [ + 1084, + 217 + ], + [ + 1091, + 225 + ], + [ + 1096, + 222 + ], + [ + 1097, + 218 + ], + [ + 1101, + 220 + ], + [ + 1103, + 227 + ], + [ + 1111, + 225 + ], + [ + 1113, + 221 + ], + [ + 1113, + 206 + ], + [ + 1102, + 206 + ], + [ + 1101, + 199 + ], + [ + 1106, + 190 + ], + [ + 1100, + 187 + ], + [ + 1102, + 180 + ], + [ + 1113, + 166 + ], + [ + 1120, + 158 + ], + [ + 1125, + 156 + ], + [ + 1131, + 161 + ], + [ + 1145, + 153 + ], + [ + 1149, + 153 + ], + [ + 1152, + 142 + ], + [ + 1152, + 126 + ], + [ + 1153, + 112 + ], + [ + 1145, + 104 + ], + [ + 1157, + 102 + ], + [ + 1170, + 105 + ], + [ + 1179, + 90 + ], + [ + 1187, + 87 + ], + [ + 1190, + 74 + ], + [ + 1194, + 65 + ], + [ + 1190, + 61 + ], + [ + 1184, + 61 + ], + [ + 1191, + 50 + ], + [ + 1201, + 55 + ], + [ + 1207, + 49 + ], + [ + 1216, + 45 + ], + [ + 1212, + 39 + ], + [ + 1221, + 13 + ], + [ + 1228, + 19 + ], + [ + 1231, + 34 + ], + [ + 1242, + 29 + ], + [ + 1240, + 12 + ], + [ + 1253, + 20 + ], + [ + 1257, + 27 + ], + [ + 1249, + 37 + ], + [ + 1250, + 50 + ], + [ + 1261, + 45 + ], + [ + 1274, + 35 + ], + [ + 1283, + 21 + ], + [ + 1294, + 11 + ], + [ + 1301, + 1 + ], + [ + 1716, + 1 + ], + [ + 1712, + 6 + ], + [ + 1712, + 19 + ], + [ + 1735, + 21 + ], + [ + 1752, + 28 + ], + [ + 1777, + 30 + ], + [ + 1784, + 35 + ], + [ + 1799, + 51 + ], + [ + 1788, + 55 + ], + [ + 1794, + 67 + ], + [ + 1791, + 72 + ], + [ + 1805, + 82 + ], + [ + 1796, + 82 + ], + [ + 1782, + 75 + ], + [ + 1787, + 89 + ], + [ + 1779, + 93 + ], + [ + 1781, + 108 + ], + [ + 1773, + 116 + ], + [ + 1737, + 104 + ], + [ + 1733, + 111 + ], + [ + 1747, + 119 + ], + [ + 1751, + 132 + ], + [ + 1745, + 143 + ], + [ + 1751, + 155 + ], + [ + 1747, + 166 + ], + [ + 1743, + 168 + ], + [ + 1750, + 184 + ], + [ + 1749, + 201 + ], + [ + 1765, + 192 + ], + [ + 1774, + 196 + ], + [ + 1779, + 181 + ], + [ + 1791, + 171 + ], + [ + 1796, + 163 + ], + [ + 1809, + 147 + ], + [ + 1818, + 150 + ], + [ + 1828, + 143 + ], + [ + 1842, + 143 + ], + [ + 1846, + 148 + ], + [ + 1854, + 150 + ], + [ + 1859, + 138 + ], + [ + 1871, + 139 + ], + [ + 1875, + 150 + ], + [ + 1875, + 154 + ], + [ + 1877, + 165 + ], + [ + 1890, + 159 + ], + [ + 1889, + 169 + ], + [ + 1900, + 166 + ], + [ + 1905, + 177 + ], + [ + 1911, + 197 + ], + [ + 1916, + 201 + ], + [ + 1921, + 208 + ], + [ + 1925, + 220 + ], + [ + 1928, + 236 + ], + [ + 1931, + 242 + ], + [ + 1943, + 225 + ], + [ + 1958, + 222 + ], + [ + 1963, + 221 + ], + [ + 1968, + 205 + ], + [ + 1978, + 203 + ], + [ + 1988, + 200 + ], + [ + 1988, + 220 + ], + [ + 1994, + 216 + ], + [ + 1997, + 193 + ], + [ + 1998, + 166 + ], + [ + 2036, + 102 + ], + [ + 2048, + 105 + ], + [ + 2047, + 494 + ], + [ + 1603, + 496 + ], + [ + 1375, + 472 + ], + [ + 1351, + 472 + ], + [ + 1258, + 469 + ], + [ + 1109, + 403 + ], + [ + 1109, + 393 + ], + [ + 1099, + 396 + ], + [ + 1084, + 394 + ], + [ + 1075, + 388 + ], + [ + 1066, + 388 + ], + [ + 1062, + 387 + ], + [ + 1053, + 396 + ], + [ + 1042, + 382 + ], + [ + 1041, + 378 + ], + [ + 1034, + 378 + ], + [ + 1024, + 372 + ], + [ + 1024, + 366 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 777, + 294 + ], + [ + 778, + 472 + ], + [ + 779, + 482 + 
], + [ + 786, + 472 + ], + [ + 784, + 443 + ], + [ + 783, + 294 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2030, + 482 + ], + [ + 1923, + 479 + ], + [ + 1870, + 476 + ], + [ + 1870, + 485 + ], + [ + 1779, + 484 + ], + [ + 1678, + 484 + ], + [ + 1637, + 487 + ], + [ + 1602, + 490 + ], + [ + 1606, + 507 + ], + [ + 1632, + 511 + ], + [ + 1726, + 513 + ], + [ + 1831, + 515 + ], + [ + 1959, + 515 + ], + [ + 2047, + 520 + ], + [ + 2047, + 483 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1596, + 278 + ], + [ + 1593, + 1 + ], + [ + 1609, + 1 + ], + [ + 1608, + 428 + ], + [ + 1594, + 426 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1865, + 550 + ], + [ + 1887, + 546 + ], + [ + 1964, + 541 + ], + [ + 2003, + 539 + ], + [ + 2047, + 536 + ], + [ + 2047, + 627 + ], + [ + 1954, + 610 + ], + [ + 1869, + 591 + ], + [ + 1826, + 581 + ], + [ + 1806, + 573 + ], + [ + 1802, + 568 + ], + [ + 1803, + 563 + ], + [ + 1810, + 559 + ], + [ + 1840, + 553 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1706, + 334 + ], + [ + 1705, + 408 + ], + [ + 1715, + 409 + ], + [ + 1717, + 334 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1240, + 282 + ], + [ + 1240, + 423 + ], + [ + 1247, + 430 + ], + [ + 1245, + 281 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1279, + 445 + ], + [ + 1266, + 434 + ], + [ + 1246, + 434 + ], + [ + 1260, + 474 + ], + [ + 1276, + 467 + ], + [ + 1280, + 448 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1233, + 359 + ], + [ + 1207, + 354 + ], + [ + 1186, + 353 + ], + [ + 1186, + 356 + ], + [ + 1208, + 356 + ], + [ + 1235, + 363 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1169, + 346 + ], + [ + 1167, + 351 + ], + [ + 1157, + 354 + ], + [ + 1157, + 359 + ], + [ + 1167, + 364 + ], + [ + 1157, + 366 + ], + [ + 1157, + 372 + ], + [ + 1164, + 374 + ], + [ + 1168, + 378 + ], + [ + 1179, + 381 + ], + [ + 1180, + 345 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1179, + 323 + ], + [ + 1179, + 405 + ], + [ + 1184, + 405 + ], + [ + 1184, + 323 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1195, + 336 + ], + [ + 1181, + 336 + ], + [ + 1182, + 379 + ], + [ + 1195, + 378 + ], + [ + 1203, + 371 + ], + [ + 1204, + 365 + ], + [ + 1195, + 364 + ], + [ + 1195, + 362 + ], + [ + 1203, + 359 + ], + [ + 1206, + 352 + ], + [ + 1194, + 351 + ], + [ + 1196, + 347 + ], + [ + 1202, + 346 + ], + [ + 1205, + 339 + ], + [ + 1196, + 339 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1269, + 357 + ], + [ + 1231, + 358 + ], + [ + 1232, + 403 + ], + [ + 1269, + 404 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1341, + 427 + ], + [ + 1312, + 424 + ], + [ + 1290, + 425 + ], + [ + 1282, + 428 + ], + [ + 1277, + 439 + ], + [ + 1275, + 443 + ], + [ + 1270, + 442 + ], + [ + 1266, + 442 + ], + [ + 1264, + 445 + ], + [ + 1265, + 449 + ], + [ + 1272, + 451 + ], + [ + 1271, + 459 + ], + [ + 1267, + 475 + ], + [ + 1273, + 497 + ], + [ + 1281, + 497 + ], + [ + 1282, + 490 + ], + [ + 1292, + 487 + ], + [ + 1316, + 488 + ], + [ + 1342, + 491 + ], + [ + 1343, + 500 + ], + [ + 1355, + 499 + ], + [ + 1355, + 482 + ], + [ + 1356, + 467 + ], + [ + 1356, + 453 + ], + [ + 1361, + 452 + ], + [ + 1364, + 449 + ], + [ + 1364, + 446 + ], + [ + 1360, + 445 + ], + [ + 1355, + 447 + ], + [ + 1354, + 448 + ], + [ + 1351, + 438 + ], + [ + 1346, + 429 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1583, + 425 + ], + [ + 1583, + 449 + ], + [ + 1602, + 490 + ], + [ + 1650, + 488 + ], + [ + 
1649, + 425 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1600, + 499 + ], + [ + 1605, + 481 + ], + [ + 1604, + 453 + ], + [ + 1603, + 447 + ], + [ + 1605, + 343 + ], + [ + 1602, + 344 + ], + [ + 1599, + 449 + ], + [ + 1596, + 456 + ], + [ + 1596, + 476 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1587, + 346 + ], + [ + 1589, + 352 + ], + [ + 1598, + 355 + ], + [ + 1598, + 359 + ], + [ + 1588, + 361 + ], + [ + 1591, + 366 + ], + [ + 1598, + 367 + ], + [ + 1598, + 372 + ], + [ + 1590, + 373 + ], + [ + 1593, + 380 + ], + [ + 1600, + 382 + ], + [ + 1601, + 385 + ], + [ + 1609, + 385 + ], + [ + 1608, + 342 + ], + [ + 1599, + 342 + ], + [ + 1598, + 346 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1616, + 462 + ], + [ + 1612, + 468 + ], + [ + 1612, + 494 + ], + [ + 1607, + 502 + ], + [ + 1631, + 502 + ], + [ + 1624, + 495 + ], + [ + 1623, + 466 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 617, + 474 + ], + [ + 617, + 459 + ], + [ + 613, + 444 + ], + [ + 602, + 430 + ], + [ + 597, + 426 + ], + [ + 589, + 426 + ], + [ + 585, + 428 + ], + [ + 570, + 428 + ], + [ + 556, + 429 + ], + [ + 529, + 473 + ], + [ + 529, + 488 + ], + [ + 535, + 492 + ], + [ + 554, + 489 + ], + [ + 557, + 485 + ], + [ + 576, + 483 + ], + [ + 580, + 482 + ], + [ + 586, + 482 + ], + [ + 588, + 487 + ], + [ + 596, + 487 + ], + [ + 600, + 484 + ], + [ + 600, + 479 + ], + [ + 606, + 479 + ], + [ + 607, + 484 + ], + [ + 609, + 486 + ], + [ + 615, + 486 + ], + [ + 617, + 480 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 583, + 488 + ], + [ + 547, + 489 + ], + [ + 514, + 489 + ], + [ + 485, + 491 + ], + [ + 461, + 496 + ], + [ + 462, + 503 + ], + [ + 499, + 503 + ], + [ + 580, + 502 + ], + [ + 636, + 501 + ], + [ + 656, + 501 + ], + [ + 669, + 501 + ], + [ + 666, + 494 + ], + [ + 617, + 491 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 393, + 602 + ], + [ + 291, + 632 + ], + [ + 0, + 707 + ], + [ + 0, + 570 + ], + [ + 61, + 560 + ], + [ + 141, + 546 + ], + [ + 199, + 537 + ], + [ + 232, + 530 + ], + [ + 267, + 521 + ], + [ + 309, + 516 + ], + [ + 355, + 516 + ], + [ + 439, + 513 + ], + [ + 466, + 513 + ], + [ + 519, + 512 + ], + [ + 565, + 513 + ], + [ + 586, + 514 + ], + [ + 591, + 518 + ], + [ + 602, + 527 + ], + [ + 615, + 529 + ], + [ + 620, + 533 + ], + [ + 623, + 537 + ], + [ + 620, + 540 + ], + [ + 598, + 545 + ], + [ + 533, + 560 + ], + [ + 394, + 596 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 221, + 467 + ], + [ + 222, + 497 + ], + [ + 237, + 498 + ], + [ + 242, + 471 + ], + [ + 237, + 466 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 458, + 239 + ], + [ + 458, + 189 + ], + [ + 436, + 189 + ], + [ + 436, + 239 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 231, + 401 + ], + [ + 228, + 340 + ], + [ + 225, + 258 + ], + [ + 228, + 245 + ], + [ + 234, + 232 + ], + [ + 244, + 228 + ], + [ + 259, + 224 + ], + [ + 304, + 216 + ], + [ + 394, + 202 + ], + [ + 436, + 202 + ], + [ + 447, + 204 + ], + [ + 447, + 200 + ], + [ + 435, + 198 + ], + [ + 395, + 199 + ], + [ + 286, + 215 + ], + [ + 250, + 222 + ], + [ + 234, + 227 + ], + [ + 227, + 235 + ], + [ + 223, + 246 + ], + [ + 221, + 260 + ], + [ + 228, + 412 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 198, + 350 + ], + [ + 183, + 366 + ], + [ + 199, + 380 + ], + [ + 213, + 363 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 214, + 345 + ], + [ + 214, + 389 + ], + [ + 232, + 387 + ], + [ + 232, + 347 + ], + [ + 222, + 346 
+ ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 189, + 320 + ], + [ + 189, + 332 + ], + [ + 198, + 332 + ], + [ + 198, + 340 + ], + [ + 214, + 340 + ], + [ + 215, + 333 + ], + [ + 225, + 333 + ], + [ + 224, + 320 + ], + [ + 214, + 319 + ], + [ + 214, + 314 + ], + [ + 198, + 314 + ], + [ + 197, + 320 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 254, + 296 + ], + [ + 222, + 295 + ], + [ + 223, + 308 + ], + [ + 233, + 308 + ], + [ + 233, + 313 + ], + [ + 245, + 315 + ], + [ + 246, + 308 + ], + [ + 254, + 308 + ] + ] + }, + { + "label": "persongroup", + "polygon": [ + [ + 285, + 457 + ], + [ + 287, + 444 + ], + [ + 277, + 419 + ], + [ + 276, + 405 + ], + [ + 266, + 402 + ], + [ + 269, + 394 + ], + [ + 268, + 381 + ], + [ + 264, + 376 + ], + [ + 262, + 365 + ], + [ + 257, + 360 + ], + [ + 245, + 360 + ], + [ + 237, + 365 + ], + [ + 234, + 378 + ], + [ + 234, + 387 + ], + [ + 228, + 400 + ], + [ + 224, + 411 + ], + [ + 223, + 429 + ], + [ + 223, + 465 + ], + [ + 231, + 470 + ], + [ + 234, + 483 + ], + [ + 230, + 497 + ], + [ + 229, + 514 + ], + [ + 231, + 531 + ], + [ + 230, + 553 + ], + [ + 248, + 553 + ], + [ + 259, + 552 + ], + [ + 269, + 552 + ], + [ + 283, + 549 + ], + [ + 278, + 545 + ], + [ + 269, + 541 + ], + [ + 270, + 528 + ], + [ + 277, + 501 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 120, + 568 + ], + [ + 89, + 572 + ], + [ + 66, + 572 + ], + [ + 31, + 572 + ], + [ + 0, + 574 + ], + [ + 0, + 681 + ], + [ + 124, + 654 + ], + [ + 229, + 628 + ], + [ + 297, + 612 + ], + [ + 319, + 609 + ], + [ + 338, + 605 + ], + [ + 341, + 601 + ], + [ + 354, + 598 + ], + [ + 369, + 595 + ], + [ + 379, + 590 + ], + [ + 373, + 587 + ], + [ + 333, + 581 + ], + [ + 310, + 582 + ], + [ + 264, + 580 + ], + [ + 231, + 573 + ], + [ + 199, + 571 + ], + [ + 182, + 572 + ], + [ + 147, + 569 + ], + [ + 131, + 568 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 369, + 565 + ], + [ + 361, + 561 + ], + [ + 347, + 558 + ], + [ + 322, + 557 + ], + [ + 311, + 556 + ], + [ + 295, + 567 + ], + [ + 295, + 569 + ], + [ + 338, + 567 + ], + [ + 357, + 567 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 451, + 257 + ], + [ + 452, + 514 + ], + [ + 458, + 514 + ], + [ + 456, + 257 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 529, + 358 + ], + [ + 529, + 379 + ], + [ + 531, + 513 + ], + [ + 538, + 513 + ], + [ + 535, + 357 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 559, + 289 + ], + [ + 549, + 291 + ], + [ + 545, + 296 + ], + [ + 543, + 303 + ], + [ + 547, + 319 + ], + [ + 553, + 323 + ], + [ + 561, + 324 + ], + [ + 569, + 320 + ], + [ + 573, + 311 + ], + [ + 573, + 301 + ], + [ + 568, + 292 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 545, + 362 + ], + [ + 547, + 301 + ], + [ + 511, + 300 + ], + [ + 509, + 361 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 515, + 531 + ], + [ + 514, + 471 + ], + [ + 510, + 225 + ], + [ + 508, + 186 + ], + [ + 497, + 185 + ], + [ + 499, + 225 + ], + [ + 502, + 370 + ], + [ + 499, + 531 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 357, + 445 + ], + [ + 352, + 436 + ], + [ + 344, + 435 + ], + [ + 338, + 437 + ], + [ + 332, + 453 + ], + [ + 326, + 469 + ], + [ + 316, + 470 + ], + [ + 306, + 472 + ], + [ + 301, + 464 + ], + [ + 289, + 459 + ], + [ + 283, + 452 + ], + [ + 277, + 447 + ], + [ + 272, + 447 + ], + [ + 265, + 450 + ], + [ + 260, + 459 + ], + [ + 260, + 475 + ], + [ + 263, + 489 + ], + [ + 265, + 531 + ], + [ + 281, + 532 + ], + [ + 
277, + 543 + ], + [ + 280, + 551 + ], + [ + 283, + 555 + ], + [ + 309, + 553 + ], + [ + 317, + 548 + ], + [ + 320, + 542 + ], + [ + 328, + 541 + ], + [ + 347, + 552 + ], + [ + 357, + 551 + ], + [ + 365, + 544 + ], + [ + 363, + 533 + ], + [ + 361, + 521 + ], + [ + 355, + 512 + ], + [ + 343, + 505 + ], + [ + 343, + 498 + ], + [ + 348, + 494 + ], + [ + 363, + 488 + ], + [ + 371, + 479 + ], + [ + 374, + 470 + ], + [ + 372, + 453 + ], + [ + 360, + 447 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 324, + 433 + ], + [ + 323, + 450 + ], + [ + 347, + 450 + ], + [ + 347, + 432 + ], + [ + 339, + 426 + ], + [ + 332, + 426 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 326, + 160 + ], + [ + 331, + 455 + ], + [ + 325, + 466 + ], + [ + 328, + 560 + ], + [ + 350, + 560 + ], + [ + 348, + 466 + ], + [ + 342, + 454 + ], + [ + 340, + 160 + ], + [ + 334, + 158 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 544, + 372 + ], + [ + 533, + 371 + ], + [ + 526, + 375 + ], + [ + 521, + 400 + ], + [ + 514, + 404 + ], + [ + 512, + 476 + ], + [ + 518, + 479 + ], + [ + 520, + 493 + ], + [ + 519, + 521 + ], + [ + 520, + 535 + ], + [ + 519, + 547 + ], + [ + 520, + 549 + ], + [ + 533, + 547 + ], + [ + 531, + 544 + ], + [ + 528, + 543 + ], + [ + 529, + 525 + ], + [ + 530, + 510 + ], + [ + 532, + 490 + ], + [ + 535, + 481 + ], + [ + 537, + 497 + ], + [ + 536, + 511 + ], + [ + 535, + 529 + ], + [ + 535, + 542 + ], + [ + 536, + 546 + ], + [ + 546, + 547 + ], + [ + 555, + 546 + ], + [ + 555, + 543 + ], + [ + 550, + 540 + ], + [ + 546, + 535 + ], + [ + 546, + 519 + ], + [ + 550, + 500 + ], + [ + 549, + 491 + ], + [ + 554, + 481 + ], + [ + 559, + 475 + ], + [ + 556, + 457 + ], + [ + 564, + 451 + ], + [ + 564, + 444 + ], + [ + 560, + 424 + ], + [ + 560, + 413 + ], + [ + 559, + 405 + ], + [ + 555, + 400 + ], + [ + 555, + 391 + ], + [ + 552, + 382 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 495, + 366 + ], + [ + 492, + 372 + ], + [ + 492, + 379 + ], + [ + 493, + 386 + ], + [ + 487, + 393 + ], + [ + 484, + 400 + ], + [ + 483, + 417 + ], + [ + 483, + 432 + ], + [ + 487, + 448 + ], + [ + 484, + 461 + ], + [ + 489, + 466 + ], + [ + 488, + 479 + ], + [ + 487, + 495 + ], + [ + 483, + 510 + ], + [ + 484, + 527 + ], + [ + 487, + 536 + ], + [ + 486, + 550 + ], + [ + 502, + 551 + ], + [ + 511, + 551 + ], + [ + 522, + 547 + ], + [ + 519, + 543 + ], + [ + 513, + 542 + ], + [ + 505, + 536 + ], + [ + 502, + 529 + ], + [ + 502, + 506 + ], + [ + 507, + 498 + ], + [ + 512, + 481 + ], + [ + 517, + 472 + ], + [ + 521, + 466 + ], + [ + 523, + 457 + ], + [ + 523, + 445 + ], + [ + 521, + 425 + ], + [ + 519, + 404 + ], + [ + 512, + 394 + ], + [ + 515, + 386 + ], + [ + 513, + 371 + ], + [ + 507, + 365 + ], + [ + 500, + 365 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 534, + 254 + ], + [ + 524, + 253 + ], + [ + 509, + 247 + ], + [ + 509, + 253 + ], + [ + 515, + 255 + ], + [ + 518, + 326 + ], + [ + 511, + 327 + ], + [ + 511, + 331 + ], + [ + 538, + 332 + ], + [ + 538, + 320 + ], + [ + 552, + 318 + ], + [ + 552, + 306 + ], + [ + 536, + 304 + ], + [ + 536, + 296 + ], + [ + 552, + 293 + ], + [ + 552, + 281 + ], + [ + 536, + 280 + ], + [ + 536, + 272 + ], + [ + 553, + 268 + ], + [ + 553, + 258 + ], + [ + 535, + 255 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 489, + 250 + ], + [ + 488, + 254 + ], + [ + 477, + 254 + ], + [ + 476, + 257 + ], + [ + 456, + 257 + ], + [ + 456, + 265 + ], + [ + 461, + 267 + ], + [ + 474, + 271 + ], + [ + 477, + 273 + ], + [ + 477, + 280 + ], + [ 
+ 461, + 283 + ], + [ + 458, + 284 + ], + [ + 458, + 291 + ], + [ + 477, + 295 + ], + [ + 477, + 304 + ], + [ + 460, + 307 + ], + [ + 460, + 317 + ], + [ + 477, + 319 + ], + [ + 477, + 327 + ], + [ + 489, + 327 + ], + [ + 490, + 330 + ], + [ + 503, + 332 + ], + [ + 502, + 249 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 580, + 170 + ], + [ + 479, + 165 + ], + [ + 454, + 178 + ], + [ + 477, + 194 + ], + [ + 581, + 198 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 582, + 231 + ], + [ + 582, + 199 + ], + [ + 454, + 195 + ], + [ + 452, + 227 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 316, + 194 + ], + [ + 295, + 194 + ], + [ + 293, + 198 + ], + [ + 270, + 198 + ], + [ + 270, + 210 + ], + [ + 293, + 220 + ], + [ + 294, + 230 + ], + [ + 268, + 231 + ], + [ + 268, + 244 + ], + [ + 295, + 250 + ], + [ + 294, + 264 + ], + [ + 270, + 264 + ], + [ + 271, + 278 + ], + [ + 295, + 282 + ], + [ + 295, + 292 + ], + [ + 314, + 296 + ], + [ + 331, + 299 + ], + [ + 330, + 194 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 373, + 193 + ], + [ + 338, + 193 + ], + [ + 338, + 292 + ], + [ + 340, + 298 + ], + [ + 359, + 296 + ], + [ + 361, + 292 + ], + [ + 376, + 292 + ], + [ + 377, + 284 + ], + [ + 397, + 279 + ], + [ + 398, + 263 + ], + [ + 377, + 263 + ], + [ + 376, + 253 + ], + [ + 398, + 246 + ], + [ + 398, + 232 + ], + [ + 373, + 230 + ], + [ + 373, + 217 + ], + [ + 397, + 213 + ], + [ + 397, + 198 + ], + [ + 374, + 197 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 344, + 302 + ], + [ + 328, + 302 + ], + [ + 325, + 295 + ], + [ + 315, + 292 + ], + [ + 315, + 191 + ], + [ + 350, + 191 + ], + [ + 350, + 290 + ], + [ + 342, + 291 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 948, + 397 + ], + [ + 908, + 394 + ], + [ + 849, + 395 + ], + [ + 834, + 399 + ], + [ + 813, + 411 + ], + [ + 798, + 429 + ], + [ + 789, + 432 + ], + [ + 787, + 439 + ], + [ + 788, + 444 + ], + [ + 793, + 448 + ], + [ + 784, + 456 + ], + [ + 777, + 471 + ], + [ + 776, + 490 + ], + [ + 776, + 512 + ], + [ + 778, + 519 + ], + [ + 782, + 524 + ], + [ + 791, + 527 + ], + [ + 802, + 524 + ], + [ + 804, + 519 + ], + [ + 806, + 513 + ], + [ + 811, + 511 + ], + [ + 845, + 514 + ], + [ + 848, + 527 + ], + [ + 851, + 534 + ], + [ + 858, + 536 + ], + [ + 866, + 535 + ], + [ + 872, + 532 + ], + [ + 874, + 523 + ], + [ + 875, + 515 + ], + [ + 876, + 513 + ], + [ + 883, + 514 + ], + [ + 887, + 520 + ], + [ + 892, + 522 + ], + [ + 901, + 522 + ], + [ + 909, + 520 + ], + [ + 912, + 513 + ], + [ + 949, + 513 + ], + [ + 953, + 516 + ], + [ + 962, + 514 + ], + [ + 964, + 522 + ], + [ + 968, + 529 + ], + [ + 972, + 533 + ], + [ + 979, + 533 + ], + [ + 986, + 530 + ], + [ + 991, + 524 + ], + [ + 993, + 508 + ], + [ + 998, + 496 + ], + [ + 1001, + 479 + ], + [ + 999, + 469 + ], + [ + 998, + 454 + ], + [ + 994, + 439 + ], + [ + 975, + 413 + ], + [ + 966, + 404 + ], + [ + 957, + 400 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 961, + 478 + ], + [ + 962, + 466 + ], + [ + 917, + 466 + ], + [ + 917, + 478 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1494, + 422 + ], + [ + 1491, + 404 + ], + [ + 1488, + 397 + ], + [ + 1482, + 394 + ], + [ + 1481, + 387 + ], + [ + 1450, + 385 + ], + [ + 1393, + 386 + ], + [ + 1393, + 394 + ], + [ + 1386, + 394 + ], + [ + 1382, + 400 + ], + [ + 1376, + 429 + ], + [ + 1366, + 429 + ], + [ + 1365, + 433 + ], + [ + 1365, + 437 + ], + [ + 1374, + 438 + ], + [ + 1372, + 460 + ], + [ + 1372, + 467 + 
], + [ + 1373, + 474 + ], + [ + 1374, + 477 + ], + [ + 1373, + 499 + ], + [ + 1373, + 508 + ], + [ + 1374, + 512 + ], + [ + 1379, + 514 + ], + [ + 1413, + 501 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1281, + 530 + ], + [ + 1279, + 510 + ], + [ + 1271, + 479 + ], + [ + 1270, + 476 + ], + [ + 1282, + 475 + ], + [ + 1284, + 470 + ], + [ + 1283, + 464 + ], + [ + 1279, + 460 + ], + [ + 1266, + 459 + ], + [ + 1262, + 460 + ], + [ + 1250, + 434 + ], + [ + 1234, + 408 + ], + [ + 1231, + 405 + ], + [ + 1196, + 402 + ], + [ + 1144, + 400 + ], + [ + 1072, + 403 + ], + [ + 1067, + 406 + ], + [ + 1047, + 441 + ], + [ + 1039, + 455 + ], + [ + 1029, + 455 + ], + [ + 1021, + 457 + ], + [ + 1017, + 460 + ], + [ + 1017, + 467 + ], + [ + 1019, + 472 + ], + [ + 1021, + 473 + ], + [ + 1030, + 474 + ], + [ + 1021, + 498 + ], + [ + 1019, + 521 + ], + [ + 1020, + 547 + ], + [ + 1022, + 559 + ], + [ + 1021, + 599 + ], + [ + 1024, + 610 + ], + [ + 1034, + 614 + ], + [ + 1048, + 613 + ], + [ + 1056, + 611 + ], + [ + 1058, + 601 + ], + [ + 1060, + 587 + ], + [ + 1144, + 586 + ], + [ + 1179, + 587 + ], + [ + 1244, + 590 + ], + [ + 1244, + 608 + ], + [ + 1245, + 615 + ], + [ + 1249, + 618 + ], + [ + 1268, + 618 + ], + [ + 1273, + 616 + ], + [ + 1277, + 609 + ], + [ + 1277, + 566 + ], + [ + 1281, + 539 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1191, + 514 + ], + [ + 1191, + 493 + ], + [ + 1107, + 491 + ], + [ + 1107, + 513 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1725, + 469 + ], + [ + 1723, + 403 + ], + [ + 1715, + 402 + ], + [ + 1671, + 403 + ], + [ + 1670, + 471 + ], + [ + 1670, + 497 + ], + [ + 1673, + 497 + ], + [ + 1673, + 469 + ], + [ + 1714, + 469 + ], + [ + 1714, + 502 + ], + [ + 1718, + 502 + ], + [ + 1718, + 469 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1738, + 502 + ], + [ + 1736, + 472 + ], + [ + 1729, + 464 + ], + [ + 1725, + 473 + ], + [ + 1725, + 502 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1813, + 291 + ], + [ + 1812, + 435 + ], + [ + 1810, + 455 + ], + [ + 1809, + 500 + ], + [ + 1829, + 500 + ], + [ + 1822, + 493 + ], + [ + 1820, + 455 + ], + [ + 1818, + 435 + ], + [ + 1817, + 291 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1811, + 504 + ], + [ + 1811, + 473 + ], + [ + 1806, + 465 + ], + [ + 1801, + 474 + ], + [ + 1798, + 504 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1835, + 333 + ], + [ + 1838, + 326 + ], + [ + 1836, + 311 + ], + [ + 1831, + 306 + ], + [ + 1829, + 292 + ], + [ + 1825, + 287 + ], + [ + 1820, + 288 + ], + [ + 1817, + 292 + ], + [ + 1815, + 298 + ], + [ + 1810, + 291 + ], + [ + 1806, + 287 + ], + [ + 1802, + 287 + ], + [ + 1800, + 293 + ], + [ + 1799, + 309 + ], + [ + 1794, + 315 + ], + [ + 1793, + 323 + ], + [ + 1793, + 331 + ], + [ + 1798, + 338 + ], + [ + 1811, + 339 + ], + [ + 1813, + 330 + ], + [ + 1815, + 328 + ], + [ + 1820, + 335 + ], + [ + 1822, + 336 + ], + [ + 1833, + 336 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1772, + 280 + ], + [ + 1775, + 277 + ], + [ + 1776, + 273 + ], + [ + 1776, + 234 + ], + [ + 1775, + 230 + ], + [ + 1770, + 229 + ], + [ + 1654, + 229 + ], + [ + 1650, + 231 + ], + [ + 1649, + 236 + ], + [ + 1647, + 277 + ], + [ + 1649, + 280 + ], + [ + 1654, + 281 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1655, + 282 + ], + [ + 1651, + 284 + ], + [ + 1649, + 289 + ], + [ + 1648, + 334 + ], + [ + 1649, + 338 + ], + [ + 1653, + 339 + ], + [ + 1768, + 338 + ], + [ + 1774, + 337 + ], + [ + 1777, + 332 + ], 
+ [ + 1776, + 287 + ], + [ + 1774, + 284 + ], + [ + 1771, + 281 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1664, + 504 + ], + [ + 1665, + 474 + ], + [ + 1666, + 297 + ], + [ + 1670, + 297 + ], + [ + 1669, + 504 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1676, + 385 + ], + [ + 1676, + 374 + ], + [ + 1661, + 374 + ], + [ + 1661, + 385 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1656, + 362 + ], + [ + 1656, + 374 + ], + [ + 1670, + 374 + ], + [ + 1670, + 362 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1690, + 362 + ], + [ + 1689, + 345 + ], + [ + 1645, + 345 + ], + [ + 1645, + 361 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1709, + 315 + ], + [ + 1670, + 315 + ], + [ + 1670, + 329 + ], + [ + 1709, + 329 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1644, + 302 + ], + [ + 1645, + 317 + ], + [ + 1665, + 313 + ], + [ + 1666, + 297 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1944, + 246 + ], + [ + 1969, + 296 + ], + [ + 1976, + 304 + ], + [ + 1980, + 279 + ], + [ + 1976, + 244 + ], + [ + 1954, + 245 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1975, + 551 + ], + [ + 1976, + 480 + ], + [ + 1972, + 469 + ], + [ + 1972, + 333 + ], + [ + 1963, + 337 + ], + [ + 1964, + 468 + ], + [ + 1961, + 482 + ], + [ + 1961, + 551 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1943, + 306 + ], + [ + 1946, + 317 + ], + [ + 1956, + 320 + ], + [ + 1956, + 327 + ], + [ + 1942, + 328 + ], + [ + 1945, + 340 + ], + [ + 1958, + 344 + ], + [ + 1959, + 354 + ], + [ + 1976, + 352 + ], + [ + 1978, + 309 + ], + [ + 1975, + 280 + ], + [ + 1943, + 281 + ], + [ + 1950, + 295 + ], + [ + 1958, + 298 + ], + [ + 1961, + 304 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2001, + 586 + ], + [ + 2001, + 483 + ], + [ + 2006, + 473 + ], + [ + 2006, + 176 + ], + [ + 2007, + 6 + ], + [ + 2005, + 1 + ], + [ + 2019, + 1 + ], + [ + 2021, + 77 + ], + [ + 2019, + 225 + ], + [ + 2015, + 469 + ], + [ + 2018, + 484 + ], + [ + 2019, + 528 + ], + [ + 2017, + 586 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 2033, + 74 + ], + [ + 2006, + 75 + ], + [ + 1999, + 78 + ], + [ + 1995, + 84 + ], + [ + 1993, + 90 + ], + [ + 1995, + 95 + ], + [ + 1996, + 97 + ], + [ + 1999, + 103 + ], + [ + 1999, + 114 + ], + [ + 1995, + 118 + ], + [ + 1994, + 126 + ], + [ + 1995, + 130 + ], + [ + 1999, + 132 + ], + [ + 1999, + 147 + ], + [ + 1994, + 155 + ], + [ + 1995, + 165 + ], + [ + 2001, + 170 + ], + [ + 2004, + 178 + ], + [ + 2023, + 178 + ], + [ + 2024, + 173 + ], + [ + 2035, + 169 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1998, + 220 + ], + [ + 1962, + 222 + ], + [ + 1962, + 226 + ], + [ + 1948, + 231 + ], + [ + 1952, + 244 + ], + [ + 1971, + 247 + ], + [ + 1971, + 260 + ], + [ + 1948, + 262 + ], + [ + 1952, + 277 + ], + [ + 1972, + 280 + ], + [ + 1974, + 289 + ], + [ + 1960, + 293 + ], + [ + 1962, + 307 + ], + [ + 1976, + 311 + ], + [ + 1977, + 319 + ], + [ + 2002, + 319 + ], + [ + 2004, + 223 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 2038, + 317 + ], + [ + 2036, + 218 + ], + [ + 1999, + 218 + ], + [ + 1999, + 317 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 2048, + 172 + ], + [ + 2037, + 180 + ], + [ + 2032, + 195 + ], + [ + 2032, + 216 + ], + [ + 2036, + 232 + ], + [ + 2039, + 238 + ], + [ + 2048, + 240 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 2009, + 599 + ], + [ + 2017, + 522 + ], + [ + 
2033, + 510 + ], + [ + 2048, + 524 + ], + [ + 2047, + 602 + ], + [ + 2019, + 602 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1554, + 418 + ], + [ + 1493, + 415 + ], + [ + 1472, + 414 + ], + [ + 1472, + 403 + ], + [ + 1469, + 400 + ], + [ + 1434, + 400 + ], + [ + 1434, + 408 + ], + [ + 1434, + 415 + ], + [ + 1426, + 417 + ], + [ + 1413, + 430 + ], + [ + 1397, + 463 + ], + [ + 1392, + 463 + ], + [ + 1393, + 452 + ], + [ + 1392, + 450 + ], + [ + 1377, + 453 + ], + [ + 1375, + 456 + ], + [ + 1375, + 462 + ], + [ + 1375, + 465 + ], + [ + 1384, + 467 + ], + [ + 1396, + 467 + ], + [ + 1385, + 478 + ], + [ + 1380, + 483 + ], + [ + 1380, + 506 + ], + [ + 1379, + 524 + ], + [ + 1379, + 542 + ], + [ + 1379, + 552 + ], + [ + 1382, + 555 + ], + [ + 1394, + 557 + ], + [ + 1394, + 567 + ], + [ + 1398, + 574 + ], + [ + 1408, + 575 + ], + [ + 1419, + 575 + ], + [ + 1422, + 570 + ], + [ + 1424, + 555 + ], + [ + 1523, + 560 + ], + [ + 1570, + 561 + ], + [ + 1575, + 565 + ], + [ + 1576, + 572 + ], + [ + 1580, + 579 + ], + [ + 1587, + 581 + ], + [ + 1600, + 581 + ], + [ + 1604, + 576 + ], + [ + 1607, + 561 + ], + [ + 1607, + 544 + ], + [ + 1611, + 526 + ], + [ + 1609, + 510 + ], + [ + 1608, + 492 + ], + [ + 1604, + 480 + ], + [ + 1596, + 467 + ], + [ + 1592, + 460 + ], + [ + 1578, + 436 + ], + [ + 1569, + 424 + ], + [ + 1563, + 420 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1547, + 502 + ], + [ + 1546, + 486 + ], + [ + 1483, + 483 + ], + [ + 1483, + 500 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000085_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000085_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..d8224b5d0b281e923d66ad4b54b7b24f948dda68 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000085_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000085_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000085_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..9895a98cdbd52530093af01604026293ef7a6931 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000085_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000086_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000086_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..94b7e8cd5f20db682cd97d4c339ce37019c389e9 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000086_000019_gtFine_polygons.json @@ -0,0 +1,4669 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + 
{ + "label": "sky", + "polygon": [ + [ + 1021, + 0 + ], + [ + 1019, + 148 + ], + [ + 928, + 360 + ], + [ + 848, + 404 + ], + [ + 810, + 402 + ], + [ + 701, + 281 + ], + [ + 550, + 68 + ], + [ + 542, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2048, + 425 + ], + [ + 861, + 422 + ], + [ + 699, + 416 + ], + [ + 0, + 471 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1117, + 569 + ], + [ + 1065, + 542 + ], + [ + 1063, + 536 + ], + [ + 1267, + 533 + ], + [ + 1213, + 522 + ], + [ + 1160, + 509 + ], + [ + 1082, + 477 + ], + [ + 1234, + 456 + ], + [ + 2047, + 472 + ], + [ + 2047, + 593 + ], + [ + 1805, + 590 + ], + [ + 1513, + 588 + ], + [ + 1338, + 585 + ], + [ + 1219, + 579 + ], + [ + 1154, + 574 + ], + [ + 1124, + 572 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2018, + 681 + ], + [ + 1982, + 685 + ], + [ + 1926, + 699 + ], + [ + 1891, + 720 + ], + [ + 1875, + 746 + ], + [ + 1869, + 766 + ], + [ + 1869, + 792 + ], + [ + 1890, + 830 + ], + [ + 1950, + 878 + ], + [ + 2013, + 906 + ], + [ + 2048, + 924 + ], + [ + 2048, + 680 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 716, + 508 + ], + [ + 660, + 530 + ], + [ + 616, + 551 + ], + [ + 575, + 567 + ], + [ + 555, + 571 + ], + [ + 532, + 572 + ], + [ + 509, + 505 + ], + [ + 571, + 492 + ], + [ + 637, + 483 + ], + [ + 680, + 511 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 694, + 235 + ], + [ + 654, + 153 + ], + [ + 648, + 150 + ], + [ + 634, + 134 + ], + [ + 610, + 94 + ], + [ + 600, + 82 + ], + [ + 590, + 49 + ], + [ + 590, + 35 + ], + [ + 600, + 32 + ], + [ + 600, + 23 + ], + [ + 597, + 1 + ], + [ + 0, + 1 + ], + [ + 0, + 454 + ], + [ + 537, + 474 + ], + [ + 695, + 465 + ], + [ + 781, + 441 + ], + [ + 738, + 304 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 960, + 110 + ], + [ + 948, + 145 + ], + [ + 943, + 145 + ], + [ + 933, + 172 + ], + [ + 907, + 211 + ], + [ + 896, + 248 + ], + [ + 872, + 301 + ], + [ + 878, + 357 + ], + [ + 866, + 375 + ], + [ + 857, + 394 + ], + [ + 853, + 454 + ], + [ + 936, + 468 + ], + [ + 1230, + 514 + ], + [ + 1265, + 514 + ], + [ + 1314, + 510 + ], + [ + 1314, + 530 + ], + [ + 1460, + 552 + ], + [ + 1623, + 552 + ], + [ + 1763, + 550 + ], + [ + 1870, + 548 + ], + [ + 1959, + 546 + ], + [ + 1978, + 549 + ], + [ + 2047, + 549 + ], + [ + 2048, + 1 + ], + [ + 995, + 1 + ], + [ + 994, + 7 + ], + [ + 989, + 8 + ], + [ + 975, + 38 + ], + [ + 976, + 61 + ], + [ + 978, + 61 + ], + [ + 979, + 99 + ], + [ + 975, + 100 + ], + [ + 976, + 111 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 663, + 384 + ], + [ + 645, + 386 + ], + [ + 644, + 406 + ], + [ + 663, + 411 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 436, + 136 + ], + [ + 444, + 131 + ], + [ + 437, + 125 + ], + [ + 434, + 106 + ], + [ + 431, + 98 + ], + [ + 423, + 97 + ], + [ + 428, + 111 + ], + [ + 427, + 125 + ], + [ + 419, + 116 + ], + [ + 408, + 103 + ], + [ + 398, + 102 + ], + [ + 397, + 104 + ], + [ + 386, + 112 + ], + [ + 383, + 104 + ], + [ + 372, + 98 + ], + [ + 375, + 107 + ], + [ + 386, + 118 + ], + [ + 394, + 121 + ], + [ + 398, + 131 + ], + [ + 383, + 134 + ], + [ + 375, + 135 + ], + [ + 355, + 136 + ], + [ + 352, + 127 + ], + [ + 359, + 118 + ], + [ + 361, + 111 + ], + [ + 360, + 96 + ], + [ + 341, + 93 + ], + [ + 340, + 104 + ], + [ + 346, + 111 + ], + [ + 343, + 126 + ], + [ + 329, + 132 + ], + [ + 321, + 131 + ], + [ + 314, + 111 + ], + [ + 301, + 95 + ], + [ + 287, + 90 + ], + [ + 283, + 97 + 
], + [ + 270, + 89 + ], + [ + 259, + 80 + ], + [ + 258, + 62 + ], + [ + 265, + 59 + ], + [ + 272, + 43 + ], + [ + 260, + 40 + ], + [ + 247, + 23 + ], + [ + 224, + 6 + ], + [ + 216, + 14 + ], + [ + 199, + 24 + ], + [ + 173, + 15 + ], + [ + 165, + 0 + ], + [ + 872, + 1 + ], + [ + 883, + 15 + ], + [ + 880, + 27 + ], + [ + 863, + 14 + ], + [ + 860, + 22 + ], + [ + 851, + 19 + ], + [ + 844, + 6 + ], + [ + 833, + 3 + ], + [ + 832, + 10 + ], + [ + 845, + 26 + ], + [ + 844, + 39 + ], + [ + 851, + 40 + ], + [ + 850, + 54 + ], + [ + 856, + 61 + ], + [ + 864, + 68 + ], + [ + 871, + 72 + ], + [ + 867, + 57 + ], + [ + 880, + 51 + ], + [ + 886, + 62 + ], + [ + 898, + 63 + ], + [ + 913, + 56 + ], + [ + 915, + 60 + ], + [ + 919, + 73 + ], + [ + 933, + 74 + ], + [ + 938, + 79 + ], + [ + 926, + 87 + ], + [ + 926, + 94 + ], + [ + 933, + 101 + ], + [ + 943, + 110 + ], + [ + 948, + 107 + ], + [ + 958, + 118 + ], + [ + 958, + 130 + ], + [ + 948, + 131 + ], + [ + 939, + 123 + ], + [ + 926, + 127 + ], + [ + 943, + 142 + ], + [ + 949, + 145 + ], + [ + 959, + 142 + ], + [ + 974, + 144 + ], + [ + 977, + 144 + ], + [ + 983, + 167 + ], + [ + 983, + 196 + ], + [ + 967, + 219 + ], + [ + 951, + 228 + ], + [ + 947, + 245 + ], + [ + 954, + 251 + ], + [ + 967, + 267 + ], + [ + 966, + 285 + ], + [ + 956, + 290 + ], + [ + 951, + 275 + ], + [ + 934, + 270 + ], + [ + 934, + 300 + ], + [ + 929, + 306 + ], + [ + 917, + 301 + ], + [ + 907, + 303 + ], + [ + 904, + 309 + ], + [ + 920, + 325 + ], + [ + 921, + 334 + ], + [ + 904, + 338 + ], + [ + 903, + 352 + ], + [ + 901, + 364 + ], + [ + 905, + 378 + ], + [ + 906, + 398 + ], + [ + 906, + 423 + ], + [ + 906, + 450 + ], + [ + 894, + 457 + ], + [ + 892, + 451 + ], + [ + 892, + 420 + ], + [ + 889, + 376 + ], + [ + 885, + 350 + ], + [ + 879, + 361 + ], + [ + 867, + 361 + ], + [ + 869, + 375 + ], + [ + 869, + 419 + ], + [ + 874, + 433 + ], + [ + 874, + 453 + ], + [ + 850, + 463 + ], + [ + 837, + 459 + ], + [ + 820, + 455 + ], + [ + 796, + 454 + ], + [ + 792, + 456 + ], + [ + 792, + 464 + ], + [ + 782, + 464 + ], + [ + 707, + 471 + ], + [ + 699, + 439 + ], + [ + 699, + 417 + ], + [ + 698, + 400 + ], + [ + 692, + 388 + ], + [ + 683, + 383 + ], + [ + 675, + 379 + ], + [ + 678, + 367 + ], + [ + 692, + 367 + ], + [ + 686, + 360 + ], + [ + 668, + 360 + ], + [ + 655, + 354 + ], + [ + 643, + 367 + ], + [ + 649, + 384 + ], + [ + 626, + 382 + ], + [ + 628, + 538 + ], + [ + 622, + 539 + ], + [ + 618, + 351 + ], + [ + 592, + 349 + ], + [ + 565, + 343 + ], + [ + 543, + 315 + ], + [ + 545, + 288 + ], + [ + 545, + 277 + ], + [ + 506, + 293 + ], + [ + 511, + 437 + ], + [ + 406, + 378 + ], + [ + 411, + 369 + ], + [ + 444, + 367 + ], + [ + 464, + 374 + ], + [ + 464, + 343 + ], + [ + 468, + 273 + ], + [ + 466, + 173 + ], + [ + 454, + 163 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 846, + 461 + ], + [ + 846, + 436 + ], + [ + 848, + 437 + ], + [ + 848, + 461 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 825, + 450 + ], + [ + 816, + 448 + ], + [ + 814, + 445 + ], + [ + 801, + 446 + ], + [ + 798, + 452 + ], + [ + 798, + 454 + ], + [ + 802, + 456 + ], + [ + 818, + 456 + ], + [ + 824, + 456 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 842, + 428 + ], + [ + 842, + 437 + ], + [ + 844, + 438 + ], + [ + 844, + 445 + ], + [ + 849, + 444 + ], + [ + 849, + 439 + ], + [ + 851, + 438 + ], + [ + 851, + 428 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 844, + 467 + ], + [ + 844, + 457 + ], + [ + 844, + 454 + ], + [ + 842, + 452 + ], + [ + 840, + 447 + ], + [ + 825, + 447 + ], + 
[ + 822, + 455 + ], + [ + 820, + 460 + ], + [ + 821, + 467 + ], + [ + 824, + 467 + ], + [ + 824, + 464 + ], + [ + 841, + 464 + ], + [ + 841, + 467 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 778, + 469 + ], + [ + 781, + 465 + ], + [ + 780, + 457 + ], + [ + 775, + 446 + ], + [ + 772, + 437 + ], + [ + 765, + 436 + ], + [ + 762, + 439 + ], + [ + 759, + 446 + ], + [ + 735, + 446 + ], + [ + 734, + 436 + ], + [ + 722, + 435 + ], + [ + 714, + 435 + ], + [ + 715, + 449 + ], + [ + 701, + 447 + ], + [ + 701, + 435 + ], + [ + 692, + 436 + ], + [ + 690, + 439 + ], + [ + 690, + 443 + ], + [ + 680, + 441 + ], + [ + 672, + 441 + ], + [ + 676, + 512 + ], + [ + 684, + 514 + ], + [ + 688, + 510 + ], + [ + 697, + 510 + ], + [ + 702, + 508 + ], + [ + 708, + 505 + ], + [ + 716, + 500 + ], + [ + 723, + 494 + ], + [ + 732, + 494 + ], + [ + 738, + 493 + ], + [ + 743, + 492 + ], + [ + 744, + 487 + ], + [ + 749, + 485 + ], + [ + 758, + 480 + ], + [ + 764, + 479 + ], + [ + 764, + 475 + ], + [ + 773, + 475 + ], + [ + 777, + 473 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 734, + 502 + ], + [ + 734, + 483 + ], + [ + 733, + 466 + ], + [ + 736, + 461 + ], + [ + 737, + 450 + ], + [ + 734, + 444 + ], + [ + 729, + 440 + ], + [ + 727, + 438 + ], + [ + 729, + 435 + ], + [ + 729, + 431 + ], + [ + 727, + 428 + ], + [ + 724, + 427 + ], + [ + 720, + 429 + ], + [ + 720, + 433 + ], + [ + 722, + 438 + ], + [ + 722, + 439 + ], + [ + 714, + 441 + ], + [ + 710, + 447 + ], + [ + 710, + 456 + ], + [ + 711, + 462 + ], + [ + 713, + 464 + ], + [ + 715, + 466 + ], + [ + 715, + 494 + ], + [ + 714, + 504 + ], + [ + 723, + 504 + ], + [ + 721, + 501 + ], + [ + 723, + 493 + ], + [ + 727, + 497 + ], + [ + 727, + 505 + ], + [ + 730, + 505 + ], + [ + 735, + 505 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 751, + 433 + ], + [ + 747, + 437 + ], + [ + 747, + 440 + ], + [ + 747, + 442 + ], + [ + 742, + 443 + ], + [ + 739, + 447 + ], + [ + 739, + 458 + ], + [ + 744, + 464 + ], + [ + 744, + 480 + ], + [ + 745, + 490 + ], + [ + 746, + 497 + ], + [ + 747, + 498 + ], + [ + 747, + 505 + ], + [ + 748, + 506 + ], + [ + 757, + 506 + ], + [ + 759, + 505 + ], + [ + 759, + 498 + ], + [ + 760, + 493 + ], + [ + 759, + 480 + ], + [ + 761, + 463 + ], + [ + 765, + 470 + ], + [ + 766, + 474 + ], + [ + 768, + 474 + ], + [ + 768, + 473 + ], + [ + 768, + 464 + ], + [ + 762, + 446 + ], + [ + 760, + 444 + ], + [ + 755, + 443 + ], + [ + 755, + 439 + ], + [ + 754, + 435 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 687, + 399 + ], + [ + 670, + 400 + ], + [ + 664, + 396 + ], + [ + 655, + 389 + ], + [ + 652, + 401 + ], + [ + 646, + 400 + ], + [ + 622, + 402 + ], + [ + 510, + 406 + ], + [ + 502, + 406 + ], + [ + 533, + 519 + ], + [ + 624, + 524 + ], + [ + 655, + 522 + ], + [ + 678, + 513 + ], + [ + 677, + 416 + ], + [ + 681, + 405 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 598, + 438 + ], + [ + 593, + 433 + ], + [ + 570, + 429 + ], + [ + 538, + 429 + ], + [ + 513, + 431 + ], + [ + 537, + 533 + ], + [ + 611, + 532 + ], + [ + 616, + 530 + ], + [ + 618, + 505 + ], + [ + 618, + 486 + ], + [ + 611, + 465 + ], + [ + 614, + 464 + ], + [ + 615, + 460 + ], + [ + 614, + 457 + ], + [ + 607, + 454 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 567, + 556 + ], + [ + 558, + 550 + ], + [ + 545, + 545 + ], + [ + 404, + 387 + ], + [ + 400, + 370 + ], + [ + 416, + 368 + ], + [ + 444, + 369 + ], + [ + 454, + 374 + ], + [ + 460, + 375 + ], + [ + 467, + 380 + ], + [ + 468, + 384 + ], + [ + 459, + 384 + ], + [ + 
460, + 400 + ], + [ + 486, + 403 + ], + [ + 503, + 406 + ], + [ + 511, + 411 + ], + [ + 514, + 418 + ], + [ + 526, + 428 + ], + [ + 527, + 417 + ], + [ + 538, + 426 + ], + [ + 542, + 433 + ], + [ + 544, + 434 + ], + [ + 549, + 429 + ], + [ + 555, + 429 + ], + [ + 559, + 435 + ], + [ + 567, + 432 + ], + [ + 573, + 434 + ], + [ + 575, + 438 + ], + [ + 581, + 447 + ], + [ + 582, + 434 + ], + [ + 586, + 447 + ], + [ + 586, + 450 + ], + [ + 595, + 452 + ], + [ + 599, + 456 + ], + [ + 600, + 459 + ], + [ + 597, + 465 + ], + [ + 602, + 469 + ], + [ + 607, + 470 + ], + [ + 612, + 486 + ], + [ + 613, + 492 + ], + [ + 614, + 498 + ], + [ + 618, + 504 + ], + [ + 620, + 509 + ], + [ + 623, + 525 + ], + [ + 632, + 529 + ], + [ + 636, + 534 + ], + [ + 626, + 537 + ], + [ + 609, + 541 + ], + [ + 609, + 548 + ], + [ + 613, + 552 + ], + [ + 608, + 558 + ], + [ + 585, + 558 + ], + [ + 570, + 559 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 523, + 209 + ], + [ + 532, + 448 + ], + [ + 534, + 537 + ], + [ + 542, + 539 + ], + [ + 536, + 374 + ], + [ + 530, + 208 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 448, + 246 + ], + [ + 449, + 271 + ], + [ + 469, + 271 + ], + [ + 469, + 283 + ], + [ + 502, + 281 + ], + [ + 502, + 269 + ], + [ + 526, + 267 + ], + [ + 525, + 242 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 194, + 259 + ], + [ + 197, + 295 + ], + [ + 225, + 298 + ], + [ + 222, + 263 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 237, + 363 + ], + [ + 209, + 295 + ], + [ + 206, + 295 + ], + [ + 182, + 361 + ], + [ + 207, + 378 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 545, + 379 + ], + [ + 544, + 360 + ], + [ + 518, + 360 + ], + [ + 519, + 378 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 548, + 272 + ], + [ + 530, + 269 + ], + [ + 531, + 294 + ], + [ + 548, + 295 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 516, + 301 + ], + [ + 509, + 306 + ], + [ + 503, + 314 + ], + [ + 500, + 324 + ], + [ + 500, + 337 + ], + [ + 506, + 350 + ], + [ + 514, + 356 + ], + [ + 522, + 358 + ], + [ + 534, + 358 + ], + [ + 547, + 353 + ], + [ + 555, + 342 + ], + [ + 558, + 329 + ], + [ + 554, + 316 + ], + [ + 548, + 307 + ], + [ + 539, + 301 + ], + [ + 527, + 300 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1759, + 560 + ], + [ + 1636, + 568 + ], + [ + 1714, + 585 + ], + [ + 1826, + 584 + ], + [ + 1919, + 585 + ], + [ + 1856, + 562 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 71, + 363 + ], + [ + 19, + 376 + ], + [ + 0, + 385 + ], + [ + 0, + 652 + ], + [ + 140, + 651 + ], + [ + 150, + 691 + ], + [ + 160, + 714 + ], + [ + 173, + 725 + ], + [ + 205, + 729 + ], + [ + 232, + 723 + ], + [ + 257, + 704 + ], + [ + 267, + 685 + ], + [ + 273, + 658 + ], + [ + 363, + 644 + ], + [ + 417, + 640 + ], + [ + 433, + 664 + ], + [ + 448, + 672 + ], + [ + 468, + 676 + ], + [ + 489, + 673 + ], + [ + 506, + 663 + ], + [ + 518, + 646 + ], + [ + 525, + 623 + ], + [ + 539, + 618 + ], + [ + 553, + 598 + ], + [ + 558, + 568 + ], + [ + 556, + 548 + ], + [ + 552, + 530 + ], + [ + 545, + 468 + ], + [ + 530, + 448 + ], + [ + 505, + 422 + ], + [ + 465, + 395 + ], + [ + 441, + 381 + ], + [ + 416, + 372 + ], + [ + 376, + 366 + ], + [ + 303, + 360 + ], + [ + 229, + 357 + ], + [ + 150, + 358 + ], + [ + 104, + 360 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 387, + 531 + ], + [ + 388, + 561 + ], + [ + 493, + 555 + ], + [ + 494, + 527 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ 
+ 952, + 447 + ], + [ + 949, + 362 + ], + [ + 942, + 337 + ], + [ + 933, + 327 + ], + [ + 917, + 319 + ], + [ + 891, + 316 + ], + [ + 893, + 315 + ], + [ + 917, + 317 + ], + [ + 926, + 320 + ], + [ + 934, + 325 + ], + [ + 940, + 330 + ], + [ + 947, + 341 + ], + [ + 950, + 353 + ], + [ + 952, + 366 + ], + [ + 955, + 451 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 886, + 304 + ], + [ + 882, + 315 + ], + [ + 883, + 317 + ], + [ + 895, + 317 + ], + [ + 896, + 314 + ], + [ + 892, + 307 + ], + [ + 890, + 304 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1084, + 415 + ], + [ + 1081, + 301 + ], + [ + 1080, + 257 + ], + [ + 1077, + 240 + ], + [ + 1066, + 218 + ], + [ + 1055, + 205 + ], + [ + 1042, + 194 + ], + [ + 1028, + 186 + ], + [ + 1007, + 179 + ], + [ + 971, + 177 + ], + [ + 970, + 173 + ], + [ + 1000, + 175 + ], + [ + 1015, + 177 + ], + [ + 1029, + 183 + ], + [ + 1049, + 195 + ], + [ + 1061, + 205 + ], + [ + 1072, + 221 + ], + [ + 1081, + 243 + ], + [ + 1084, + 260 + ], + [ + 1090, + 415 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 968, + 154 + ], + [ + 961, + 154 + ], + [ + 950, + 168 + ], + [ + 950, + 177 + ], + [ + 962, + 180 + ], + [ + 978, + 178 + ], + [ + 978, + 171 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 880, + 449 + ], + [ + 864, + 448 + ], + [ + 855, + 448 + ], + [ + 850, + 456 + ], + [ + 849, + 467 + ], + [ + 851, + 471 + ], + [ + 853, + 475 + ], + [ + 859, + 476 + ], + [ + 859, + 472 + ], + [ + 867, + 472 + ], + [ + 873, + 475 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 898, + 448 + ], + [ + 887, + 448 + ], + [ + 877, + 448 + ], + [ + 873, + 455 + ], + [ + 867, + 457 + ], + [ + 867, + 459 + ], + [ + 870, + 461 + ], + [ + 869, + 470 + ], + [ + 871, + 476 + ], + [ + 874, + 477 + ], + [ + 878, + 477 + ], + [ + 878, + 474 + ], + [ + 894, + 475 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 912, + 445 + ], + [ + 899, + 445 + ], + [ + 891, + 450 + ], + [ + 889, + 457 + ], + [ + 886, + 458 + ], + [ + 887, + 467 + ], + [ + 887, + 477 + ], + [ + 889, + 484 + ], + [ + 896, + 484 + ], + [ + 899, + 484 + ], + [ + 907, + 457 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 926, + 444 + ], + [ + 912, + 445 + ], + [ + 902, + 451 + ], + [ + 899, + 458 + ], + [ + 895, + 458 + ], + [ + 894, + 462 + ], + [ + 897, + 465 + ], + [ + 897, + 474 + ], + [ + 898, + 485 + ], + [ + 900, + 487 + ], + [ + 903, + 488 + ], + [ + 909, + 487 + ], + [ + 917, + 483 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 930, + 444 + ], + [ + 919, + 445 + ], + [ + 913, + 447 + ], + [ + 910, + 454 + ], + [ + 908, + 459 + ], + [ + 903, + 461 + ], + [ + 903, + 465 + ], + [ + 905, + 467 + ], + [ + 908, + 471 + ], + [ + 908, + 483 + ], + [ + 909, + 488 + ], + [ + 912, + 489 + ], + [ + 915, + 489 + ], + [ + 928, + 448 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 947, + 444 + ], + [ + 938, + 443 + ], + [ + 926, + 444 + ], + [ + 919, + 449 + ], + [ + 915, + 460 + ], + [ + 910, + 461 + ], + [ + 910, + 464 + ], + [ + 911, + 465 + ], + [ + 913, + 468 + ], + [ + 913, + 481 + ], + [ + 913, + 490 + ], + [ + 915, + 493 + ], + [ + 921, + 494 + ], + [ + 927, + 493 + ], + [ + 934, + 471 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 973, + 445 + ], + [ + 957, + 443 + ], + [ + 946, + 444 + ], + [ + 932, + 446 + ], + [ + 928, + 459 + ], + [ + 925, + 462 + ], + [ + 924, + 465 + ], + [ + 926, + 469 + ], + [ + 926, + 482 + ], + [ + 927, + 492 + ], + [ + 930, + 496 + ], + [ + 936, + 497 + ], + [ + 945, + 496 + ] + ] + }, + { + 
"label": "car", + "polygon": [ + [ + 985, + 443 + ], + [ + 960, + 444 + ], + [ + 948, + 448 + ], + [ + 944, + 459 + ], + [ + 939, + 470 + ], + [ + 937, + 476 + ], + [ + 938, + 495 + ], + [ + 939, + 508 + ], + [ + 943, + 509 + ], + [ + 948, + 508 + ], + [ + 951, + 505 + ], + [ + 956, + 506 + ], + [ + 956, + 510 + ], + [ + 959, + 511 + ], + [ + 965, + 512 + ], + [ + 969, + 511 + ], + [ + 979, + 464 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 999, + 442 + ], + [ + 985, + 443 + ], + [ + 979, + 446 + ], + [ + 970, + 463 + ], + [ + 967, + 467 + ], + [ + 967, + 472 + ], + [ + 968, + 474 + ], + [ + 968, + 483 + ], + [ + 968, + 500 + ], + [ + 968, + 510 + ], + [ + 969, + 513 + ], + [ + 972, + 515 + ], + [ + 977, + 515 + ], + [ + 981, + 517 + ], + [ + 989, + 517 + ], + [ + 990, + 499 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1037, + 437 + ], + [ + 1006, + 439 + ], + [ + 999, + 440 + ], + [ + 992, + 447 + ], + [ + 983, + 466 + ], + [ + 978, + 471 + ], + [ + 976, + 484 + ], + [ + 978, + 506 + ], + [ + 978, + 515 + ], + [ + 980, + 517 + ], + [ + 987, + 517 + ], + [ + 991, + 517 + ], + [ + 996, + 518 + ], + [ + 1002, + 518 + ], + [ + 1007, + 518 + ], + [ + 1009, + 515 + ], + [ + 1010, + 511 + ], + [ + 1015, + 510 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1055, + 434 + ], + [ + 1038, + 435 + ], + [ + 1033, + 438 + ], + [ + 1029, + 445 + ], + [ + 1024, + 458 + ], + [ + 1017, + 458 + ], + [ + 1014, + 461 + ], + [ + 1015, + 465 + ], + [ + 1018, + 466 + ], + [ + 1011, + 479 + ], + [ + 1010, + 487 + ], + [ + 1010, + 513 + ], + [ + 1011, + 519 + ], + [ + 1014, + 522 + ], + [ + 1019, + 522 + ], + [ + 1023, + 522 + ], + [ + 1024, + 520 + ], + [ + 1029, + 519 + ], + [ + 1029, + 524 + ], + [ + 1032, + 526 + ], + [ + 1036, + 527 + ], + [ + 1043, + 525 + ], + [ + 1062, + 438 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1132, + 411 + ], + [ + 1095, + 408 + ], + [ + 1076, + 410 + ], + [ + 1074, + 408 + ], + [ + 1071, + 408 + ], + [ + 1067, + 410 + ], + [ + 1057, + 424 + ], + [ + 1051, + 441 + ], + [ + 1047, + 442 + ], + [ + 1046, + 447 + ], + [ + 1042, + 453 + ], + [ + 1039, + 469 + ], + [ + 1039, + 487 + ], + [ + 1040, + 496 + ], + [ + 1039, + 518 + ], + [ + 1039, + 528 + ], + [ + 1043, + 531 + ], + [ + 1051, + 531 + ], + [ + 1052, + 534 + ], + [ + 1055, + 536 + ], + [ + 1071, + 536 + ], + [ + 1073, + 530 + ], + [ + 1073, + 522 + ], + [ + 1092, + 521 + ], + [ + 1136, + 415 + ], + [ + 1134, + 411 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1105, + 248 + ], + [ + 1090, + 256 + ], + [ + 1079, + 264 + ], + [ + 1066, + 272 + ], + [ + 1062, + 286 + ], + [ + 1063, + 293 + ], + [ + 1068, + 304 + ], + [ + 1079, + 309 + ], + [ + 1081, + 315 + ], + [ + 1077, + 318 + ], + [ + 1081, + 322 + ], + [ + 1081, + 325 + ], + [ + 1071, + 326 + ], + [ + 1071, + 333 + ], + [ + 1077, + 345 + ], + [ + 1100, + 342 + ], + [ + 1108, + 337 + ], + [ + 1106, + 329 + ], + [ + 1106, + 323 + ], + [ + 1103, + 319 + ], + [ + 1103, + 313 + ], + [ + 1107, + 307 + ], + [ + 1098, + 304 + ], + [ + 1097, + 292 + ], + [ + 1095, + 287 + ], + [ + 1101, + 275 + ], + [ + 1101, + 272 + ], + [ + 1092, + 274 + ], + [ + 1088, + 271 + ], + [ + 1091, + 267 + ], + [ + 1100, + 266 + ], + [ + 1106, + 268 + ], + [ + 1107, + 269 + ], + [ + 1110, + 293 + ], + [ + 1115, + 329 + ], + [ + 1122, + 375 + ], + [ + 1131, + 419 + ], + [ + 1115, + 415 + ], + [ + 1114, + 412 + ], + [ + 1106, + 417 + ], + [ + 1112, + 427 + ], + [ + 1106, + 429 + ], + [ + 1107, + 436 + ], + [ + 1096, + 434 + ], + [ + 1095, + 442 + ], 
+ [ + 1087, + 448 + ], + [ + 1080, + 455 + ], + [ + 1074, + 464 + ], + [ + 1066, + 465 + ], + [ + 1065, + 478 + ], + [ + 1064, + 488 + ], + [ + 1069, + 497 + ], + [ + 1074, + 501 + ], + [ + 1079, + 501 + ], + [ + 1077, + 525 + ], + [ + 1073, + 535 + ], + [ + 1071, + 543 + ], + [ + 1083, + 548 + ], + [ + 1107, + 551 + ], + [ + 1140, + 549 + ], + [ + 1212, + 547 + ], + [ + 1265, + 542 + ], + [ + 1248, + 534 + ], + [ + 1231, + 532 + ], + [ + 1220, + 540 + ], + [ + 1216, + 536 + ], + [ + 1214, + 528 + ], + [ + 1232, + 522 + ], + [ + 1237, + 507 + ], + [ + 1243, + 500 + ], + [ + 1240, + 496 + ], + [ + 1240, + 479 + ], + [ + 1249, + 491 + ], + [ + 1255, + 491 + ], + [ + 1256, + 484 + ], + [ + 1254, + 472 + ], + [ + 1255, + 459 + ], + [ + 1249, + 454 + ], + [ + 1241, + 453 + ], + [ + 1232, + 442 + ], + [ + 1226, + 435 + ], + [ + 1214, + 436 + ], + [ + 1202, + 437 + ], + [ + 1192, + 429 + ], + [ + 1184, + 415 + ], + [ + 1180, + 411 + ], + [ + 1173, + 417 + ], + [ + 1156, + 414 + ], + [ + 1148, + 367 + ], + [ + 1138, + 316 + ], + [ + 1135, + 289 + ], + [ + 1153, + 277 + ], + [ + 1165, + 269 + ], + [ + 1179, + 254 + ], + [ + 1170, + 250 + ], + [ + 1150, + 250 + ], + [ + 1143, + 254 + ], + [ + 1143, + 230 + ], + [ + 1153, + 234 + ], + [ + 1163, + 219 + ], + [ + 1156, + 210 + ], + [ + 1148, + 207 + ], + [ + 1150, + 201 + ], + [ + 1158, + 199 + ], + [ + 1166, + 173 + ], + [ + 1156, + 170 + ], + [ + 1152, + 165 + ], + [ + 1144, + 175 + ], + [ + 1149, + 179 + ], + [ + 1151, + 184 + ], + [ + 1141, + 194 + ], + [ + 1136, + 190 + ], + [ + 1135, + 199 + ], + [ + 1137, + 200 + ], + [ + 1135, + 209 + ], + [ + 1130, + 207 + ], + [ + 1124, + 209 + ], + [ + 1126, + 214 + ], + [ + 1129, + 222 + ], + [ + 1124, + 226 + ], + [ + 1119, + 194 + ], + [ + 1118, + 167 + ], + [ + 1115, + 152 + ], + [ + 1112, + 135 + ], + [ + 1107, + 124 + ], + [ + 1102, + 100 + ], + [ + 1098, + 85 + ], + [ + 1094, + 31 + ], + [ + 1099, + 34 + ], + [ + 1108, + 42 + ], + [ + 1120, + 38 + ], + [ + 1128, + 22 + ], + [ + 1130, + 21 + ], + [ + 1134, + 26 + ], + [ + 1135, + 43 + ], + [ + 1146, + 37 + ], + [ + 1147, + 42 + ], + [ + 1147, + 48 + ], + [ + 1155, + 52 + ], + [ + 1153, + 59 + ], + [ + 1156, + 60 + ], + [ + 1162, + 52 + ], + [ + 1164, + 61 + ], + [ + 1169, + 55 + ], + [ + 1181, + 62 + ], + [ + 1184, + 55 + ], + [ + 1196, + 44 + ], + [ + 1191, + 38 + ], + [ + 1179, + 40 + ], + [ + 1173, + 29 + ], + [ + 1168, + 28 + ], + [ + 1163, + 32 + ], + [ + 1156, + 26 + ], + [ + 1160, + 17 + ], + [ + 1160, + 4 + ], + [ + 1159, + 1 + ], + [ + 1180, + 0 + ], + [ + 1180, + 5 + ], + [ + 1187, + 12 + ], + [ + 1196, + 10 + ], + [ + 1202, + 11 + ], + [ + 1204, + 4 + ], + [ + 1201, + 0 + ], + [ + 1108, + 0 + ], + [ + 1098, + 15 + ], + [ + 1094, + 16 + ], + [ + 1091, + 6 + ], + [ + 1093, + 0 + ], + [ + 1067, + 1 + ], + [ + 1066, + 9 + ], + [ + 1070, + 26 + ], + [ + 1069, + 44 + ], + [ + 1072, + 53 + ], + [ + 1071, + 58 + ], + [ + 1070, + 62 + ], + [ + 1076, + 96 + ], + [ + 1085, + 125 + ], + [ + 1094, + 170 + ], + [ + 1093, + 174 + ], + [ + 1099, + 208 + ], + [ + 1103, + 236 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1135, + 480 + ], + [ + 1134, + 550 + ], + [ + 1157, + 554 + ], + [ + 1188, + 551 + ], + [ + 1188, + 481 + ], + [ + 1174, + 477 + ], + [ + 1148, + 476 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1336, + 357 + ], + [ + 1337, + 568 + ], + [ + 1346, + 569 + ], + [ + 1344, + 358 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1777, + 560 + ], + [ + 1777, + 480 + ], + [ + 1780, + 338 + ], + [ + 1783, + 130 + 
], + [ + 1791, + 122 + ], + [ + 1782, + 116 + ], + [ + 1781, + 19 + ], + [ + 1795, + 0 + ], + [ + 1765, + 0 + ], + [ + 1757, + 560 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1814, + 539 + ], + [ + 1801, + 549 + ], + [ + 1790, + 553 + ], + [ + 1745, + 558 + ], + [ + 1744, + 555 + ], + [ + 1734, + 555 + ], + [ + 1733, + 581 + ], + [ + 1744, + 580 + ], + [ + 1744, + 575 + ], + [ + 1799, + 572 + ], + [ + 1801, + 579 + ], + [ + 1810, + 579 + ], + [ + 1812, + 568 + ], + [ + 1847, + 571 + ], + [ + 1856, + 571 + ], + [ + 1853, + 555 + ], + [ + 1861, + 554 + ], + [ + 1860, + 548 + ], + [ + 1824, + 550 + ], + [ + 1824, + 539 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1238, + 114 + ], + [ + 1237, + 0 + ], + [ + 1665, + 0 + ], + [ + 1666, + 114 + ], + [ + 1461, + 114 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1309, + 310 + ], + [ + 1309, + 295 + ], + [ + 1313, + 284 + ], + [ + 1321, + 275 + ], + [ + 1331, + 270 + ], + [ + 1340, + 268 + ], + [ + 1351, + 269 + ], + [ + 1361, + 274 + ], + [ + 1369, + 283 + ], + [ + 1373, + 295 + ], + [ + 1373, + 308 + ], + [ + 1367, + 319 + ], + [ + 1361, + 326 + ], + [ + 1349, + 332 + ], + [ + 1330, + 332 + ], + [ + 1323, + 328 + ], + [ + 1315, + 320 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1363, + 329 + ], + [ + 1322, + 329 + ], + [ + 1321, + 361 + ], + [ + 1362, + 361 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000088_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000088_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..715284442a44934ca4299f5f809a6072b1c6182e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000088_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000088_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000088_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..cc9ba9697d4b8ee7636327be954396acd1dcbeb7 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000088_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000088_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000088_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..25eee102806d4cdf09c3ac1e75dcffaa595e507a --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000088_000019_gtFine_polygons.json @@ -0,0 +1,612 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "building", + "polygon": [ + [ + 2049, + 1 + ], + [ + 0, + 0 + ], + [ + 0, 
+ 617 + ], + [ + 1391, + 595 + ], + [ + 2048, + 568 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1739, + 426 + ], + [ + 1733, + 404 + ], + [ + 1734, + 389 + ], + [ + 1738, + 380 + ], + [ + 1741, + 365 + ], + [ + 1726, + 356 + ], + [ + 1709, + 356 + ], + [ + 1706, + 365 + ], + [ + 1705, + 381 + ], + [ + 1698, + 389 + ], + [ + 1691, + 397 + ], + [ + 1686, + 408 + ], + [ + 1682, + 423 + ], + [ + 1717, + 436 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 0, + 1024 + ], + [ + 0, + 630 + ], + [ + 1093, + 611 + ], + [ + 2048, + 582 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 657, + 649 + ], + [ + 194, + 660 + ], + [ + 0, + 665 + ], + [ + 0, + 598 + ], + [ + 362, + 591 + ], + [ + 720, + 586 + ], + [ + 720, + 559 + ], + [ + 1044, + 553 + ], + [ + 1063, + 580 + ], + [ + 1110, + 578 + ], + [ + 1109, + 553 + ], + [ + 1314, + 550 + ], + [ + 1316, + 575 + ], + [ + 1376, + 574 + ], + [ + 2047, + 556 + ], + [ + 2048, + 635 + ], + [ + 1462, + 630 + ], + [ + 1194, + 637 + ], + [ + 834, + 644 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 700, + 588 + ], + [ + 698, + 1 + ], + [ + 683, + 1 + ], + [ + 683, + 588 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 352, + 443 + ], + [ + 356, + 631 + ], + [ + 607, + 633 + ], + [ + 600, + 440 + ], + [ + 498, + 440 + ], + [ + 481, + 443 + ], + [ + 379, + 441 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1851, + 447 + ], + [ + 1836, + 446 + ], + [ + 1811, + 457 + ], + [ + 1871, + 489 + ], + [ + 1862, + 462 + ], + [ + 1855, + 455 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1663, + 407 + ], + [ + 1580, + 408 + ], + [ + 1527, + 413 + ], + [ + 1508, + 418 + ], + [ + 1474, + 423 + ], + [ + 1434, + 433 + ], + [ + 1376, + 493 + ], + [ + 1370, + 512 + ], + [ + 1368, + 541 + ], + [ + 1367, + 553 + ], + [ + 1373, + 581 + ], + [ + 1375, + 590 + ], + [ + 1382, + 600 + ], + [ + 1388, + 601 + ], + [ + 1452, + 603 + ], + [ + 1460, + 620 + ], + [ + 1471, + 631 + ], + [ + 1487, + 639 + ], + [ + 1515, + 639 + ], + [ + 1531, + 635 + ], + [ + 1541, + 627 + ], + [ + 1557, + 638 + ], + [ + 1572, + 643 + ], + [ + 1591, + 643 + ], + [ + 1612, + 634 + ], + [ + 1625, + 620 + ], + [ + 1626, + 613 + ], + [ + 1843, + 615 + ], + [ + 1861, + 626 + ], + [ + 1880, + 630 + ], + [ + 1902, + 628 + ], + [ + 1915, + 622 + ], + [ + 1921, + 613 + ], + [ + 1952, + 610 + ], + [ + 1967, + 626 + ], + [ + 1982, + 635 + ], + [ + 2000, + 638 + ], + [ + 2019, + 638 + ], + [ + 2036, + 631 + ], + [ + 2044, + 623 + ], + [ + 2047, + 622 + ], + [ + 2048, + 514 + ], + [ + 1986, + 503 + ], + [ + 1900, + 491 + ], + [ + 1882, + 477 + ], + [ + 1873, + 472 + ], + [ + 1803, + 435 + ], + [ + 1778, + 422 + ], + [ + 1750, + 413 + ], + [ + 1716, + 407 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end 
of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..b9ce0136d4e856ba618cb44f75d6601f952c036c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..f7d6eb901d6bbdacc1f742a10e3b57639cae60fe Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..7a18eb417f20159f0ef8aaac22b170b7c113b5c7 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..2f668439ad684f94c46790f276b0e968e6c3bb84 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000089_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000090_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000090_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..d0f3e922f9e02f3333599c712a42506b69a407b0 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000090_000019_gtFine_polygons.json @@ -0,0 +1,4025 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1266, + 0 + ], + [ + 777, + 1 + ], + [ + 822, + 256 + ], + [ + 967, + 372 + ], + [ + 1097, + 360 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2049, + 330 + ], + [ + 1128, + 388 + ], + [ + 0, + 424 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 979, + 325 + ], + [ + 977, + 271 + ], + [ + 948, + 269 + ], + [ + 942, + 248 + ], + [ + 935, + 269 + ], + [ + 933, + 263 + ], + [ + 916, + 264 + ], + [ + 902, + 245 + ], + [ + 898, + 245 + ], + [ + 892, + 180 + ], + [ + 885, + 179 + ], + [ + 886, + 190 + ], + [ + 879, + 190 + ], + [ + 876, + 147 + ], + [ + 866, + 147 + ], + [ + 867, + 159 + ], + [ + 867, + 161 + ], + [ + 854, + 160 + ], + [ + 851, + 32 + ], + [ + 836, + 0 + ], + [ + 836, + 1 + ], + [ + 1, + 0 + ], + [ + 0, + 467 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 309, + 343 + ], + [ + 308, + 286 + ], + [ + 313, + 263 + ], + [ + 288, + 218 + ], + [ + 239, + 228 + ], + [ + 212, + 211 + ], + [ + 179, + 201 + ], + [ + 115, + 209 + ], + [ + 66, + 188 + ], + [ + 25, + 155 + ], + [ + 0, + 153 + ], + [ + 0, + 394 + ], + [ + 304, + 374 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 364, + 240 + ], + [ + 368, + 340 + ], + [ + 429, + 340 + ], + [ + 422, + 241 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1046, + 323 + ], + [ + 1045, + 264 + ], + [ + 1050, + 255 + ], + [ + 1081, + 253 + ], + [ 
+ 1081, + 237 + ], + [ + 1084, + 223 + ], + [ + 1082, + 196 + ], + [ + 1086, + 190 + ], + [ + 1086, + 179 + ], + [ + 1090, + 175 + ], + [ + 1090, + 165 + ], + [ + 1093, + 156 + ], + [ + 1095, + 155 + ], + [ + 1095, + 140 + ], + [ + 1099, + 134 + ], + [ + 1114, + 119 + ], + [ + 1122, + 90 + ], + [ + 1148, + 78 + ], + [ + 1214, + 17 + ], + [ + 1223, + 1 + ], + [ + 2048, + 1 + ], + [ + 2048, + 446 + ], + [ + 1754, + 453 + ], + [ + 1341, + 449 + ], + [ + 1145, + 446 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1936, + 435 + ], + [ + 1894, + 436 + ], + [ + 1799, + 439 + ], + [ + 1760, + 442 + ], + [ + 1623, + 445 + ], + [ + 1381, + 455 + ], + [ + 1335, + 456 + ], + [ + 1171, + 455 + ], + [ + 1151, + 456 + ], + [ + 1121, + 448 + ], + [ + 1114, + 460 + ], + [ + 1124, + 468 + ], + [ + 1184, + 468 + ], + [ + 1320, + 468 + ], + [ + 1347, + 466 + ], + [ + 1383, + 466 + ], + [ + 1452, + 466 + ], + [ + 1488, + 467 + ], + [ + 1591, + 463 + ], + [ + 1623, + 462 + ], + [ + 2048, + 456 + ], + [ + 2048, + 433 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2019, + 564 + ], + [ + 1962, + 574 + ], + [ + 1919, + 583 + ], + [ + 1890, + 596 + ], + [ + 1878, + 617 + ], + [ + 1875, + 636 + ], + [ + 1882, + 652 + ], + [ + 1898, + 665 + ], + [ + 1945, + 687 + ], + [ + 2002, + 706 + ], + [ + 2048, + 721 + ], + [ + 2048, + 563 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1197, + 439 + ], + [ + 1158, + 440 + ], + [ + 1146, + 443 + ], + [ + 1153, + 460 + ], + [ + 1183, + 458 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1153, + 413 + ], + [ + 1145, + 407 + ], + [ + 1130, + 407 + ], + [ + 1145, + 454 + ], + [ + 1155, + 456 + ], + [ + 1158, + 450 + ], + [ + 1163, + 449 + ], + [ + 1165, + 440 + ], + [ + 1158, + 432 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1131, + 263 + ], + [ + 1133, + 387 + ], + [ + 1133, + 414 + ], + [ + 1138, + 416 + ], + [ + 1134, + 262 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1142, + 349 + ], + [ + 1137, + 348 + ], + [ + 1137, + 366 + ], + [ + 1142, + 364 + ], + [ + 1142, + 347 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1140, + 380 + ], + [ + 1136, + 377 + ], + [ + 1132, + 381 + ], + [ + 1132, + 410 + ], + [ + 1142, + 408 + ], + [ + 1144, + 394 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1218, + 414 + ], + [ + 1210, + 414 + ], + [ + 1210, + 410 + ], + [ + 1199, + 411 + ], + [ + 1187, + 429 + ], + [ + 1187, + 437 + ], + [ + 1186, + 443 + ], + [ + 1222, + 442 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1382, + 408 + ], + [ + 1324, + 410 + ], + [ + 1322, + 412 + ], + [ + 1306, + 413 + ], + [ + 1309, + 436 + ], + [ + 1378, + 433 + ], + [ + 1387, + 410 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1381, + 428 + ], + [ + 1346, + 428 + ], + [ + 1326, + 432 + ], + [ + 1329, + 461 + ], + [ + 1384, + 460 + ], + [ + 1387, + 434 + ], + [ + 1384, + 428 + ] + ] + }, + { + "label": "train", + "polygon": [ + [ + 245, + 212 + ], + [ + 252, + 225 + ], + [ + 337, + 272 + ], + [ + 335, + 279 + ], + [ + 282, + 324 + ], + [ + 273, + 324 + ], + [ + 272, + 332 + ], + [ + 218, + 333 + ], + [ + 155, + 332 + ], + [ + 0, + 337 + ], + [ + 0, + 514 + ], + [ + 254, + 503 + ], + [ + 254, + 498 + ], + [ + 267, + 493 + ], + [ + 495, + 486 + ], + [ + 520, + 487 + ], + [ + 533, + 490 + ], + [ + 570, + 490 + ], + [ + 575, + 484 + ], + [ + 585, + 484 + ], + [ + 591, + 493 + ], + [ + 711, + 489 + ], + [ + 731, + 480 + ], + [ + 737, + 480 + ], + [ + 743, + 484 + ], + [ + 786, + 481 + ], 
+ [ + 788, + 478 + ], + [ + 799, + 478 + ], + [ + 807, + 476 + ], + [ + 1012, + 471 + ], + [ + 1019, + 472 + ], + [ + 1029, + 470 + ], + [ + 1039, + 470 + ], + [ + 1042, + 477 + ], + [ + 1045, + 479 + ], + [ + 1049, + 480 + ], + [ + 1060, + 480 + ], + [ + 1063, + 476 + ], + [ + 1078, + 474 + ], + [ + 1091, + 474 + ], + [ + 1098, + 474 + ], + [ + 1103, + 477 + ], + [ + 1111, + 479 + ], + [ + 1124, + 477 + ], + [ + 1125, + 471 + ], + [ + 1128, + 465 + ], + [ + 1132, + 465 + ], + [ + 1132, + 468 + ], + [ + 1139, + 467 + ], + [ + 1139, + 465 + ], + [ + 1148, + 463 + ], + [ + 1154, + 459 + ], + [ + 1156, + 450 + ], + [ + 1153, + 443 + ], + [ + 1142, + 440 + ], + [ + 1139, + 416 + ], + [ + 1131, + 349 + ], + [ + 1125, + 325 + ], + [ + 1115, + 323 + ], + [ + 1085, + 317 + ], + [ + 1034, + 313 + ], + [ + 1012, + 313 + ], + [ + 769, + 321 + ], + [ + 742, + 322 + ], + [ + 725, + 314 + ], + [ + 566, + 319 + ], + [ + 560, + 325 + ], + [ + 548, + 326 + ], + [ + 544, + 328 + ], + [ + 521, + 328 + ], + [ + 520, + 327 + ], + [ + 456, + 329 + ], + [ + 338, + 332 + ], + [ + 335, + 328 + ], + [ + 331, + 322 + ], + [ + 289, + 325 + ], + [ + 341, + 280 + ], + [ + 346, + 277 + ], + [ + 346, + 271 + ], + [ + 344, + 266 + ], + [ + 280, + 225 + ], + [ + 277, + 218 + ], + [ + 259, + 214 + ], + [ + 244, + 210 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 296, + 511 + ], + [ + 294, + 501 + ], + [ + 192, + 502 + ], + [ + 38, + 508 + ], + [ + 0, + 509 + ], + [ + 0, + 521 + ], + [ + 118, + 518 + ], + [ + 245, + 513 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 22, + 556 + ], + [ + 17, + 538 + ], + [ + 0, + 537 + ], + [ + 0, + 557 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 42, + 725 + ], + [ + 10, + 722 + ], + [ + 0, + 722 + ], + [ + 0, + 789 + ], + [ + 44, + 770 + ], + [ + 70, + 757 + ], + [ + 75, + 753 + ], + [ + 75, + 735 + ], + [ + 68, + 730 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1190, + 380 + ], + [ + 1190, + 443 + ], + [ + 1201, + 441 + ], + [ + 1201, + 378 + ], + [ + 1197, + 372 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1271, + 422 + ], + [ + 1268, + 293 + ], + [ + 1285, + 266 + ], + [ + 1289, + 296 + ], + [ + 1292, + 420 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1313, + 442 + ], + [ + 1308, + 309 + ], + [ + 1306, + 281 + ], + [ + 1303, + 273 + ], + [ + 1298, + 297 + ], + [ + 1301, + 422 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 2039, + 380 + ], + [ + 2037, + 290 + ], + [ + 2036, + 271 + ], + [ + 2013, + 311 + ], + [ + 2015, + 387 + ], + [ + 2038, + 385 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1811, + 418 + ], + [ + 1776, + 419 + ], + [ + 1758, + 420 + ], + [ + 1757, + 442 + ], + [ + 1762, + 447 + ], + [ + 1808, + 443 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1165, + 237 + ], + [ + 1148, + 242 + ], + [ + 1148, + 252 + ], + [ + 1159, + 253 + ], + [ + 1172, + 254 + ], + [ + 1180, + 275 + ], + [ + 1170, + 278 + ], + [ + 1161, + 295 + ], + [ + 1151, + 295 + ], + [ + 1150, + 310 + ], + [ + 1158, + 315 + ], + [ + 1168, + 306 + ], + [ + 1178, + 305 + ], + [ + 1172, + 311 + ], + [ + 1166, + 316 + ], + [ + 1176, + 318 + ], + [ + 1183, + 307 + ], + [ + 1188, + 311 + ], + [ + 1187, + 327 + ], + [ + 1181, + 337 + ], + [ + 1171, + 334 + ], + [ + 1172, + 350 + ], + [ + 1187, + 350 + ], + [ + 1181, + 354 + ], + [ + 1178, + 362 + ], + [ + 1185, + 376 + ], + [ + 1191, + 387 + ], + [ + 1209, + 377 + ], + [ + 1216, + 381 + ], + [ + 1224, + 434 + ], + [ + 1264, + 419 + ], + [ + 1263, + 
405 + ], + [ + 1259, + 374 + ], + [ + 1259, + 363 + ], + [ + 1266, + 359 + ], + [ + 1279, + 355 + ], + [ + 1290, + 346 + ], + [ + 1299, + 342 + ], + [ + 1298, + 335 + ], + [ + 1287, + 324 + ], + [ + 1279, + 326 + ], + [ + 1269, + 327 + ], + [ + 1259, + 321 + ], + [ + 1256, + 316 + ], + [ + 1264, + 307 + ], + [ + 1273, + 296 + ], + [ + 1280, + 294 + ], + [ + 1280, + 312 + ], + [ + 1290, + 314 + ], + [ + 1293, + 317 + ], + [ + 1295, + 320 + ], + [ + 1289, + 331 + ], + [ + 1297, + 334 + ], + [ + 1305, + 324 + ], + [ + 1309, + 324 + ], + [ + 1310, + 365 + ], + [ + 1326, + 359 + ], + [ + 1327, + 349 + ], + [ + 1330, + 346 + ], + [ + 1344, + 345 + ], + [ + 1354, + 341 + ], + [ + 1367, + 346 + ], + [ + 1385, + 353 + ], + [ + 1406, + 387 + ], + [ + 1454, + 377 + ], + [ + 1453, + 357 + ], + [ + 1468, + 362 + ], + [ + 1466, + 350 + ], + [ + 1457, + 343 + ], + [ + 1448, + 325 + ], + [ + 1454, + 311 + ], + [ + 1475, + 312 + ], + [ + 1485, + 322 + ], + [ + 1489, + 347 + ], + [ + 1495, + 367 + ], + [ + 1524, + 401 + ], + [ + 1591, + 434 + ], + [ + 1658, + 431 + ], + [ + 1717, + 272 + ], + [ + 1724, + 258 + ], + [ + 1732, + 257 + ], + [ + 1739, + 266 + ], + [ + 1750, + 278 + ], + [ + 1757, + 290 + ], + [ + 1767, + 299 + ], + [ + 1750, + 304 + ], + [ + 1736, + 313 + ], + [ + 1741, + 354 + ], + [ + 1768, + 373 + ], + [ + 1787, + 383 + ], + [ + 1785, + 406 + ], + [ + 1785, + 419 + ], + [ + 1780, + 431 + ], + [ + 1783, + 443 + ], + [ + 1798, + 443 + ], + [ + 1813, + 444 + ], + [ + 1833, + 442 + ], + [ + 1857, + 436 + ], + [ + 1898, + 442 + ], + [ + 1887, + 429 + ], + [ + 1886, + 413 + ], + [ + 1878, + 371 + ], + [ + 1879, + 356 + ], + [ + 1887, + 356 + ], + [ + 1893, + 364 + ], + [ + 1890, + 379 + ], + [ + 1896, + 390 + ], + [ + 1913, + 385 + ], + [ + 1924, + 372 + ], + [ + 1935, + 383 + ], + [ + 1960, + 384 + ], + [ + 1969, + 374 + ], + [ + 1987, + 374 + ], + [ + 1993, + 353 + ], + [ + 2011, + 339 + ], + [ + 2018, + 312 + ], + [ + 2028, + 295 + ], + [ + 2037, + 292 + ], + [ + 2048, + 297 + ], + [ + 2048, + 297 + ], + [ + 2048, + 1 + ], + [ + 1143, + 1 + ], + [ + 1146, + 12 + ], + [ + 1141, + 14 + ], + [ + 1133, + 4 + ], + [ + 1117, + 4 + ], + [ + 1097, + 10 + ], + [ + 1105, + 67 + ], + [ + 1106, + 72 + ], + [ + 1103, + 80 + ], + [ + 1118, + 75 + ], + [ + 1122, + 79 + ], + [ + 1131, + 66 + ], + [ + 1131, + 76 + ], + [ + 1128, + 87 + ], + [ + 1120, + 91 + ], + [ + 1112, + 84 + ], + [ + 1106, + 86 + ], + [ + 1108, + 104 + ], + [ + 1109, + 115 + ], + [ + 1100, + 137 + ], + [ + 1103, + 139 + ], + [ + 1117, + 131 + ], + [ + 1133, + 126 + ], + [ + 1131, + 141 + ], + [ + 1116, + 148 + ], + [ + 1114, + 166 + ], + [ + 1122, + 172 + ], + [ + 1126, + 178 + ], + [ + 1126, + 185 + ], + [ + 1136, + 183 + ], + [ + 1149, + 162 + ], + [ + 1165, + 152 + ], + [ + 1159, + 171 + ], + [ + 1157, + 188 + ], + [ + 1149, + 195 + ], + [ + 1161, + 197 + ], + [ + 1152, + 212 + ], + [ + 1160, + 226 + ], + [ + 1170, + 222 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1478, + 481 + ], + [ + 1446, + 480 + ], + [ + 1435, + 481 + ], + [ + 1379, + 482 + ], + [ + 1275, + 484 + ], + [ + 1231, + 483 + ], + [ + 1226, + 481 + ], + [ + 1226, + 472 + ], + [ + 1261, + 471 + ], + [ + 1332, + 470 + ], + [ + 1385, + 470 + ], + [ + 1439, + 473 + ], + [ + 1465, + 473 + ], + [ + 1484, + 473 + ], + [ + 1593, + 470 + ], + [ + 1619, + 470 + ], + [ + 1632, + 476 + ], + [ + 1616, + 478 + ], + [ + 1587, + 479 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1231, + 293 + ], + [ + 1235, + 427 + ], + [ + 1245, + 424 + ], + [ + 1240, + 298 + ] 
+ ] + }, + { + "label": "pole", + "polygon": [ + [ + 1208, + 138 + ], + [ + 1212, + 414 + ], + [ + 1217, + 414 + ], + [ + 1218, + 438 + ], + [ + 1227, + 431 + ], + [ + 1220, + 138 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1322, + 426 + ], + [ + 1296, + 416 + ], + [ + 1259, + 415 + ], + [ + 1245, + 417 + ], + [ + 1236, + 420 + ], + [ + 1215, + 434 + ], + [ + 1202, + 436 + ], + [ + 1186, + 439 + ], + [ + 1173, + 443 + ], + [ + 1169, + 455 + ], + [ + 1169, + 458 + ], + [ + 1173, + 465 + ], + [ + 1178, + 465 + ], + [ + 1183, + 469 + ], + [ + 1185, + 470 + ], + [ + 1193, + 470 + ], + [ + 1204, + 470 + ], + [ + 1216, + 469 + ], + [ + 1221, + 463 + ], + [ + 1282, + 463 + ], + [ + 1286, + 468 + ], + [ + 1287, + 470 + ], + [ + 1306, + 470 + ], + [ + 1310, + 468 + ], + [ + 1312, + 470 + ], + [ + 1315, + 470 + ], + [ + 1319, + 465 + ], + [ + 1321, + 460 + ], + [ + 1335, + 457 + ], + [ + 1340, + 453 + ], + [ + 1343, + 444 + ], + [ + 1342, + 440 + ], + [ + 1341, + 429 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1446, + 330 + ], + [ + 1440, + 333 + ], + [ + 1437, + 340 + ], + [ + 1438, + 345 + ], + [ + 1444, + 350 + ], + [ + 1450, + 350 + ], + [ + 1453, + 348 + ], + [ + 1456, + 342 + ], + [ + 1455, + 335 + ], + [ + 1452, + 332 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1437, + 346 + ], + [ + 1437, + 374 + ], + [ + 1450, + 371 + ], + [ + 1450, + 367 + ], + [ + 1444, + 366 + ], + [ + 1444, + 364 + ], + [ + 1451, + 361 + ], + [ + 1451, + 354 + ], + [ + 1442, + 353 + ], + [ + 1450, + 351 + ], + [ + 1449, + 346 + ], + [ + 1442, + 345 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1366, + 424 + ], + [ + 1361, + 434 + ], + [ + 1361, + 441 + ], + [ + 1363, + 451 + ], + [ + 1367, + 456 + ], + [ + 1382, + 458 + ], + [ + 1455, + 454 + ], + [ + 1490, + 451 + ], + [ + 1624, + 446 + ], + [ + 1626, + 422 + ], + [ + 1591, + 423 + ], + [ + 1493, + 364 + ], + [ + 1457, + 367 + ], + [ + 1438, + 371 + ], + [ + 1431, + 375 + ], + [ + 1398, + 405 + ], + [ + 1375, + 417 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1521, + 235 + ], + [ + 1522, + 272 + ], + [ + 1541, + 272 + ], + [ + 1539, + 259 + ], + [ + 1530, + 259 + ], + [ + 1530, + 256 + ], + [ + 1539, + 255 + ], + [ + 1539, + 248 + ], + [ + 1530, + 245 + ], + [ + 1526, + 234 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1489, + 223 + ], + [ + 1480, + 224 + ], + [ + 1458, + 231 + ], + [ + 1437, + 251 + ], + [ + 1436, + 257 + ], + [ + 1459, + 235 + ], + [ + 1485, + 227 + ], + [ + 1492, + 227 + ], + [ + 1492, + 224 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1438, + 288 + ], + [ + 1439, + 245 + ], + [ + 1430, + 244 + ], + [ + 1428, + 289 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1487, + 214 + ], + [ + 1487, + 254 + ], + [ + 1500, + 253 + ], + [ + 1507, + 246 + ], + [ + 1506, + 222 + ], + [ + 1501, + 219 + ], + [ + 1499, + 213 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1334, + 509 + ], + [ + 1337, + 496 + ], + [ + 1384, + 493 + ], + [ + 1514, + 491 + ], + [ + 1624, + 489 + ], + [ + 1762, + 491 + ], + [ + 1908, + 487 + ], + [ + 1909, + 482 + ], + [ + 2048, + 478 + ], + [ + 2048, + 499 + ], + [ + 2001, + 500 + ], + [ + 1844, + 508 + ], + [ + 1748, + 512 + ], + [ + 1643, + 518 + ], + [ + 1481, + 529 + ], + [ + 1411, + 530 + ], + [ + 1387, + 528 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1155, + 7 + ], + [ + 1107, + 9 + ], + [ + 1107, + 5 + ], + [ + 1151, + 3 + ], + [ + 1161, + 3 + ], + [ + 1349, + 34 + ], + [ + 
1361, + 39 + ], + [ + 1370, + 44 + ], + [ + 1380, + 54 + ], + [ + 1386, + 64 + ], + [ + 1390, + 74 + ], + [ + 1398, + 140 + ], + [ + 1388, + 134 + ], + [ + 1384, + 80 + ], + [ + 1380, + 66 + ], + [ + 1369, + 53 + ], + [ + 1356, + 45 + ], + [ + 1349, + 42 + ], + [ + 1336, + 38 + ], + [ + 1162, + 8 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1112, + 62 + ], + [ + 1110, + 0 + ], + [ + 1066, + 0 + ], + [ + 1068, + 60 + ], + [ + 1069, + 66 + ], + [ + 1072, + 68 + ], + [ + 1077, + 70 + ], + [ + 1106, + 69 + ], + [ + 1110, + 66 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1380, + 498 + ], + [ + 1379, + 221 + ], + [ + 1389, + 222 + ], + [ + 1393, + 498 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1366, + 354 + ], + [ + 1371, + 351 + ], + [ + 1381, + 351 + ], + [ + 1382, + 406 + ], + [ + 1371, + 405 + ], + [ + 1367, + 403 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1438, + 506 + ], + [ + 1427, + 0 + ], + [ + 1387, + 0 + ], + [ + 1396, + 506 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1413, + 232 + ], + [ + 1397, + 232 + ], + [ + 1390, + 237 + ], + [ + 1388, + 270 + ], + [ + 1390, + 304 + ], + [ + 1412, + 307 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1495, + 259 + ], + [ + 1427, + 263 + ], + [ + 1424, + 265 + ], + [ + 1424, + 268 + ], + [ + 1459, + 323 + ], + [ + 1462, + 324 + ], + [ + 1464, + 323 + ], + [ + 1498, + 264 + ], + [ + 1498, + 261 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1844, + 414 + ], + [ + 1841, + 279 + ], + [ + 1845, + 274 + ], + [ + 1847, + 280 + ], + [ + 1850, + 414 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1840, + 261 + ], + [ + 1841, + 279 + ], + [ + 1853, + 279 + ], + [ + 1859, + 277 + ], + [ + 1861, + 265 + ], + [ + 1854, + 265 + ], + [ + 1853, + 262 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1860, + 304 + ], + [ + 1847, + 304 + ], + [ + 1849, + 337 + ], + [ + 1863, + 332 + ], + [ + 1872, + 328 + ], + [ + 1872, + 322 + ], + [ + 1861, + 322 + ], + [ + 1861, + 317 + ], + [ + 1872, + 315 + ], + [ + 1872, + 308 + ], + [ + 1861, + 307 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1968, + 403 + ], + [ + 2000, + 385 + ], + [ + 2008, + 382 + ], + [ + 2048, + 376 + ], + [ + 2048, + 430 + ], + [ + 1951, + 424 + ], + [ + 1949, + 412 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 1808, + 401 + ], + [ + 1808, + 452 + ], + [ + 2048, + 446 + ], + [ + 2048, + 396 + ], + [ + 1902, + 399 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1860, + 239 + ], + [ + 1859, + 244 + ], + [ + 1855, + 246 + ], + [ + 1854, + 250 + ], + [ + 1856, + 252 + ], + [ + 1859, + 254 + ], + [ + 1859, + 261 + ], + [ + 1855, + 267 + ], + [ + 1856, + 277 + ], + [ + 1858, + 281 + ], + [ + 1857, + 288 + ], + [ + 1855, + 294 + ], + [ + 1857, + 301 + ], + [ + 1860, + 304 + ], + [ + 1861, + 308 + ], + [ + 1875, + 308 + ], + [ + 1880, + 309 + ], + [ + 1877, + 235 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1883, + 167 + ], + [ + 1884, + 238 + ], + [ + 1884, + 419 + ], + [ + 1885, + 423 + ], + [ + 1887, + 493 + ], + [ + 1873, + 493 + ], + [ + 1872, + 424 + ], + [ + 1874, + 416 + ], + [ + 1874, + 194 + ], + [ + 1874, + 173 + ], + [ + 1874, + 167 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1866, + 173 + ], + [ + 1866, + 201 + ], + [ + 1878, + 194 + ], + [ + 1878, + 167 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1899, + 235 + ], + [ + 1877, + 235 + ], + [ + 1878, + 302 + 
], + [ + 1879, + 313 + ], + [ + 1883, + 312 + ], + [ + 1884, + 305 + ], + [ + 1889, + 304 + ], + [ + 1895, + 302 + ], + [ + 1899, + 300 + ], + [ + 1900, + 295 + ], + [ + 1903, + 291 + ], + [ + 1903, + 287 + ], + [ + 1900, + 281 + ], + [ + 1900, + 275 + ], + [ + 1903, + 271 + ], + [ + 1904, + 265 + ], + [ + 1902, + 261 + ], + [ + 1899, + 259 + ], + [ + 1899, + 253 + ], + [ + 1901, + 249 + ], + [ + 1902, + 244 + ], + [ + 1900, + 237 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1481, + 544 + ], + [ + 1477, + 523 + ], + [ + 1474, + 505 + ], + [ + 1484, + 443 + ], + [ + 1483, + 423 + ], + [ + 1481, + 411 + ], + [ + 1484, + 404 + ], + [ + 1491, + 399 + ], + [ + 1495, + 385 + ], + [ + 1492, + 370 + ], + [ + 1492, + 356 + ], + [ + 1497, + 349 + ], + [ + 1504, + 345 + ], + [ + 1505, + 339 + ], + [ + 1501, + 330 + ], + [ + 1501, + 316 + ], + [ + 1499, + 299 + ], + [ + 1500, + 287 + ], + [ + 1509, + 276 + ], + [ + 1524, + 266 + ], + [ + 1535, + 265 + ], + [ + 1551, + 270 + ], + [ + 1562, + 279 + ], + [ + 1573, + 302 + ], + [ + 1573, + 328 + ], + [ + 1573, + 341 + ], + [ + 1584, + 349 + ], + [ + 1594, + 367 + ], + [ + 1605, + 394 + ], + [ + 1607, + 408 + ], + [ + 1597, + 433 + ], + [ + 1589, + 447 + ], + [ + 1597, + 479 + ], + [ + 1600, + 506 + ], + [ + 1600, + 522 + ], + [ + 1594, + 545 + ], + [ + 1574, + 607 + ], + [ + 1575, + 628 + ], + [ + 1584, + 653 + ], + [ + 1583, + 683 + ], + [ + 1577, + 707 + ], + [ + 1574, + 724 + ], + [ + 1575, + 738 + ], + [ + 1578, + 750 + ], + [ + 1576, + 779 + ], + [ + 1561, + 780 + ], + [ + 1561, + 766 + ], + [ + 1553, + 780 + ], + [ + 1511, + 779 + ], + [ + 1511, + 771 + ], + [ + 1481, + 771 + ], + [ + 1479, + 764 + ], + [ + 1497, + 754 + ], + [ + 1507, + 742 + ], + [ + 1508, + 718 + ], + [ + 1493, + 673 + ], + [ + 1474, + 631 + ], + [ + 1465, + 617 + ], + [ + 1464, + 603 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1600, + 263 + ], + [ + 1602, + 250 + ], + [ + 1613, + 233 + ], + [ + 1631, + 226 + ], + [ + 1664, + 228 + ], + [ + 1680, + 243 + ], + [ + 1689, + 264 + ], + [ + 1693, + 269 + ], + [ + 1710, + 272 + ], + [ + 1723, + 279 + ], + [ + 1732, + 294 + ], + [ + 1739, + 305 + ], + [ + 1749, + 319 + ], + [ + 1758, + 340 + ], + [ + 1763, + 360 + ], + [ + 1763, + 409 + ], + [ + 1764, + 430 + ], + [ + 1764, + 494 + ], + [ + 1754, + 502 + ], + [ + 1755, + 513 + ], + [ + 1750, + 527 + ], + [ + 1744, + 544 + ], + [ + 1742, + 560 + ], + [ + 1723, + 582 + ], + [ + 1721, + 606 + ], + [ + 1728, + 616 + ], + [ + 1743, + 640 + ], + [ + 1763, + 669 + ], + [ + 1776, + 685 + ], + [ + 1790, + 713 + ], + [ + 1811, + 740 + ], + [ + 1820, + 754 + ], + [ + 1820, + 762 + ], + [ + 1803, + 775 + ], + [ + 1792, + 788 + ], + [ + 1785, + 801 + ], + [ + 1773, + 808 + ], + [ + 1746, + 807 + ], + [ + 1744, + 802 + ], + [ + 1753, + 786 + ], + [ + 1761, + 764 + ], + [ + 1756, + 746 + ], + [ + 1725, + 713 + ], + [ + 1694, + 671 + ], + [ + 1690, + 675 + ], + [ + 1688, + 709 + ], + [ + 1684, + 728 + ], + [ + 1675, + 754 + ], + [ + 1674, + 769 + ], + [ + 1673, + 782 + ], + [ + 1675, + 806 + ], + [ + 1677, + 823 + ], + [ + 1675, + 829 + ], + [ + 1665, + 832 + ], + [ + 1611, + 834 + ], + [ + 1581, + 824 + ], + [ + 1581, + 816 + ], + [ + 1589, + 810 + ], + [ + 1617, + 801 + ], + [ + 1621, + 785 + ], + [ + 1630, + 769 + ], + [ + 1627, + 762 + ], + [ + 1635, + 681 + ], + [ + 1638, + 658 + ], + [ + 1633, + 647 + ], + [ + 1646, + 577 + ], + [ + 1644, + 525 + ], + [ + 1640, + 513 + ], + [ + 1624, + 524 + ], + [ + 1614, + 499 + ], + [ + 1612, + 472 + ], + [ + 1614, + 450 + ], + [ + 
1605, + 443 + ], + [ + 1607, + 431 + ], + [ + 1616, + 420 + ], + [ + 1613, + 398 + ], + [ + 1612, + 358 + ], + [ + 1611, + 339 + ], + [ + 1616, + 323 + ], + [ + 1620, + 312 + ], + [ + 1617, + 301 + ], + [ + 1610, + 299 + ], + [ + 1604, + 280 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1353, + 132 + ], + [ + 1356, + 128 + ], + [ + 1474, + 123 + ], + [ + 1478, + 126 + ], + [ + 1477, + 134 + ], + [ + 1479, + 216 + ], + [ + 1477, + 223 + ], + [ + 1472, + 224 + ], + [ + 1360, + 229 + ], + [ + 1355, + 226 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000091_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000091_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..bb7bb4b22bf332d6a78c5754265087aea0f56e30 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000091_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000092_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000092_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..59fe7b1b102a2c5acd0aacbb04b8fc90efaec8b3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000092_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000092_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000092_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..748ff89d6576e2aa2c18af006645e746b3080806 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000092_000019_gtFine_polygons.json @@ -0,0 +1,2780 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1725, + 0 + ], + [ + 1733, + 122 + ], + [ + 1461, + 309 + ], + [ + 1453, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1564, + 459 + ], + [ + 1461, + 444 + ], + [ + 533, + 427 + ], + [ + 1, + 480 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 535 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1500, + 254 + ], + [ + 1494, + 260 + ], + [ + 1479, + 265 + ], + [ + 1465, + 271 + ], + [ + 1470, + 477 + ], + [ + 1487, + 489 + ], + [ + 1501, + 497 + ], + [ + 2048, + 547 + ], + [ + 2048, + 1 + ], + [ + 1697, + 1 + ], + [ + 1696, + 13 + ], + [ + 1697, + 121 + ], + [ + 1648, + 122 + ], + [ + 1640, + 154 + ], + [ + 1636, + 157 + ], + [ + 1638, + 164 + ], + [ + 1562, + 166 + ], + [ + 1556, + 207 + ], + [ + 1550, + 207 + ], + [ + 1546, + 211 + ], + [ + 1547, + 231 + ], + [ + 1533, + 231 + ], + [ + 1529, + 244 + ], + [ + 1518, + 246 + ], + [ + 1515, + 253 + ], + [ + 1511, + 253 + 
] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 954, + 499 + ], + [ + 907, + 501 + ], + [ + 847, + 501 + ], + [ + 745, + 505 + ], + [ + 726, + 504 + ], + [ + 570, + 507 + ], + [ + 531, + 506 + ], + [ + 532, + 491 + ], + [ + 734, + 474 + ], + [ + 944, + 451 + ], + [ + 1174, + 457 + ], + [ + 1474, + 468 + ], + [ + 1490, + 478 + ], + [ + 1500, + 496 + ], + [ + 1490, + 497 + ], + [ + 1443, + 494 + ], + [ + 1261, + 495 + ], + [ + 1159, + 494 + ], + [ + 1069, + 496 + ], + [ + 1021, + 494 + ], + [ + 961, + 498 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1492, + 430 + ], + [ + 1490, + 417 + ], + [ + 1497, + 408 + ], + [ + 1497, + 396 + ], + [ + 1497, + 367 + ], + [ + 1516, + 366 + ], + [ + 1520, + 342 + ], + [ + 1529, + 332 + ], + [ + 1536, + 328 + ], + [ + 1593, + 317 + ], + [ + 1608, + 314 + ], + [ + 1607, + 308 + ], + [ + 1638, + 303 + ], + [ + 1679, + 293 + ], + [ + 1701, + 294 + ], + [ + 1728, + 318 + ], + [ + 1736, + 367 + ], + [ + 1750, + 372 + ], + [ + 1734, + 408 + ], + [ + 1715, + 412 + ], + [ + 1715, + 426 + ], + [ + 1708, + 455 + ], + [ + 1485, + 487 + ], + [ + 1485, + 439 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1747, + 426 + ], + [ + 1742, + 388 + ], + [ + 1747, + 361 + ], + [ + 1756, + 329 + ], + [ + 1764, + 314 + ], + [ + 1776, + 304 + ], + [ + 1785, + 326 + ], + [ + 1804, + 350 + ], + [ + 1810, + 388 + ], + [ + 1802, + 406 + ], + [ + 1791, + 412 + ], + [ + 1780, + 431 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1776, + 359 + ], + [ + 1775, + 397 + ], + [ + 1788, + 397 + ], + [ + 1790, + 360 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1809, + 337 + ], + [ + 1801, + 342 + ], + [ + 1799, + 353 + ], + [ + 1803, + 361 + ], + [ + 1808, + 364 + ], + [ + 1816, + 364 + ], + [ + 1824, + 360 + ], + [ + 1827, + 349 + ], + [ + 1822, + 340 + ], + [ + 1817, + 337 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1812, + 394 + ], + [ + 1826, + 364 + ], + [ + 1793, + 367 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1701, + 273 + ], + [ + 1700, + 258 + ], + [ + 1687, + 253 + ], + [ + 1687, + 203 + ], + [ + 1690, + 186 + ], + [ + 1691, + 165 + ], + [ + 1700, + 148 + ], + [ + 1719, + 148 + ], + [ + 1732, + 125 + ], + [ + 1743, + 107 + ], + [ + 1752, + 99 + ], + [ + 1752, + 80 + ], + [ + 1767, + 48 + ], + [ + 1787, + 47 + ], + [ + 1808, + 28 + ], + [ + 1832, + 14 + ], + [ + 1856, + 5 + ], + [ + 1853, + 181 + ], + [ + 1854, + 262 + ], + [ + 1866, + 248 + ], + [ + 1868, + 273 + ], + [ + 1857, + 279 + ], + [ + 1851, + 299 + ], + [ + 1850, + 307 + ], + [ + 1834, + 311 + ], + [ + 1827, + 298 + ], + [ + 1827, + 286 + ], + [ + 1807, + 284 + ], + [ + 1809, + 267 + ], + [ + 1783, + 216 + ], + [ + 1774, + 210 + ], + [ + 1763, + 214 + ], + [ + 1763, + 224 + ], + [ + 1761, + 240 + ], + [ + 1754, + 246 + ], + [ + 1755, + 255 + ], + [ + 1759, + 256 + ], + [ + 1763, + 258 + ], + [ + 1764, + 270 + ], + [ + 1741, + 271 + ], + [ + 1730, + 282 + ], + [ + 1715, + 284 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1869, + 385 + ], + [ + 1824, + 389 + ], + [ + 1823, + 267 + ], + [ + 1855, + 263 + ], + [ + 1856, + 180 + ], + [ + 1821, + 183 + ], + [ + 1822, + 164 + ], + [ + 1855, + 142 + ], + [ + 1827, + 148 + ], + [ + 1788, + 173 + ], + [ + 1814, + 168 + ], + [ + 1814, + 183 + ], + [ + 1788, + 189 + ], + [ + 1789, + 268 + ], + [ + 1815, + 268 + ], + [ + 1816, + 345 + ], + [ + 1814, + 431 + ], + [ + 1876, + 427 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1482, + 358 + ], + 
[ + 1480, + 261 + ], + [ + 1476, + 1 + ], + [ + 0, + 2 + ], + [ + 1, + 602 + ], + [ + 539, + 526 + ], + [ + 548, + 499 + ], + [ + 686, + 496 + ], + [ + 774, + 492 + ], + [ + 805, + 492 + ], + [ + 847, + 490 + ], + [ + 867, + 487 + ], + [ + 885, + 482 + ], + [ + 945, + 481 + ], + [ + 1078, + 478 + ], + [ + 1205, + 476 + ], + [ + 1272, + 476 + ], + [ + 1332, + 476 + ], + [ + 1485, + 477 + ], + [ + 1488, + 426 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1490, + 367 + ], + [ + 1484, + 367 + ], + [ + 1483, + 403 + ], + [ + 1500, + 395 + ], + [ + 1500, + 389 + ], + [ + 1490, + 388 + ], + [ + 1500, + 385 + ], + [ + 1500, + 380 + ], + [ + 1491, + 380 + ], + [ + 1501, + 373 + ], + [ + 1501, + 367 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1481, + 398 + ], + [ + 1484, + 485 + ], + [ + 1487, + 485 + ], + [ + 1484, + 392 + ], + [ + 1482, + 394 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1473, + 402 + ], + [ + 1474, + 365 + ], + [ + 1488, + 365 + ], + [ + 1488, + 403 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1604, + 434 + ], + [ + 1594, + 429 + ], + [ + 1550, + 428 + ], + [ + 1511, + 433 + ], + [ + 1507, + 436 + ], + [ + 1503, + 459 + ], + [ + 1495, + 473 + ], + [ + 1499, + 497 + ], + [ + 1604, + 440 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1312, + 381 + ], + [ + 1312, + 392 + ], + [ + 1339, + 392 + ], + [ + 1338, + 381 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1325, + 418 + ], + [ + 1324, + 391 + ], + [ + 1315, + 391 + ], + [ + 1316, + 418 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1279, + 352 + ], + [ + 1272, + 353 + ], + [ + 1267, + 359 + ], + [ + 1266, + 366 + ], + [ + 1268, + 374 + ], + [ + 1272, + 378 + ], + [ + 1280, + 380 + ], + [ + 1286, + 379 + ], + [ + 1290, + 374 + ], + [ + 1292, + 369 + ], + [ + 1293, + 362 + ], + [ + 1292, + 358 + ], + [ + 1286, + 353 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1299, + 376 + ], + [ + 1262, + 377 + ], + [ + 1278, + 412 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1344, + 444 + ], + [ + 1320, + 446 + ], + [ + 1322, + 477 + ], + [ + 1363, + 460 + ], + [ + 1363, + 446 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1280, + 417 + ], + [ + 1279, + 442 + ], + [ + 1277, + 442 + ], + [ + 1276, + 456 + ], + [ + 1280, + 477 + ], + [ + 1295, + 477 + ], + [ + 1294, + 416 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1439, + 439 + ], + [ + 1396, + 442 + ], + [ + 1373, + 451 + ], + [ + 1369, + 455 + ], + [ + 1338, + 461 + ], + [ + 1325, + 466 + ], + [ + 1322, + 476 + ], + [ + 1323, + 482 + ], + [ + 1330, + 484 + ], + [ + 1339, + 485 + ], + [ + 1347, + 491 + ], + [ + 1358, + 491 + ], + [ + 1366, + 491 + ], + [ + 1372, + 486 + ], + [ + 1426, + 482 + ], + [ + 1437, + 489 + ], + [ + 1448, + 489 + ], + [ + 1459, + 487 + ], + [ + 1460, + 479 + ], + [ + 1468, + 479 + ], + [ + 1472, + 468 + ], + [ + 1470, + 458 + ], + [ + 1454, + 445 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1195, + 330 + ], + [ + 1200, + 338 + ], + [ + 1206, + 490 + ], + [ + 1197, + 499 + ], + [ + 1267, + 498 + ], + [ + 1265, + 330 + ], + [ + 1271, + 325 + ], + [ + 1251, + 322 + ], + [ + 1226, + 322 + ], + [ + 1207, + 324 + ], + [ + 1191, + 332 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1233, + 503 + ], + [ + 1233, + 497 + ], + [ + 1254, + 493 + ], + [ + 1304, + 493 + ], + [ + 1349, + 492 + ], + [ + 1439, + 492 + ], + [ + 1443, + 492 + ], + [ + 1450, + 498 + ], + [ + 1363, + 502 + ], 
+ [ + 1271, + 505 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1300, + 478 + ], + [ + 1299, + 495 + ], + [ + 1303, + 495 + ], + [ + 1302, + 471 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1312, + 446 + ], + [ + 1288, + 446 + ], + [ + 1289, + 480 + ], + [ + 1314, + 479 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1255, + 495 + ], + [ + 1252, + 282 + ], + [ + 1245, + 270 + ], + [ + 1231, + 263 + ], + [ + 1133, + 248 + ], + [ + 1118, + 249 + ], + [ + 1114, + 246 + ], + [ + 1140, + 245 + ], + [ + 1235, + 259 + ], + [ + 1250, + 269 + ], + [ + 1255, + 279 + ], + [ + 1256, + 305 + ], + [ + 1261, + 495 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1101, + 238 + ], + [ + 1100, + 280 + ], + [ + 1121, + 280 + ], + [ + 1121, + 237 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1243, + 371 + ], + [ + 1245, + 401 + ], + [ + 1252, + 404 + ], + [ + 1266, + 405 + ], + [ + 1266, + 367 + ], + [ + 1253, + 367 + ], + [ + 1250, + 369 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 792, + 333 + ], + [ + 796, + 497 + ], + [ + 799, + 498 + ], + [ + 796, + 334 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 808, + 423 + ], + [ + 796, + 420 + ], + [ + 793, + 324 + ], + [ + 807, + 336 + ] + ] + }, + { + "label": "persongroup", + "polygon": [ + [ + 808, + 438 + ], + [ + 806, + 445 + ], + [ + 806, + 452 + ], + [ + 808, + 469 + ], + [ + 809, + 475 + ], + [ + 808, + 482 + ], + [ + 810, + 496 + ], + [ + 822, + 496 + ], + [ + 825, + 493 + ], + [ + 823, + 479 + ], + [ + 824, + 468 + ], + [ + 824, + 463 + ], + [ + 819, + 451 + ], + [ + 815, + 445 + ], + [ + 811, + 439 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1080, + 485 + ], + [ + 1082, + 476 + ], + [ + 1077, + 463 + ], + [ + 1057, + 450 + ], + [ + 1006, + 449 + ], + [ + 978, + 464 + ], + [ + 941, + 471 + ], + [ + 938, + 478 + ], + [ + 938, + 485 + ], + [ + 938, + 491 + ], + [ + 951, + 495 + ], + [ + 960, + 498 + ], + [ + 965, + 498 + ], + [ + 979, + 493 + ], + [ + 1039, + 493 + ], + [ + 1045, + 496 + ], + [ + 1054, + 496 + ], + [ + 1059, + 493 + ], + [ + 1062, + 487 + ], + [ + 1072, + 487 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 583, + 476 + ], + [ + 584, + 515 + ], + [ + 587, + 515 + ], + [ + 586, + 476 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 637, + 473 + ], + [ + 638, + 500 + ], + [ + 641, + 500 + ], + [ + 640, + 473 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 647, + 477 + ], + [ + 647, + 507 + ], + [ + 650, + 507 + ], + [ + 649, + 477 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 864, + 512 + ], + [ + 680, + 514 + ], + [ + 577, + 514 + ], + [ + 520, + 514 + ], + [ + 453, + 519 + ], + [ + 391, + 526 + ], + [ + 252, + 542 + ], + [ + 172, + 549 + ], + [ + 0, + 562 + ], + [ + 0, + 1023 + ], + [ + 13, + 1023 + ], + [ + 226, + 906 + ], + [ + 469, + 768 + ], + [ + 624, + 677 + ], + [ + 735, + 613 + ], + [ + 824, + 564 + ], + [ + 883, + 530 + ], + [ + 894, + 524 + ], + [ + 894, + 516 + ], + [ + 886, + 513 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 724, + 267 + ], + [ + 726, + 418 + ], + [ + 730, + 419 + ], + [ + 727, + 263 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 684, + 351 + ], + [ + 685, + 362 + ], + [ + 725, + 361 + ], + [ + 724, + 349 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 721, + 534 + ], + [ + 720, + 495 + ], + [ + 717, + 415 + ], + [ + 716, + 363 + ], + [ + 711, + 366 + ], + [ + 711, + 418 + ], + [ + 715, + 534 + ] + ] + }, + { + "label": 
"traffic sign", + "polygon": [ + [ + 715, + 336 + ], + [ + 710, + 338 + ], + [ + 706, + 345 + ], + [ + 706, + 354 + ], + [ + 709, + 361 + ], + [ + 711, + 363 + ], + [ + 715, + 364 + ], + [ + 719, + 364 + ], + [ + 722, + 362 + ], + [ + 725, + 355 + ], + [ + 724, + 345 + ], + [ + 723, + 340 + ], + [ + 719, + 336 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 707, + 379 + ], + [ + 707, + 364 + ], + [ + 726, + 364 + ], + [ + 726, + 379 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 737, + 348 + ], + [ + 738, + 343 + ], + [ + 737, + 339 + ], + [ + 737, + 336 + ], + [ + 738, + 332 + ], + [ + 738, + 327 + ], + [ + 737, + 313 + ], + [ + 732, + 311 + ], + [ + 723, + 312 + ], + [ + 725, + 353 + ], + [ + 729, + 353 + ], + [ + 732, + 350 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 735, + 372 + ], + [ + 726, + 373 + ], + [ + 724, + 380 + ], + [ + 725, + 412 + ], + [ + 725, + 418 + ], + [ + 727, + 418 + ], + [ + 731, + 412 + ], + [ + 734, + 410 + ], + [ + 737, + 408 + ], + [ + 739, + 401 + ], + [ + 739, + 390 + ], + [ + 737, + 386 + ], + [ + 738, + 382 + ], + [ + 739, + 377 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 677, + 411 + ], + [ + 677, + 535 + ], + [ + 684, + 535 + ], + [ + 685, + 498 + ], + [ + 734, + 498 + ], + [ + 734, + 532 + ], + [ + 738, + 532 + ], + [ + 738, + 413 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 524, + 465 + ], + [ + 519, + 472 + ], + [ + 520, + 508 + ], + [ + 520, + 514 + ], + [ + 521, + 515 + ], + [ + 547, + 516 + ], + [ + 579, + 514 + ], + [ + 580, + 510 + ], + [ + 577, + 506 + ], + [ + 576, + 504 + ], + [ + 575, + 475 + ], + [ + 577, + 472 + ], + [ + 577, + 467 + ], + [ + 559, + 465 + ], + [ + 533, + 464 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 485, + 461 + ], + [ + 455, + 462 + ], + [ + 450, + 467 + ], + [ + 451, + 481 + ], + [ + 454, + 520 + ], + [ + 492, + 519 + ], + [ + 494, + 516 + ], + [ + 494, + 511 + ], + [ + 492, + 508 + ], + [ + 489, + 508 + ], + [ + 488, + 511 + ], + [ + 488, + 479 + ], + [ + 489, + 469 + ], + [ + 491, + 464 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1682, + 465 + ], + [ + 1676, + 441 + ], + [ + 1663, + 435 + ], + [ + 1638, + 434 + ], + [ + 1603, + 432 + ], + [ + 1558, + 436 + ], + [ + 1530, + 443 + ], + [ + 1523, + 451 + ], + [ + 1515, + 470 + ], + [ + 1503, + 469 + ], + [ + 1486, + 473 + ], + [ + 1485, + 481 + ], + [ + 1491, + 484 + ], + [ + 1509, + 480 + ], + [ + 1492, + 489 + ], + [ + 1486, + 498 + ], + [ + 1483, + 509 + ], + [ + 1486, + 543 + ], + [ + 1488, + 564 + ], + [ + 1491, + 568 + ], + [ + 1515, + 574 + ], + [ + 1527, + 579 + ], + [ + 1541, + 579 + ], + [ + 1548, + 578 + ], + [ + 1553, + 570 + ], + [ + 1555, + 559 + ], + [ + 1611, + 558 + ], + [ + 1662, + 518 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1600, + 585 + ], + [ + 1598, + 556 + ], + [ + 1601, + 531 + ], + [ + 1608, + 512 + ], + [ + 1618, + 499 + ], + [ + 1625, + 484 + ], + [ + 1636, + 478 + ], + [ + 1654, + 454 + ], + [ + 1675, + 436 + ], + [ + 1695, + 427 + ], + [ + 1728, + 422 + ], + [ + 1772, + 419 + ], + [ + 1815, + 417 + ], + [ + 1840, + 420 + ], + [ + 1895, + 429 + ], + [ + 1923, + 439 + ], + [ + 1820, + 591 + ], + [ + 1765, + 594 + ], + [ + 1762, + 608 + ], + [ + 1746, + 624 + ], + [ + 1722, + 628 + ], + [ + 1702, + 621 + ], + [ + 1692, + 591 + ], + [ + 1641, + 585 + ], + [ + 1638, + 597 + ], + [ + 1628, + 602 + ], + [ + 1609, + 601 + ], + [ + 1604, + 592 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1957, + 408 + ], + [ + 
1917, + 436 + ], + [ + 1884, + 470 + ], + [ + 1878, + 477 + ], + [ + 1864, + 478 + ], + [ + 1848, + 484 + ], + [ + 1845, + 493 + ], + [ + 1846, + 501 + ], + [ + 1852, + 505 + ], + [ + 1828, + 536 + ], + [ + 1813, + 571 + ], + [ + 1813, + 606 + ], + [ + 1830, + 645 + ], + [ + 1844, + 652 + ], + [ + 1864, + 652 + ], + [ + 1879, + 647 + ], + [ + 1887, + 636 + ], + [ + 1888, + 627 + ], + [ + 1998, + 643 + ], + [ + 2000, + 671 + ], + [ + 2012, + 691 + ], + [ + 2029, + 699 + ], + [ + 2048, + 701 + ], + [ + 2048, + 382 + ], + [ + 2019, + 384 + ], + [ + 1981, + 394 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 880, + 531 + ], + [ + 866, + 533 + ], + [ + 830, + 537 + ], + [ + 753, + 543 + ], + [ + 643, + 551 + ], + [ + 572, + 559 + ], + [ + 487, + 580 + ], + [ + 283, + 621 + ], + [ + 0, + 687 + ], + [ + 0, + 687 + ], + [ + 0, + 1023 + ], + [ + 14, + 1023 + ], + [ + 681, + 655 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000094_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000094_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..8319b56a6542aaa465fb81fca180d66ff82094ae Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000094_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000094_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000094_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..fea833ceb05b68a42fd89b5a1eb21a1f5dcea88e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000094_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000094_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000094_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..e95c86c9b2e8e932e909d3490ec1d8f95a5bdaab --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000094_000019_gtFine_polygons.json @@ -0,0 +1,5160 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 1274, + 0 + ], + [ + 1269, + 208 + ], + [ + 1188, + 377 + ], + [ + 1147, + 381 + ], + [ + 1047, + 359 + ], + [ + 333, + 184 + ], + [ + 269, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2048, + 438 + ], + [ + 1147, + 417 + ], + [ + 0, + 402 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1189, + 263 + ], + [ + 1187, + 1 + ], + [ + 1289, + 1 + ], + [ + 1293, + 185 + ], + [ + 1207, + 283 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 0, + 0 + ], + [ + 1, + 
171 + ], + [ + 203, + 318 + ], + [ + 237, + 305 + ], + [ + 353, + 82 + ], + [ + 372, + 0 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1164, + 367 + ], + [ + 1141, + 367 + ], + [ + 1142, + 362 + ], + [ + 1133, + 359 + ], + [ + 1131, + 353 + ], + [ + 1126, + 347 + ], + [ + 1115, + 346 + ], + [ + 1114, + 344 + ], + [ + 1095, + 345 + ], + [ + 1095, + 331 + ], + [ + 1076, + 333 + ], + [ + 1042, + 377 + ], + [ + 1036, + 419 + ], + [ + 1051, + 437 + ], + [ + 1061, + 443 + ], + [ + 1171, + 449 + ], + [ + 1172, + 417 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 829, + 469 + ], + [ + 0, + 488 + ], + [ + 0, + 328 + ], + [ + 283, + 323 + ], + [ + 632, + 308 + ], + [ + 1062, + 364 + ], + [ + 1079, + 372 + ], + [ + 1117, + 459 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 2047, + 554 + ], + [ + 1859, + 537 + ], + [ + 1794, + 431 + ], + [ + 1764, + 241 + ], + [ + 1847, + 1 + ], + [ + 2047, + 1 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1179, + 427 + ], + [ + 1157, + 419 + ], + [ + 1148, + 415 + ], + [ + 1144, + 403 + ], + [ + 1146, + 396 + ], + [ + 1146, + 388 + ], + [ + 1152, + 381 + ], + [ + 1158, + 365 + ], + [ + 1153, + 354 + ], + [ + 1152, + 334 + ], + [ + 1154, + 330 + ], + [ + 1148, + 323 + ], + [ + 1153, + 321 + ], + [ + 1158, + 314 + ], + [ + 1156, + 308 + ], + [ + 1152, + 306 + ], + [ + 1148, + 299 + ], + [ + 1141, + 296 + ], + [ + 1147, + 289 + ], + [ + 1144, + 284 + ], + [ + 1152, + 281 + ], + [ + 1159, + 277 + ], + [ + 1160, + 267 + ], + [ + 1163, + 267 + ], + [ + 1172, + 259 + ], + [ + 1178, + 256 + ], + [ + 1181, + 260 + ], + [ + 1188, + 252 + ], + [ + 1202, + 247 + ], + [ + 1202, + 245 + ], + [ + 1203, + 238 + ], + [ + 1199, + 220 + ], + [ + 1192, + 212 + ], + [ + 1200, + 200 + ], + [ + 1211, + 198 + ], + [ + 1212, + 181 + ], + [ + 1197, + 176 + ], + [ + 1213, + 169 + ], + [ + 1217, + 159 + ], + [ + 1228, + 158 + ], + [ + 1224, + 153 + ], + [ + 1228, + 145 + ], + [ + 1217, + 144 + ], + [ + 1210, + 135 + ], + [ + 1225, + 134 + ], + [ + 1232, + 124 + ], + [ + 1232, + 120 + ], + [ + 1222, + 126 + ], + [ + 1222, + 115 + ], + [ + 1216, + 115 + ], + [ + 1209, + 121 + ], + [ + 1202, + 123 + ], + [ + 1205, + 115 + ], + [ + 1195, + 111 + ], + [ + 1195, + 100 + ], + [ + 1200, + 92 + ], + [ + 1203, + 91 + ], + [ + 1204, + 81 + ], + [ + 1213, + 70 + ], + [ + 1222, + 79 + ], + [ + 1231, + 77 + ], + [ + 1227, + 59 + ], + [ + 1230, + 58 + ], + [ + 1230, + 43 + ], + [ + 1237, + 43 + ], + [ + 1240, + 35 + ], + [ + 1245, + 37 + ], + [ + 1241, + 29 + ], + [ + 1241, + 18 + ], + [ + 1243, + 13 + ], + [ + 1240, + 11 + ], + [ + 1235, + 13 + ], + [ + 1223, + 10 + ], + [ + 1222, + 5 + ], + [ + 1215, + 0 + ], + [ + 1921, + 1 + ], + [ + 1917, + 6 + ], + [ + 1900, + 10 + ], + [ + 1890, + 0 + ], + [ + 1873, + 2 + ], + [ + 1864, + 8 + ], + [ + 1878, + 26 + ], + [ + 1868, + 42 + ], + [ + 1878, + 46 + ], + [ + 1882, + 53 + ], + [ + 1894, + 61 + ], + [ + 1885, + 67 + ], + [ + 1891, + 70 + ], + [ + 1877, + 82 + ], + [ + 1889, + 85 + ], + [ + 1893, + 98 + ], + [ + 1875, + 98 + ], + [ + 1879, + 108 + ], + [ + 1887, + 118 + ], + [ + 1877, + 126 + ], + [ + 1882, + 138 + ], + [ + 1872, + 164 + ], + [ + 1876, + 171 + ], + [ + 1876, + 235 + ], + [ + 1774, + 260 + ], + [ + 1776, + 277 + ], + [ + 1799, + 293 + ], + [ + 1800, + 295 + ], + [ + 1800, + 338 + ], + [ + 1844, + 337 + ], + [ + 1854, + 333 + ], + [ + 1854, + 291 + ], + [ + 1879, + 289 + ], + [ + 1880, + 308 + ], + [ + 1925, + 322 + ], + [ + 1924, + 424 + ], + [ + 1814, + 426 + ], + [ + 1305, + 476 + ], + [ + 
1204, + 473 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 151, + 507 + ], + [ + 0, + 509 + ], + [ + 1, + 478 + ], + [ + 35, + 478 + ], + [ + 84, + 477 + ], + [ + 145, + 474 + ], + [ + 226, + 469 + ], + [ + 258, + 470 + ], + [ + 345, + 467 + ], + [ + 412, + 464 + ], + [ + 438, + 465 + ], + [ + 458, + 463 + ], + [ + 480, + 461 + ], + [ + 551, + 463 + ], + [ + 581, + 464 + ], + [ + 704, + 462 + ], + [ + 760, + 460 + ], + [ + 811, + 456 + ], + [ + 817, + 454 + ], + [ + 872, + 453 + ], + [ + 906, + 452 + ], + [ + 921, + 454 + ], + [ + 943, + 453 + ], + [ + 952, + 450 + ], + [ + 996, + 449 + ], + [ + 1011, + 444 + ], + [ + 1056, + 437 + ], + [ + 1102, + 436 + ], + [ + 1157, + 437 + ], + [ + 1159, + 441 + ], + [ + 1169, + 441 + ], + [ + 1226, + 458 + ], + [ + 1284, + 461 + ], + [ + 1329, + 463 + ], + [ + 1300, + 501 + ], + [ + 1280, + 499 + ], + [ + 1132, + 497 + ], + [ + 1020, + 499 + ], + [ + 939, + 503 + ], + [ + 856, + 504 + ], + [ + 785, + 506 + ], + [ + 732, + 508 + ], + [ + 614, + 508 + ], + [ + 475, + 509 + ], + [ + 353, + 507 + ], + [ + 251, + 506 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 686, + 508 + ], + [ + 695, + 507 + ], + [ + 769, + 500 + ], + [ + 831, + 493 + ], + [ + 905, + 488 + ], + [ + 976, + 483 + ], + [ + 1005, + 479 + ], + [ + 1044, + 475 + ], + [ + 1061, + 473 + ], + [ + 1078, + 455 + ], + [ + 1104, + 449 + ], + [ + 1074, + 448 + ], + [ + 1073, + 437 + ], + [ + 1102, + 436 + ], + [ + 1157, + 437 + ], + [ + 1159, + 441 + ], + [ + 1169, + 441 + ], + [ + 1226, + 458 + ], + [ + 1284, + 461 + ], + [ + 1323, + 463 + ], + [ + 1318, + 477 + ], + [ + 1300, + 501 + ], + [ + 1280, + 499 + ], + [ + 1132, + 497 + ], + [ + 1020, + 499 + ], + [ + 939, + 503 + ], + [ + 856, + 504 + ], + [ + 785, + 506 + ], + [ + 732, + 508 + ], + [ + 686, + 508 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 238, + 546 + ], + [ + 234, + 537 + ], + [ + 160, + 533 + ], + [ + 48, + 536 + ], + [ + 1, + 538 + ], + [ + 0, + 565 + ], + [ + 7, + 565 + ], + [ + 6, + 555 + ], + [ + 209, + 546 + ], + [ + 226, + 546 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 231, + 721 + ], + [ + 60, + 778 + ], + [ + 0, + 799 + ], + [ + 1, + 1023 + ], + [ + 175, + 1024 + ], + [ + 358, + 882 + ], + [ + 516, + 746 + ], + [ + 505, + 715 + ], + [ + 234, + 721 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1246, + 552 + ], + [ + 1204, + 547 + ], + [ + 1179, + 542 + ], + [ + 1170, + 537 + ], + [ + 1172, + 531 + ], + [ + 1183, + 526 + ], + [ + 1212, + 517 + ], + [ + 1262, + 518 + ], + [ + 1262, + 535 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1267, + 510 + ], + [ + 1227, + 510 + ], + [ + 1202, + 512 + ], + [ + 1183, + 518 + ], + [ + 1181, + 527 + ], + [ + 1191, + 526 + ], + [ + 1224, + 524 + ], + [ + 1257, + 524 + ], + [ + 1267, + 516 + ] + ] + }, + { + "label": "train", + "polygon": [ + [ + 1172, + 399 + ], + [ + 1167, + 409 + ], + [ + 1166, + 456 + ], + [ + 1167, + 462 + ], + [ + 1172, + 466 + ], + [ + 1214, + 467 + ], + [ + 1223, + 463 + ], + [ + 1225, + 447 + ], + [ + 1221, + 403 + ], + [ + 1216, + 399 + ], + [ + 1190, + 397 + ], + [ + 1176, + 397 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 61, + 469 + ], + [ + 49, + 467 + ], + [ + 32, + 469 + ], + [ + 31, + 480 + ], + [ + 45, + 482 + ], + [ + 64, + 480 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 179, + 436 + ], + [ + 148, + 436 + ], + [ + 129, + 439 + ], + [ + 123, + 444 + ], + [ + 151, + 469 + ], + [ + 163, + 469 + ], + [ + 162, + 473 + ], + [ + 165, + 474 + ], + [ + 
181, + 473 + ], + [ + 202, + 470 + ], + [ + 205, + 463 + ], + [ + 204, + 451 + ], + [ + 191, + 438 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 247, + 440 + ], + [ + 226, + 434 + ], + [ + 198, + 445 + ], + [ + 195, + 454 + ], + [ + 199, + 471 + ], + [ + 225, + 470 + ], + [ + 240, + 470 + ], + [ + 249, + 469 + ], + [ + 254, + 454 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 139, + 450 + ], + [ + 127, + 442 + ], + [ + 118, + 439 + ], + [ + 123, + 472 + ], + [ + 136, + 472 + ], + [ + 137, + 475 + ], + [ + 152, + 474 + ], + [ + 153, + 462 + ], + [ + 150, + 457 + ], + [ + 147, + 454 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 83, + 446 + ], + [ + 73, + 442 + ], + [ + 48, + 440 + ], + [ + 34, + 442 + ], + [ + 23, + 449 + ], + [ + 18, + 455 + ], + [ + 13, + 476 + ], + [ + 19, + 479 + ], + [ + 35, + 478 + ], + [ + 36, + 474 + ], + [ + 50, + 473 + ], + [ + 58, + 478 + ], + [ + 85, + 478 + ], + [ + 88, + 464 + ], + [ + 87, + 452 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 548, + 449 + ], + [ + 547, + 407 + ], + [ + 546, + 400 + ], + [ + 545, + 400 + ], + [ + 545, + 449 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 559, + 449 + ], + [ + 553, + 447 + ], + [ + 551, + 443 + ], + [ + 547, + 440 + ], + [ + 543, + 442 + ], + [ + 539, + 440 + ], + [ + 534, + 440 + ], + [ + 529, + 446 + ], + [ + 531, + 462 + ], + [ + 562, + 463 + ], + [ + 562, + 452 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 485, + 437 + ], + [ + 476, + 437 + ], + [ + 464, + 442 + ], + [ + 467, + 463 + ], + [ + 483, + 466 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 447, + 427 + ], + [ + 425, + 425 + ], + [ + 397, + 426 + ], + [ + 387, + 434 + ], + [ + 390, + 452 + ], + [ + 432, + 463 + ], + [ + 435, + 466 + ], + [ + 440, + 467 + ], + [ + 445, + 467 + ], + [ + 447, + 466 + ], + [ + 449, + 464 + ], + [ + 470, + 464 + ], + [ + 473, + 459 + ], + [ + 474, + 452 + ], + [ + 471, + 446 + ], + [ + 468, + 443 + ], + [ + 462, + 439 + ], + [ + 455, + 432 + ], + [ + 451, + 428 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 414, + 449 + ], + [ + 406, + 445 + ], + [ + 397, + 442 + ], + [ + 394, + 442 + ], + [ + 391, + 445 + ], + [ + 395, + 472 + ], + [ + 402, + 472 + ], + [ + 409, + 471 + ], + [ + 414, + 466 + ], + [ + 416, + 454 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 399, + 483 + ], + [ + 397, + 442 + ], + [ + 348, + 442 + ], + [ + 341, + 442 + ], + [ + 333, + 466 + ], + [ + 338, + 482 + ], + [ + 360, + 484 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1123, + 448 + ], + [ + 1053, + 447 + ], + [ + 1026, + 459 + ], + [ + 1024, + 466 + ], + [ + 1011, + 468 + ], + [ + 965, + 472 + ], + [ + 1032, + 473 + ], + [ + 1061, + 474 + ], + [ + 1086, + 465 + ], + [ + 1092, + 457 + ], + [ + 1106, + 455 + ], + [ + 1110, + 457 + ], + [ + 1114, + 454 + ], + [ + 1121, + 451 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 194, + 228 + ], + [ + 179, + 219 + ], + [ + 171, + 206 + ], + [ + 167, + 206 + ], + [ + 161, + 207 + ], + [ + 151, + 207 + ], + [ + 146, + 200 + ], + [ + 150, + 197 + ], + [ + 161, + 193 + ], + [ + 158, + 189 + ], + [ + 142, + 183 + ], + [ + 148, + 181 + ], + [ + 145, + 171 + ], + [ + 160, + 176 + ], + [ + 168, + 174 + ], + [ + 167, + 158 + ], + [ + 154, + 152 + ], + [ + 0, + 1 + ], + [ + 0, + 477 + ], + [ + 18, + 476 + ], + [ + 21, + 452 + ], + [ + 26, + 450 + ], + [ + 27, + 487 + ], + [ + 36, + 487 + ], + [ + 35, + 443 + ], + [ + 44, + 439 + ], + [ + 51, + 431 + ], + [ + 53, + 457 + ], + [ + 54, + 478 + ], + [ + 
48, + 488 + ], + [ + 75, + 488 + ], + [ + 71, + 479 + ], + [ + 67, + 437 + ], + [ + 68, + 425 + ], + [ + 86, + 418 + ], + [ + 130, + 412 + ], + [ + 136, + 409 + ], + [ + 135, + 398 + ], + [ + 165, + 400 + ], + [ + 180, + 400 + ], + [ + 225, + 437 + ], + [ + 237, + 455 + ], + [ + 248, + 466 + ], + [ + 253, + 472 + ], + [ + 267, + 472 + ], + [ + 263, + 477 + ], + [ + 270, + 485 + ], + [ + 271, + 493 + ], + [ + 359, + 490 + ], + [ + 363, + 481 + ], + [ + 361, + 462 + ], + [ + 343, + 468 + ], + [ + 343, + 457 + ], + [ + 350, + 458 + ], + [ + 346, + 452 + ], + [ + 346, + 443 + ], + [ + 350, + 437 + ], + [ + 347, + 429 + ], + [ + 338, + 426 + ], + [ + 336, + 407 + ], + [ + 353, + 382 + ], + [ + 359, + 369 + ], + [ + 375, + 352 + ], + [ + 393, + 355 + ], + [ + 390, + 366 + ], + [ + 400, + 378 + ], + [ + 414, + 394 + ], + [ + 413, + 449 + ], + [ + 411, + 487 + ], + [ + 409, + 488 + ], + [ + 433, + 489 + ], + [ + 429, + 483 + ], + [ + 427, + 440 + ], + [ + 426, + 417 + ], + [ + 428, + 381 + ], + [ + 436, + 363 + ], + [ + 440, + 379 + ], + [ + 458, + 377 + ], + [ + 460, + 380 + ], + [ + 458, + 387 + ], + [ + 466, + 388 + ], + [ + 472, + 392 + ], + [ + 467, + 397 + ], + [ + 475, + 398 + ], + [ + 490, + 401 + ], + [ + 495, + 411 + ], + [ + 535, + 471 + ], + [ + 540, + 471 + ], + [ + 537, + 445 + ], + [ + 537, + 414 + ], + [ + 545, + 417 + ], + [ + 547, + 411 + ], + [ + 553, + 412 + ], + [ + 559, + 416 + ], + [ + 561, + 470 + ], + [ + 559, + 477 + ], + [ + 557, + 482 + ], + [ + 565, + 482 + ], + [ + 566, + 489 + ], + [ + 683, + 483 + ], + [ + 694, + 476 + ], + [ + 696, + 456 + ], + [ + 699, + 428 + ], + [ + 707, + 426 + ], + [ + 717, + 419 + ], + [ + 719, + 467 + ], + [ + 727, + 466 + ], + [ + 726, + 423 + ], + [ + 734, + 427 + ], + [ + 748, + 433 + ], + [ + 745, + 480 + ], + [ + 741, + 489 + ], + [ + 764, + 488 + ], + [ + 763, + 437 + ], + [ + 763, + 398 + ], + [ + 784, + 405 + ], + [ + 800, + 411 + ], + [ + 812, + 410 + ], + [ + 824, + 414 + ], + [ + 829, + 461 + ], + [ + 836, + 460 + ], + [ + 838, + 418 + ], + [ + 842, + 411 + ], + [ + 857, + 407 + ], + [ + 867, + 424 + ], + [ + 871, + 461 + ], + [ + 879, + 465 + ], + [ + 875, + 477 + ], + [ + 893, + 477 + ], + [ + 891, + 447 + ], + [ + 887, + 413 + ], + [ + 906, + 413 + ], + [ + 904, + 465 + ], + [ + 907, + 465 + ], + [ + 907, + 461 + ], + [ + 913, + 461 + ], + [ + 913, + 457 + ], + [ + 913, + 427 + ], + [ + 929, + 426 + ], + [ + 936, + 425 + ], + [ + 938, + 425 + ], + [ + 934, + 473 + ], + [ + 947, + 473 + ], + [ + 949, + 433 + ], + [ + 950, + 426 + ], + [ + 952, + 433 + ], + [ + 949, + 457 + ], + [ + 957, + 459 + ], + [ + 956, + 467 + ], + [ + 977, + 464 + ], + [ + 989, + 465 + ], + [ + 1013, + 466 + ], + [ + 1021, + 465 + ], + [ + 1026, + 458 + ], + [ + 1042, + 409 + ], + [ + 1071, + 389 + ], + [ + 1074, + 384 + ], + [ + 1079, + 367 + ], + [ + 1079, + 357 + ], + [ + 1081, + 347 + ], + [ + 1099, + 328 + ], + [ + 1083, + 325 + ], + [ + 1091, + 311 + ], + [ + 1081, + 317 + ], + [ + 1074, + 322 + ], + [ + 1071, + 319 + ], + [ + 1076, + 314 + ], + [ + 1068, + 309 + ], + [ + 1066, + 308 + ], + [ + 1069, + 292 + ], + [ + 1069, + 284 + ], + [ + 1077, + 265 + ], + [ + 1084, + 261 + ], + [ + 1071, + 258 + ], + [ + 1063, + 252 + ], + [ + 1070, + 244 + ], + [ + 1060, + 244 + ], + [ + 1053, + 236 + ], + [ + 1056, + 226 + ], + [ + 1047, + 223 + ], + [ + 1039, + 218 + ], + [ + 1053, + 208 + ], + [ + 1059, + 200 + ], + [ + 1052, + 179 + ], + [ + 1054, + 168 + ], + [ + 1040, + 170 + ], + [ + 1031, + 175 + ], + [ + 1030, + 183 + ], + [ + 1022, + 187 + ], + [ + 
1017, + 185 + ], + [ + 1017, + 177 + ], + [ + 1026, + 170 + ], + [ + 1038, + 155 + ], + [ + 1029, + 158 + ], + [ + 1018, + 159 + ], + [ + 1008, + 155 + ], + [ + 1005, + 152 + ], + [ + 1016, + 140 + ], + [ + 1016, + 126 + ], + [ + 1010, + 129 + ], + [ + 998, + 106 + ], + [ + 980, + 122 + ], + [ + 980, + 96 + ], + [ + 974, + 75 + ], + [ + 964, + 82 + ], + [ + 959, + 67 + ], + [ + 943, + 63 + ], + [ + 942, + 58 + ], + [ + 934, + 54 + ], + [ + 929, + 46 + ], + [ + 920, + 46 + ], + [ + 920, + 34 + ], + [ + 918, + 30 + ], + [ + 910, + 33 + ], + [ + 903, + 35 + ], + [ + 897, + 27 + ], + [ + 891, + 21 + ], + [ + 901, + 12 + ], + [ + 899, + 0 + ], + [ + 574, + 1 + ], + [ + 566, + 7 + ], + [ + 583, + 14 + ], + [ + 601, + 20 + ], + [ + 605, + 30 + ], + [ + 585, + 26 + ], + [ + 578, + 43 + ], + [ + 567, + 28 + ], + [ + 557, + 35 + ], + [ + 563, + 49 + ], + [ + 557, + 46 + ], + [ + 558, + 59 + ], + [ + 551, + 59 + ], + [ + 541, + 60 + ], + [ + 529, + 56 + ], + [ + 519, + 59 + ], + [ + 515, + 45 + ], + [ + 508, + 48 + ], + [ + 497, + 41 + ], + [ + 493, + 32 + ], + [ + 497, + 29 + ], + [ + 496, + 19 + ], + [ + 502, + 13 + ], + [ + 501, + 7 + ], + [ + 505, + 1 + ], + [ + 317, + 1 + ], + [ + 315, + 5 + ], + [ + 317, + 16 + ], + [ + 296, + 3 + ], + [ + 297, + 18 + ], + [ + 302, + 35 + ], + [ + 310, + 35 + ], + [ + 320, + 62 + ], + [ + 317, + 74 + ], + [ + 309, + 70 + ], + [ + 307, + 64 + ], + [ + 293, + 73 + ], + [ + 289, + 57 + ], + [ + 283, + 56 + ], + [ + 268, + 41 + ], + [ + 276, + 61 + ], + [ + 269, + 67 + ], + [ + 278, + 81 + ], + [ + 276, + 85 + ], + [ + 284, + 92 + ], + [ + 287, + 99 + ], + [ + 296, + 98 + ], + [ + 300, + 107 + ], + [ + 287, + 113 + ], + [ + 280, + 101 + ], + [ + 271, + 90 + ], + [ + 273, + 105 + ], + [ + 272, + 116 + ], + [ + 252, + 109 + ], + [ + 259, + 127 + ], + [ + 264, + 139 + ], + [ + 255, + 139 + ], + [ + 248, + 150 + ], + [ + 250, + 163 + ], + [ + 235, + 161 + ], + [ + 225, + 171 + ], + [ + 215, + 162 + ], + [ + 214, + 176 + ], + [ + 202, + 177 + ], + [ + 205, + 195 + ], + [ + 193, + 189 + ], + [ + 195, + 207 + ], + [ + 183, + 206 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 808, + 365 + ], + [ + 809, + 465 + ], + [ + 805, + 465 + ], + [ + 804, + 366 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 205, + 498 + ], + [ + 203, + 449 + ], + [ + 203, + 374 + ], + [ + 199, + 374 + ], + [ + 201, + 498 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 164, + 333 + ], + [ + 166, + 377 + ], + [ + 235, + 378 + ], + [ + 235, + 335 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 224, + 383 + ], + [ + 176, + 382 + ], + [ + 178, + 451 + ], + [ + 227, + 452 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 502, + 368 + ], + [ + 505, + 498 + ], + [ + 513, + 498 + ], + [ + 510, + 369 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 455, + 264 + ], + [ + 451, + 265 + ], + [ + 449, + 269 + ], + [ + 451, + 366 + ], + [ + 452, + 370 + ], + [ + 456, + 371 + ], + [ + 559, + 371 + ], + [ + 561, + 370 + ], + [ + 561, + 366 + ], + [ + 560, + 301 + ], + [ + 553, + 304 + ], + [ + 544, + 296 + ], + [ + 538, + 301 + ], + [ + 531, + 301 + ], + [ + 519, + 293 + ], + [ + 524, + 285 + ], + [ + 519, + 270 + ], + [ + 535, + 280 + ], + [ + 536, + 272 + ], + [ + 545, + 276 + ], + [ + 543, + 265 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 531, + 405 + ], + [ + 482, + 405 + ], + [ + 479, + 406 + ], + [ + 481, + 500 + ], + [ + 483, + 500 + ], + [ + 482, + 470 + ], + [ + 533, + 469 + ], + [ + 533, + 498 + ], + [ + 535, + 499 + 
], + [ + 534, + 406 + ], + [ + 534, + 405 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 683, + 348 + ], + [ + 677, + 369 + ], + [ + 683, + 380 + ], + [ + 694, + 380 + ], + [ + 693, + 347 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 688, + 310 + ], + [ + 693, + 458 + ], + [ + 692, + 465 + ], + [ + 693, + 496 + ], + [ + 702, + 496 + ], + [ + 702, + 464 + ], + [ + 701, + 458 + ], + [ + 697, + 310 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 703, + 323 + ], + [ + 697, + 322 + ], + [ + 698, + 373 + ], + [ + 710, + 372 + ], + [ + 711, + 369 + ], + [ + 715, + 367 + ], + [ + 717, + 364 + ], + [ + 717, + 361 + ], + [ + 711, + 358 + ], + [ + 710, + 352 + ], + [ + 715, + 350 + ], + [ + 717, + 347 + ], + [ + 715, + 343 + ], + [ + 709, + 342 + ], + [ + 709, + 336 + ], + [ + 714, + 335 + ], + [ + 715, + 332 + ], + [ + 714, + 328 + ], + [ + 710, + 327 + ], + [ + 707, + 327 + ], + [ + 707, + 323 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1092, + 400 + ], + [ + 1071, + 396 + ], + [ + 1024, + 393 + ], + [ + 1024, + 403 + ], + [ + 1015, + 405 + ], + [ + 1016, + 467 + ], + [ + 1019, + 467 + ], + [ + 1019, + 460 + ], + [ + 1047, + 460 + ], + [ + 1046, + 467 + ], + [ + 1051, + 466 + ], + [ + 1051, + 460 + ], + [ + 1084, + 459 + ], + [ + 1086, + 403 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1007, + 449 + ], + [ + 993, + 449 + ], + [ + 993, + 472 + ], + [ + 1007, + 472 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1117, + 442 + ], + [ + 1112, + 439 + ], + [ + 1105, + 439 + ], + [ + 1098, + 443 + ], + [ + 1099, + 450 + ], + [ + 1117, + 449 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1071, + 405 + ], + [ + 1070, + 393 + ], + [ + 1064, + 390 + ], + [ + 1052, + 390 + ], + [ + 1038, + 390 + ], + [ + 1034, + 409 + ], + [ + 1067, + 409 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1014, + 381 + ], + [ + 1014, + 385 + ], + [ + 1021, + 385 + ], + [ + 1021, + 381 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1011, + 387 + ], + [ + 1017, + 404 + ], + [ + 1024, + 388 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1022, + 374 + ], + [ + 1023, + 473 + ], + [ + 1028, + 473 + ], + [ + 1026, + 374 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1055, + 375 + ], + [ + 1035, + 376 + ], + [ + 1035, + 408 + ], + [ + 1055, + 407 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1033, + 388 + ], + [ + 1028, + 388 + ], + [ + 1026, + 388 + ], + [ + 1027, + 410 + ], + [ + 1032, + 409 + ], + [ + 1034, + 404 + ], + [ + 1040, + 402 + ], + [ + 1040, + 399 + ], + [ + 1034, + 399 + ], + [ + 1035, + 395 + ], + [ + 1040, + 395 + ], + [ + 1040, + 390 + ], + [ + 1035, + 390 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1233, + 484 + ], + [ + 1218, + 365 + ], + [ + 1215, + 362 + ], + [ + 1214, + 369 + ], + [ + 1229, + 484 + ], + [ + 1231, + 484 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1200, + 330 + ], + [ + 1197, + 335 + ], + [ + 1197, + 344 + ], + [ + 1199, + 353 + ], + [ + 1203, + 356 + ], + [ + 1207, + 359 + ], + [ + 1215, + 359 + ], + [ + 1220, + 355 + ], + [ + 1224, + 349 + ], + [ + 1224, + 339 + ], + [ + 1220, + 331 + ], + [ + 1214, + 326 + ], + [ + 1208, + 326 + ], + [ + 1204, + 327 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1224, + 368 + ], + [ + 1223, + 356 + ], + [ + 1203, + 357 + ], + [ + 1203, + 369 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1923, + 496 + ], + [ + 1887, + 497 + ], + [ + 
1862, + 587 + ], + [ + 1898, + 619 + ], + [ + 2047, + 639 + ], + [ + 2047, + 516 + ], + [ + 1924, + 508 + ], + [ + 1925, + 496 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1833, + 396 + ], + [ + 1832, + 405 + ], + [ + 1829, + 423 + ], + [ + 1820, + 437 + ], + [ + 1887, + 500 + ], + [ + 1925, + 496 + ], + [ + 1927, + 462 + ], + [ + 1923, + 417 + ], + [ + 1847, + 428 + ], + [ + 1842, + 423 + ], + [ + 1841, + 405 + ], + [ + 1836, + 396 + ] + ] + }, + { + "label": "persongroup", + "polygon": [ + [ + 1903, + 390 + ], + [ + 1883, + 393 + ], + [ + 1872, + 405 + ], + [ + 1868, + 429 + ], + [ + 1872, + 461 + ], + [ + 1865, + 482 + ], + [ + 1884, + 498 + ], + [ + 1887, + 502 + ], + [ + 1906, + 502 + ], + [ + 1925, + 431 + ], + [ + 1922, + 416 + ], + [ + 1914, + 407 + ], + [ + 1908, + 403 + ], + [ + 1906, + 397 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1904, + 471 + ], + [ + 1900, + 479 + ], + [ + 1898, + 489 + ], + [ + 1899, + 499 + ], + [ + 1900, + 502 + ], + [ + 1924, + 503 + ], + [ + 1927, + 462 + ], + [ + 1924, + 431 + ], + [ + 1915, + 436 + ], + [ + 1908, + 435 + ], + [ + 1907, + 438 + ], + [ + 1915, + 443 + ], + [ + 1914, + 458 + ], + [ + 1911, + 464 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1628, + 286 + ], + [ + 1627, + 345 + ], + [ + 1628, + 388 + ], + [ + 1639, + 388 + ], + [ + 1636, + 285 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1609, + 293 + ], + [ + 1608, + 310 + ], + [ + 1618, + 310 + ], + [ + 1619, + 315 + ], + [ + 1643, + 315 + ], + [ + 1643, + 293 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1609, + 353 + ], + [ + 1606, + 361 + ], + [ + 1608, + 368 + ], + [ + 1610, + 370 + ], + [ + 1615, + 370 + ], + [ + 1617, + 367 + ], + [ + 1617, + 357 + ], + [ + 1614, + 353 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1616, + 334 + ], + [ + 1615, + 338 + ], + [ + 1611, + 342 + ], + [ + 1611, + 347 + ], + [ + 1614, + 350 + ], + [ + 1616, + 353 + ], + [ + 1616, + 358 + ], + [ + 1631, + 358 + ], + [ + 1630, + 334 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1622, + 362 + ], + [ + 1620, + 371 + ], + [ + 1622, + 381 + ], + [ + 1624, + 387 + ], + [ + 1631, + 381 + ], + [ + 1633, + 375 + ], + [ + 1633, + 364 + ], + [ + 1630, + 359 + ], + [ + 1626, + 358 + ], + [ + 1624, + 358 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1684, + 390 + ], + [ + 1685, + 304 + ], + [ + 1682, + 0 + ], + [ + 1616, + 0 + ], + [ + 1623, + 17 + ], + [ + 1624, + 34 + ], + [ + 1625, + 174 + ], + [ + 1635, + 175 + ], + [ + 1634, + 385 + ], + [ + 1656, + 402 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1890, + 560 + ], + [ + 1968, + 569 + ], + [ + 2021, + 577 + ], + [ + 2047, + 589 + ], + [ + 2031, + 593 + ], + [ + 1895, + 589 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 231, + 721 + ], + [ + 60, + 778 + ], + [ + 0, + 799 + ], + [ + 1, + 1023 + ], + [ + 175, + 1024 + ], + [ + 358, + 882 + ], + [ + 516, + 746 + ], + [ + 505, + 715 + ], + [ + 234, + 721 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1728, + 299 + ], + [ + 1701, + 299 + ], + [ + 1701, + 325 + ], + [ + 1729, + 324 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1748, + 401 + ], + [ + 1747, + 311 + ], + [ + 1708, + 310 + ], + [ + 1708, + 313 + ], + [ + 1740, + 315 + ], + [ + 1740, + 397 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1887, + 537 + ], + [ + 1901, + 537 + ], + [ + 1953, + 539 + ], + [ + 1963, + 543 + ], + [ + 1971, + 549 + ], + [ + 1975, + 572 + ], + [ + 
1974, + 575 + ], + [ + 1967, + 576 + ], + [ + 1967, + 559 + ], + [ + 1960, + 549 + ], + [ + 1950, + 545 + ], + [ + 1879, + 540 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1752, + 327 + ], + [ + 1728, + 328 + ], + [ + 1727, + 375 + ], + [ + 1753, + 375 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1688, + 140 + ], + [ + 1685, + 142 + ], + [ + 1685, + 146 + ], + [ + 1687, + 183 + ], + [ + 1775, + 177 + ], + [ + 1777, + 176 + ], + [ + 1779, + 171 + ], + [ + 1776, + 138 + ], + [ + 1773, + 135 + ], + [ + 1771, + 134 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1676, + 178 + ], + [ + 1672, + 181 + ], + [ + 1672, + 185 + ], + [ + 1723, + 263 + ], + [ + 1725, + 264 + ], + [ + 1731, + 265 + ], + [ + 1733, + 263 + ], + [ + 1776, + 179 + ], + [ + 1776, + 176 + ], + [ + 1773, + 174 + ], + [ + 1767, + 174 + ], + [ + 1680, + 178 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1659, + 214 + ], + [ + 1659, + 312 + ], + [ + 1665, + 320 + ], + [ + 1685, + 319 + ], + [ + 1694, + 311 + ], + [ + 1694, + 212 + ], + [ + 1686, + 206 + ], + [ + 1666, + 206 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1243, + 575 + ], + [ + 1243, + 549 + ], + [ + 1247, + 536 + ], + [ + 1251, + 525 + ], + [ + 1258, + 516 + ], + [ + 1264, + 510 + ], + [ + 1279, + 503 + ], + [ + 1294, + 491 + ], + [ + 1280, + 488 + ], + [ + 1279, + 480 + ], + [ + 1279, + 470 + ], + [ + 1283, + 464 + ], + [ + 1290, + 460 + ], + [ + 1302, + 459 + ], + [ + 1311, + 461 + ], + [ + 1313, + 466 + ], + [ + 1336, + 441 + ], + [ + 1369, + 415 + ], + [ + 1399, + 396 + ], + [ + 1418, + 390 + ], + [ + 1447, + 384 + ], + [ + 1512, + 378 + ], + [ + 1578, + 378 + ], + [ + 1641, + 379 + ], + [ + 1702, + 385 + ], + [ + 1750, + 393 + ], + [ + 1797, + 406 + ], + [ + 1811, + 415 + ], + [ + 1839, + 446 + ], + [ + 1862, + 474 + ], + [ + 1881, + 493 + ], + [ + 1893, + 512 + ], + [ + 1894, + 528 + ], + [ + 1893, + 542 + ], + [ + 1894, + 562 + ], + [ + 1901, + 581 + ], + [ + 1901, + 612 + ], + [ + 1903, + 629 + ], + [ + 1898, + 660 + ], + [ + 1890, + 671 + ], + [ + 1881, + 676 + ], + [ + 1877, + 694 + ], + [ + 1867, + 715 + ], + [ + 1851, + 724 + ], + [ + 1833, + 728 + ], + [ + 1803, + 726 + ], + [ + 1788, + 719 + ], + [ + 1778, + 710 + ], + [ + 1772, + 694 + ], + [ + 1760, + 687 + ], + [ + 1745, + 688 + ], + [ + 1737, + 690 + ], + [ + 1730, + 687 + ], + [ + 1627, + 689 + ], + [ + 1542, + 691 + ], + [ + 1534, + 715 + ], + [ + 1522, + 731 + ], + [ + 1507, + 736 + ], + [ + 1470, + 733 + ], + [ + 1451, + 728 + ], + [ + 1438, + 714 + ], + [ + 1432, + 697 + ], + [ + 1428, + 677 + ], + [ + 1313, + 650 + ], + [ + 1308, + 664 + ], + [ + 1294, + 672 + ], + [ + 1264, + 669 + ], + [ + 1250, + 661 + ], + [ + 1242, + 638 + ], + [ + 1240, + 610 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1675, + 561 + ], + [ + 1674, + 525 + ], + [ + 1802, + 526 + ], + [ + 1805, + 560 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 137, + 292 + ], + [ + 123, + 296 + ], + [ + 118, + 298 + ], + [ + 115, + 339 + ], + [ + 123, + 345 + ], + [ + 132, + 349 + ], + [ + 149, + 345 + ], + [ + 157, + 336 + ], + [ + 159, + 321 + ], + [ + 157, + 303 + ], + [ + 149, + 295 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 150, + 818 + ], + [ + 141, + 518 + ], + [ + 128, + 497 + ], + [ + 119, + 207 + ], + [ + 97, + 145 + ], + [ + 75, + 231 + ], + [ + 83, + 498 + ], + [ + 71, + 522 + ], + [ + 81, + 832 + ], + [ + 151, + 826 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 173, + 64 
+ ], + [ + 173, + 1 + ], + [ + 3, + 1 + ], + [ + 4, + 62 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 196, + 58 + ], + [ + 0, + 50 + ], + [ + 0, + 110 + ], + [ + 82, + 242 + ], + [ + 95, + 244 + ], + [ + 102, + 245 + ], + [ + 175, + 127 + ], + [ + 208, + 76 + ], + [ + 208, + 67 + ], + [ + 203, + 62 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 53, + 242 + ], + [ + 53, + 265 + ], + [ + 48, + 268 + ], + [ + 50, + 325 + ], + [ + 59, + 338 + ], + [ + 63, + 393 + ], + [ + 118, + 391 + ], + [ + 116, + 332 + ], + [ + 106, + 248 + ], + [ + 90, + 242 + ], + [ + 77, + 241 + ], + [ + 62, + 241 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000095_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000095_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..53346f982d747502fefd48c812bbba707fe3498c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000095_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000095_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000095_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..dac281e96360778d0b142d0fc605017c03c1ca48 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000095_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000095_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000095_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..c07987ed9835a3f579e23e14dd5112ce0d237108 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000095_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000096_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000096_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..735eb413542666243f413a9a523bbc54759d8b0d --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000096_000019_gtFine_polygons.json @@ -0,0 +1,6503 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 28, + 0 + ], + [ + 446, + 346 + ], + [ + 820, + 419 + ], + [ + 1182, + 405 + ], + [ + 2048, + 308 + ], + [ + 2048, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 910, + 424 + ], + [ + 734, + 422 + ], + [ + 432, + 421 + ], + [ + 1, + 411 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 427 + ], + [ + 1128, + 421 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 2048, + 214 + ], + [ + 
1962, + 231 + ], + [ + 1885, + 246 + ], + [ + 1692, + 242 + ], + [ + 1626, + 131 + ], + [ + 1618, + 24 + ], + [ + 1553, + 26 + ], + [ + 1553, + 13 + ], + [ + 1520, + 11 + ], + [ + 1456, + 10 + ], + [ + 1437, + 23 + ], + [ + 1438, + 33 + ], + [ + 1465, + 33 + ], + [ + 1466, + 46 + ], + [ + 1450, + 56 + ], + [ + 1450, + 60 + ], + [ + 1440, + 68 + ], + [ + 1432, + 67 + ], + [ + 1363, + 109 + ], + [ + 1363, + 160 + ], + [ + 1318, + 353 + ], + [ + 1474, + 425 + ], + [ + 2048, + 441 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 575, + 324 + ], + [ + 577, + 253 + ], + [ + 562, + 236 + ], + [ + 562, + 220 + ], + [ + 578, + 222 + ], + [ + 578, + 217 + ], + [ + 572, + 208 + ], + [ + 469, + 207 + ], + [ + 470, + 209 + ], + [ + 485, + 221 + ], + [ + 487, + 221 + ], + [ + 487, + 307 + ], + [ + 453, + 307 + ], + [ + 441, + 310 + ], + [ + 436, + 215 + ], + [ + 420, + 172 + ], + [ + 414, + 170 + ], + [ + 413, + 173 + ], + [ + 398, + 193 + ], + [ + 389, + 190 + ], + [ + 387, + 182 + ], + [ + 183, + 67 + ], + [ + 66, + 1 + ], + [ + 1, + 1 + ], + [ + 0, + 476 + ], + [ + 280, + 462 + ], + [ + 432, + 443 + ], + [ + 517, + 438 + ], + [ + 621, + 431 + ], + [ + 579, + 325 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 341, + 465 + ], + [ + 290, + 469 + ], + [ + 223, + 474 + ], + [ + 121, + 483 + ], + [ + 48, + 487 + ], + [ + 0, + 489 + ], + [ + 0, + 450 + ], + [ + 89, + 448 + ], + [ + 143, + 449 + ], + [ + 189, + 448 + ], + [ + 241, + 448 + ], + [ + 316, + 446 + ], + [ + 386, + 444 + ], + [ + 411, + 441 + ], + [ + 420, + 439 + ], + [ + 450, + 439 + ], + [ + 481, + 436 + ], + [ + 504, + 435 + ], + [ + 531, + 433 + ], + [ + 547, + 439 + ], + [ + 552, + 443 + ], + [ + 505, + 443 + ], + [ + 476, + 445 + ], + [ + 441, + 451 + ], + [ + 397, + 458 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 84, + 461 + ], + [ + 19, + 457 + ], + [ + 0, + 453 + ], + [ + 1, + 489 + ], + [ + 1, + 484 + ], + [ + 72, + 479 + ], + [ + 106, + 477 + ], + [ + 168, + 473 + ], + [ + 220, + 472 + ], + [ + 169, + 468 + ], + [ + 155, + 463 + ], + [ + 112, + 461 + ], + [ + 82, + 461 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 170, + 457 + ], + [ + 214, + 459 + ], + [ + 277, + 462 + ], + [ + 322, + 462 + ], + [ + 338, + 465 + ], + [ + 393, + 458 + ], + [ + 440, + 451 + ], + [ + 476, + 445 + ], + [ + 505, + 443 + ], + [ + 551, + 443 + ], + [ + 535, + 433 + ], + [ + 494, + 435 + ], + [ + 448, + 439 + ], + [ + 419, + 438 + ], + [ + 389, + 444 + ], + [ + 323, + 445 + ], + [ + 277, + 448 + ], + [ + 242, + 451 + ], + [ + 206, + 453 + ], + [ + 178, + 454 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 652, + 324 + ], + [ + 641, + 324 + ], + [ + 637, + 320 + ], + [ + 632, + 319 + ], + [ + 629, + 317 + ], + [ + 614, + 320 + ], + [ + 607, + 314 + ], + [ + 607, + 317 + ], + [ + 600, + 317 + ], + [ + 600, + 323 + ], + [ + 594, + 319 + ], + [ + 595, + 323 + ], + [ + 585, + 319 + ], + [ + 574, + 314 + ], + [ + 572, + 310 + ], + [ + 566, + 315 + ], + [ + 566, + 346 + ], + [ + 566, + 366 + ], + [ + 567, + 388 + ], + [ + 616, + 387 + ], + [ + 615, + 434 + ], + [ + 622, + 433 + ], + [ + 624, + 438 + ], + [ + 638, + 440 + ], + [ + 654, + 439 + ], + [ + 661, + 438 + ], + [ + 796, + 436 + ], + [ + 810, + 436 + ], + [ + 846, + 435 + ], + [ + 893, + 434 + ], + [ + 917, + 436 + ], + [ + 963, + 440 + ], + [ + 1026, + 443 + ], + [ + 1103, + 444 + ], + [ + 1179, + 441 + ], + [ + 1293, + 435 + ], + [ + 1371, + 405 + ], + [ + 1335, + 366 + ], + [ + 1215, + 326 + ], + [ + 1100, + 337 + ], + [ + 1083, + 363 + 
], + [ + 1076, + 369 + ], + [ + 1073, + 369 + ], + [ + 1068, + 372 + ], + [ + 1072, + 378 + ], + [ + 1072, + 381 + ], + [ + 1066, + 374 + ], + [ + 1058, + 370 + ], + [ + 1048, + 363 + ], + [ + 1047, + 355 + ], + [ + 1043, + 356 + ], + [ + 1042, + 346 + ], + [ + 980, + 321 + ], + [ + 893, + 353 + ], + [ + 895, + 359 + ], + [ + 892, + 364 + ], + [ + 889, + 382 + ], + [ + 879, + 381 + ], + [ + 871, + 387 + ], + [ + 869, + 394 + ], + [ + 868, + 386 + ], + [ + 864, + 381 + ], + [ + 863, + 390 + ], + [ + 862, + 400 + ], + [ + 858, + 395 + ], + [ + 852, + 395 + ], + [ + 851, + 389 + ], + [ + 854, + 388 + ], + [ + 856, + 383 + ], + [ + 856, + 379 + ], + [ + 854, + 374 + ], + [ + 851, + 369 + ], + [ + 847, + 364 + ], + [ + 842, + 360 + ], + [ + 833, + 364 + ], + [ + 830, + 370 + ], + [ + 830, + 377 + ], + [ + 827, + 379 + ], + [ + 829, + 383 + ], + [ + 830, + 390 + ], + [ + 830, + 397 + ], + [ + 826, + 400 + ], + [ + 821, + 401 + ], + [ + 812, + 395 + ], + [ + 808, + 386 + ], + [ + 804, + 380 + ], + [ + 792, + 372 + ], + [ + 781, + 377 + ], + [ + 772, + 378 + ], + [ + 766, + 376 + ], + [ + 760, + 359 + ], + [ + 663, + 331 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 640, + 450 + ], + [ + 599, + 457 + ], + [ + 552, + 462 + ], + [ + 456, + 475 + ], + [ + 372, + 487 + ], + [ + 269, + 497 + ], + [ + 195, + 504 + ], + [ + 184, + 503 + ], + [ + 134, + 502 + ], + [ + 134, + 498 + ], + [ + 141, + 493 + ], + [ + 153, + 489 + ], + [ + 192, + 483 + ], + [ + 247, + 479 + ], + [ + 311, + 474 + ], + [ + 343, + 468 + ], + [ + 356, + 464 + ], + [ + 377, + 463 + ], + [ + 379, + 452 + ], + [ + 393, + 453 + ], + [ + 395, + 461 + ], + [ + 413, + 460 + ], + [ + 442, + 458 + ], + [ + 455, + 456 + ], + [ + 467, + 459 + ], + [ + 480, + 455 + ], + [ + 494, + 455 + ], + [ + 505, + 454 + ], + [ + 523, + 452 + ], + [ + 537, + 450 + ], + [ + 550, + 445 + ], + [ + 592, + 437 + ], + [ + 613, + 437 + ], + [ + 630, + 432 + ], + [ + 648, + 434 + ], + [ + 662, + 436 + ], + [ + 722, + 429 + ], + [ + 760, + 428 + ], + [ + 785, + 434 + ], + [ + 793, + 445 + ], + [ + 781, + 450 + ], + [ + 739, + 449 + ], + [ + 671, + 449 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 552, + 413 + ], + [ + 546, + 428 + ], + [ + 529, + 430 + ], + [ + 522, + 426 + ], + [ + 534, + 443 + ], + [ + 539, + 450 + ], + [ + 564, + 450 + ], + [ + 585, + 445 + ], + [ + 592, + 439 + ], + [ + 589, + 433 + ], + [ + 589, + 420 + ], + [ + 588, + 417 + ], + [ + 577, + 425 + ], + [ + 566, + 418 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 356, + 395 + ], + [ + 324, + 372 + ], + [ + 315, + 357 + ], + [ + 331, + 353 + ], + [ + 325, + 323 + ], + [ + 335, + 304 + ], + [ + 355, + 304 + ], + [ + 358, + 287 + ], + [ + 353, + 267 + ], + [ + 357, + 229 + ], + [ + 373, + 257 + ], + [ + 377, + 284 + ], + [ + 384, + 305 + ], + [ + 393, + 327 + ], + [ + 417, + 341 + ], + [ + 425, + 333 + ], + [ + 422, + 303 + ], + [ + 440, + 297 + ], + [ + 441, + 260 + ], + [ + 442, + 285 + ], + [ + 445, + 293 + ], + [ + 451, + 289 + ], + [ + 458, + 293 + ], + [ + 455, + 296 + ], + [ + 466, + 308 + ], + [ + 488, + 311 + ], + [ + 495, + 303 + ], + [ + 497, + 291 + ], + [ + 492, + 284 + ], + [ + 495, + 283 + ], + [ + 509, + 292 + ], + [ + 511, + 308 + ], + [ + 512, + 314 + ], + [ + 528, + 314 + ], + [ + 543, + 313 + ], + [ + 557, + 325 + ], + [ + 561, + 336 + ], + [ + 573, + 329 + ], + [ + 590, + 335 + ], + [ + 623, + 356 + ], + [ + 630, + 367 + ], + [ + 620, + 383 + ], + [ + 602, + 398 + ], + [ + 581, + 401 + ], + [ + 573, + 413 + ], + [ + 559, + 392 + ], 
+ [ + 537, + 378 + ], + [ + 522, + 369 + ], + [ + 512, + 373 + ], + [ + 511, + 398 + ], + [ + 514, + 453 + ], + [ + 511, + 453 + ], + [ + 508, + 402 + ], + [ + 505, + 378 + ], + [ + 499, + 365 + ], + [ + 486, + 363 + ], + [ + 482, + 367 + ], + [ + 477, + 382 + ], + [ + 468, + 390 + ], + [ + 457, + 402 + ], + [ + 456, + 458 + ], + [ + 453, + 457 + ], + [ + 452, + 401 + ], + [ + 443, + 390 + ], + [ + 429, + 378 + ], + [ + 430, + 367 + ], + [ + 430, + 353 + ], + [ + 428, + 345 + ], + [ + 422, + 342 + ], + [ + 419, + 359 + ], + [ + 412, + 370 + ], + [ + 402, + 377 + ], + [ + 388, + 385 + ], + [ + 372, + 391 + ], + [ + 369, + 398 + ], + [ + 364, + 400 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 471, + 395 + ], + [ + 453, + 395 + ], + [ + 439, + 398 + ], + [ + 440, + 461 + ], + [ + 470, + 460 + ], + [ + 469, + 405 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 389, + 389 + ], + [ + 357, + 390 + ], + [ + 358, + 467 + ], + [ + 390, + 466 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 501, + 400 + ], + [ + 502, + 455 + ], + [ + 524, + 454 + ], + [ + 522, + 399 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 590, + 405 + ], + [ + 567, + 406 + ], + [ + 569, + 449 + ], + [ + 590, + 447 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 541, + 404 + ], + [ + 541, + 451 + ], + [ + 564, + 451 + ], + [ + 561, + 405 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 212, + 299 + ], + [ + 212, + 484 + ], + [ + 218, + 484 + ], + [ + 216, + 299 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 197, + 299 + ], + [ + 197, + 352 + ], + [ + 213, + 354 + ], + [ + 213, + 349 + ], + [ + 207, + 346 + ], + [ + 207, + 301 + ], + [ + 214, + 301 + ], + [ + 214, + 299 + ], + [ + 200, + 299 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 237, + 301 + ], + [ + 217, + 299 + ], + [ + 217, + 302 + ], + [ + 227, + 303 + ], + [ + 226, + 347 + ], + [ + 215, + 348 + ], + [ + 216, + 355 + ], + [ + 237, + 353 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 206, + 373 + ], + [ + 200, + 380 + ], + [ + 199, + 389 + ], + [ + 203, + 399 + ], + [ + 208, + 404 + ], + [ + 220, + 404 + ], + [ + 228, + 402 + ], + [ + 233, + 393 + ], + [ + 233, + 381 + ], + [ + 228, + 374 + ], + [ + 216, + 370 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 632, + 330 + ], + [ + 654, + 321 + ], + [ + 679, + 315 + ], + [ + 691, + 286 + ], + [ + 698, + 270 + ], + [ + 724, + 234 + ], + [ + 743, + 208 + ], + [ + 762, + 191 + ], + [ + 796, + 168 + ], + [ + 825, + 154 + ], + [ + 856, + 146 + ], + [ + 889, + 142 + ], + [ + 915, + 143 + ], + [ + 938, + 146 + ], + [ + 961, + 151 + ], + [ + 984, + 158 + ], + [ + 1012, + 170 + ], + [ + 1034, + 182 + ], + [ + 1057, + 194 + ], + [ + 1080, + 212 + ], + [ + 1102, + 232 + ], + [ + 1125, + 255 + ], + [ + 1150, + 285 + ], + [ + 1211, + 284 + ], + [ + 1233, + 276 + ], + [ + 1245, + 229 + ], + [ + 1253, + 200 + ], + [ + 1269, + 159 + ], + [ + 1285, + 130 + ], + [ + 1297, + 116 + ], + [ + 1306, + 109 + ], + [ + 1318, + 108 + ], + [ + 1335, + 109 + ], + [ + 1350, + 122 + ], + [ + 1358, + 135 + ], + [ + 1366, + 161 + ], + [ + 1383, + 220 + ], + [ + 1392, + 266 + ], + [ + 1396, + 300 + ], + [ + 1246, + 451 + ], + [ + 1195, + 454 + ], + [ + 1166, + 453 + ], + [ + 1168, + 436 + ], + [ + 1169, + 410 + ], + [ + 1181, + 374 + ], + [ + 1193, + 374 + ], + [ + 1146, + 347 + ], + [ + 980, + 350 + ], + [ + 824, + 357 + ], + [ + 761, + 359 + ], + [ + 769, + 383 + ], + [ + 779, + 400 + ], + [ + 757, + 440 + ], + [ + 
716, + 441 + ], + [ + 653, + 439 + ], + [ + 653, + 427 + ], + [ + 659, + 417 + ], + [ + 670, + 416 + ], + [ + 669, + 394 + ], + [ + 653, + 379 + ], + [ + 634, + 364 + ], + [ + 630, + 356 + ], + [ + 630, + 341 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1249, + 455 + ], + [ + 1197, + 456 + ], + [ + 1058, + 458 + ], + [ + 1015, + 457 + ], + [ + 1013, + 438 + ], + [ + 1037, + 436 + ], + [ + 1062, + 439 + ], + [ + 1090, + 441 + ], + [ + 1109, + 441 + ], + [ + 1150, + 441 + ], + [ + 1168, + 441 + ], + [ + 1169, + 450 + ], + [ + 1199, + 452 + ], + [ + 1227, + 451 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1053, + 444 + ], + [ + 1018, + 445 + ], + [ + 1016, + 441 + ], + [ + 1031, + 439 + ], + [ + 1048, + 441 + ], + [ + 1081, + 443 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 982, + 375 + ], + [ + 981, + 414 + ], + [ + 983, + 416 + ], + [ + 984, + 375 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1052, + 355 + ], + [ + 1050, + 352 + ], + [ + 1053, + 350 + ], + [ + 1056, + 352 + ], + [ + 1054, + 356 + ], + [ + 1054, + 371 + ], + [ + 1053, + 370 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1029, + 388 + ], + [ + 1029, + 439 + ], + [ + 1031, + 439 + ], + [ + 1030, + 390 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1025, + 405 + ], + [ + 1025, + 409 + ], + [ + 1027, + 410 + ], + [ + 1031, + 411 + ], + [ + 1034, + 409 + ], + [ + 1035, + 406 + ], + [ + 1034, + 403 + ], + [ + 1031, + 402 + ], + [ + 1029, + 402 + ], + [ + 1027, + 402 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1006, + 364 + ], + [ + 1006, + 400 + ], + [ + 1045, + 400 + ], + [ + 1045, + 364 + ] + ] + }, + { + "label": "bridge", + "polygon": [ + [ + 632, + 330 + ], + [ + 654, + 321 + ], + [ + 679, + 315 + ], + [ + 691, + 286 + ], + [ + 698, + 270 + ], + [ + 724, + 234 + ], + [ + 743, + 208 + ], + [ + 762, + 191 + ], + [ + 796, + 168 + ], + [ + 825, + 154 + ], + [ + 856, + 146 + ], + [ + 889, + 142 + ], + [ + 915, + 143 + ], + [ + 938, + 146 + ], + [ + 961, + 151 + ], + [ + 984, + 158 + ], + [ + 1012, + 170 + ], + [ + 1034, + 182 + ], + [ + 1057, + 194 + ], + [ + 1080, + 212 + ], + [ + 1102, + 232 + ], + [ + 1125, + 255 + ], + [ + 1150, + 285 + ], + [ + 1211, + 284 + ], + [ + 1233, + 276 + ], + [ + 1245, + 229 + ], + [ + 1253, + 200 + ], + [ + 1269, + 159 + ], + [ + 1285, + 130 + ], + [ + 1297, + 116 + ], + [ + 1306, + 109 + ], + [ + 1318, + 108 + ], + [ + 1335, + 109 + ], + [ + 1350, + 122 + ], + [ + 1358, + 135 + ], + [ + 1366, + 161 + ], + [ + 1383, + 220 + ], + [ + 1392, + 266 + ], + [ + 1396, + 300 + ], + [ + 1246, + 451 + ], + [ + 1195, + 454 + ], + [ + 1166, + 453 + ], + [ + 1168, + 436 + ], + [ + 1169, + 410 + ], + [ + 1181, + 374 + ], + [ + 1193, + 374 + ], + [ + 1146, + 347 + ], + [ + 980, + 350 + ], + [ + 824, + 357 + ], + [ + 761, + 359 + ], + [ + 769, + 383 + ], + [ + 779, + 400 + ], + [ + 757, + 440 + ], + [ + 716, + 441 + ], + [ + 653, + 439 + ], + [ + 653, + 427 + ], + [ + 659, + 417 + ], + [ + 670, + 416 + ], + [ + 669, + 394 + ], + [ + 653, + 379 + ], + [ + 634, + 364 + ], + [ + 630, + 356 + ], + [ + 630, + 341 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 800, + 431 + ], + [ + 802, + 427 + ], + [ + 803, + 424 + ], + [ + 809, + 424 + ], + [ + 808, + 435 + ], + [ + 801, + 434 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 828, + 429 + ], + [ + 827, + 426 + ], + [ + 824, + 422 + ], + [ + 812, + 422 + ], + [ + 809, + 430 + ], + [ + 809, + 436 + ], + [ + 812, + 437 + ], + [ + 814, + 435 + ], + [ + 
824, + 435 + ], + [ + 826, + 437 + ], + [ + 828, + 436 + ], + [ + 828, + 432 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 874, + 427 + ], + [ + 864, + 427 + ], + [ + 861, + 433 + ], + [ + 862, + 437 + ], + [ + 873, + 437 + ], + [ + 875, + 432 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 887, + 426 + ], + [ + 873, + 425 + ], + [ + 871, + 432 + ], + [ + 872, + 438 + ], + [ + 876, + 439 + ], + [ + 887, + 437 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 901, + 441 + ], + [ + 903, + 440 + ], + [ + 903, + 431 + ], + [ + 902, + 425 + ], + [ + 885, + 425 + ], + [ + 881, + 429 + ], + [ + 881, + 435 + ], + [ + 881, + 441 + ], + [ + 883, + 441 + ], + [ + 886, + 438 + ], + [ + 897, + 438 + ], + [ + 898, + 441 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 856, + 441 + ], + [ + 858, + 440 + ], + [ + 859, + 430 + ], + [ + 854, + 421 + ], + [ + 838, + 422 + ], + [ + 834, + 425 + ], + [ + 834, + 432 + ], + [ + 834, + 439 + ], + [ + 836, + 440 + ], + [ + 840, + 440 + ], + [ + 840, + 438 + ], + [ + 853, + 438 + ], + [ + 853, + 440 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 949, + 452 + ], + [ + 950, + 435 + ], + [ + 949, + 422 + ], + [ + 944, + 412 + ], + [ + 918, + 412 + ], + [ + 912, + 428 + ], + [ + 911, + 438 + ], + [ + 911, + 452 + ], + [ + 917, + 452 + ], + [ + 918, + 447 + ], + [ + 928, + 447 + ], + [ + 942, + 447 + ], + [ + 942, + 452 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 792, + 456 + ], + [ + 794, + 358 + ], + [ + 796, + 319 + ], + [ + 803, + 313 + ], + [ + 811, + 309 + ], + [ + 878, + 298 + ], + [ + 877, + 296 + ], + [ + 812, + 307 + ], + [ + 803, + 311 + ], + [ + 795, + 317 + ], + [ + 794, + 322 + ], + [ + 789, + 456 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 785, + 190 + ], + [ + 781, + 457 + ], + [ + 786, + 457 + ], + [ + 786, + 456 + ], + [ + 788, + 190 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 800, + 186 + ], + [ + 793, + 189 + ], + [ + 779, + 189 + ], + [ + 775, + 185 + ], + [ + 757, + 185 + ], + [ + 756, + 187 + ], + [ + 758, + 190 + ], + [ + 777, + 193 + ], + [ + 779, + 192 + ], + [ + 797, + 192 + ], + [ + 807, + 192 + ], + [ + 808, + 193 + ], + [ + 813, + 193 + ], + [ + 816, + 189 + ], + [ + 812, + 185 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 802, + 340 + ], + [ + 781, + 340 + ], + [ + 781, + 359 + ], + [ + 803, + 359 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 798, + 373 + ], + [ + 788, + 373 + ], + [ + 789, + 395 + ], + [ + 798, + 395 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 755, + 398 + ], + [ + 755, + 445 + ], + [ + 795, + 445 + ], + [ + 794, + 397 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 874, + 288 + ], + [ + 874, + 315 + ], + [ + 888, + 315 + ], + [ + 889, + 288 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 765, + 102 + ], + [ + 768, + 460 + ], + [ + 774, + 461 + ], + [ + 769, + 101 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 732, + 96 + ], + [ + 726, + 99 + ], + [ + 727, + 103 + ], + [ + 730, + 105 + ], + [ + 736, + 107 + ], + [ + 741, + 107 + ], + [ + 742, + 105 + ], + [ + 754, + 106 + ], + [ + 757, + 106 + ], + [ + 772, + 106 + ], + [ + 782, + 106 + ], + [ + 793, + 105 + ], + [ + 799, + 107 + ], + [ + 808, + 101 + ], + [ + 804, + 97 + ], + [ + 795, + 95 + ], + [ + 786, + 97 + ], + [ + 778, + 102 + ], + [ + 755, + 102 + ], + [ + 749, + 98 + ], + [ + 739, + 96 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 723, + 294 + ], + [ + 719, + 302 + ], + [ + 718, 
+ 312 + ], + [ + 722, + 322 + ], + [ + 727, + 327 + ], + [ + 734, + 327 + ], + [ + 745, + 323 + ], + [ + 749, + 311 + ], + [ + 746, + 298 + ], + [ + 740, + 293 + ], + [ + 731, + 291 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 760, + 368 + ], + [ + 734, + 325 + ], + [ + 714, + 353 + ], + [ + 713, + 368 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 468, + 142 + ], + [ + 471, + 139 + ], + [ + 494, + 139 + ], + [ + 500, + 144 + ], + [ + 501, + 195 + ], + [ + 498, + 199 + ], + [ + 471, + 199 + ], + [ + 468, + 197 + ], + [ + 467, + 192 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 580, + 501 + ], + [ + 578, + 299 + ], + [ + 574, + 206 + ], + [ + 573, + 152 + ], + [ + 578, + 143 + ], + [ + 578, + 132 + ], + [ + 570, + 123 + ], + [ + 569, + 96 + ], + [ + 574, + 81 + ], + [ + 582, + 69 + ], + [ + 597, + 57 + ], + [ + 766, + 15 + ], + [ + 778, + 14 + ], + [ + 825, + 13 + ], + [ + 824, + 8 + ], + [ + 775, + 9 + ], + [ + 761, + 10 + ], + [ + 601, + 49 + ], + [ + 584, + 59 + ], + [ + 574, + 68 + ], + [ + 566, + 84 + ], + [ + 564, + 97 + ], + [ + 567, + 499 + ], + [ + 572, + 510 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 554, + 259 + ], + [ + 554, + 299 + ], + [ + 565, + 306 + ], + [ + 571, + 306 + ], + [ + 574, + 300 + ], + [ + 574, + 258 + ], + [ + 572, + 255 + ], + [ + 563, + 254 + ], + [ + 560, + 259 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 506, + 151 + ], + [ + 494, + 150 + ], + [ + 497, + 155 + ], + [ + 515, + 156 + ], + [ + 534, + 159 + ], + [ + 636, + 178 + ], + [ + 657, + 185 + ], + [ + 670, + 191 + ], + [ + 678, + 197 + ], + [ + 686, + 208 + ], + [ + 696, + 236 + ], + [ + 697, + 224 + ], + [ + 692, + 208 + ], + [ + 682, + 194 + ], + [ + 670, + 185 + ], + [ + 655, + 179 + ], + [ + 628, + 173 + ], + [ + 522, + 152 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 715, + 482 + ], + [ + 711, + 1 + ], + [ + 695, + 1 + ], + [ + 696, + 486 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 810, + 448 + ], + [ + 790, + 448 + ], + [ + 769, + 450 + ], + [ + 736, + 463 + ], + [ + 719, + 474 + ], + [ + 680, + 479 + ], + [ + 645, + 484 + ], + [ + 610, + 488 + ], + [ + 572, + 495 + ], + [ + 527, + 497 + ], + [ + 467, + 503 + ], + [ + 0, + 537 + ], + [ + 0, + 621 + ], + [ + 503, + 546 + ], + [ + 575, + 536 + ], + [ + 615, + 530 + ], + [ + 662, + 523 + ], + [ + 703, + 516 + ], + [ + 761, + 503 + ], + [ + 787, + 495 + ], + [ + 799, + 483 + ], + [ + 806, + 479 + ], + [ + 811, + 470 + ], + [ + 811, + 462 + ], + [ + 815, + 456 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 815, + 39 + ], + [ + 816, + 0 + ], + [ + 858, + 0 + ], + [ + 858, + 38 + ], + [ + 855, + 42 + ], + [ + 850, + 46 + ], + [ + 823, + 46 + ], + [ + 818, + 43 + ], + [ + 816, + 42 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 96, + 331 + ], + [ + 97, + 324 + ], + [ + 43, + 231 + ], + [ + 1, + 299 + ], + [ + 1, + 333 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 45, + 539 + ], + [ + 44, + 230 + ], + [ + 33, + 230 + ], + [ + 33, + 539 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 10, + 508 + ], + [ + 10, + 464 + ], + [ + 33, + 463 + ], + [ + 34, + 539 + ], + [ + 20, + 539 + ], + [ + 10, + 536 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1119, + 399 + ], + [ + 1122, + 395 + ], + [ + 1125, + 395 + ], + [ + 1128, + 396 + ], + [ + 1129, + 410 + ], + [ + 1120, + 411 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1132, + 386 + ], + [ + 1131, + 450 + ], + [ + 1135, + 450 + 
], + [ + 1134, + 386 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1138, + 389 + ], + [ + 1131, + 389 + ], + [ + 1131, + 407 + ], + [ + 1138, + 407 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1250, + 456 + ], + [ + 1155, + 457 + ], + [ + 1064, + 458 + ], + [ + 1059, + 461 + ], + [ + 1063, + 464 + ], + [ + 1104, + 469 + ], + [ + 1178, + 482 + ], + [ + 1249, + 494 + ], + [ + 1293, + 501 + ], + [ + 1321, + 507 + ], + [ + 1349, + 514 + ], + [ + 1379, + 524 + ], + [ + 1404, + 536 + ], + [ + 1439, + 563 + ], + [ + 1488, + 597 + ], + [ + 1546, + 635 + ], + [ + 1580, + 658 + ], + [ + 1634, + 681 + ], + [ + 1734, + 721 + ], + [ + 1854, + 766 + ], + [ + 2000, + 818 + ], + [ + 2047, + 837 + ], + [ + 2047, + 497 + ], + [ + 1635, + 466 + ], + [ + 1276, + 450 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 2029, + 267 + ], + [ + 1790, + 303 + ], + [ + 1701, + 316 + ], + [ + 1523, + 342 + ], + [ + 1451, + 352 + ], + [ + 1451, + 373 + ], + [ + 1558, + 416 + ], + [ + 2047, + 406 + ], + [ + 2047, + 262 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1244, + 442 + ], + [ + 1226, + 450 + ], + [ + 1222, + 457 + ], + [ + 1265, + 459 + ], + [ + 1304, + 463 + ], + [ + 1404, + 472 + ], + [ + 1536, + 480 + ], + [ + 1911, + 510 + ], + [ + 2047, + 518 + ], + [ + 2047, + 381 + ], + [ + 2015, + 361 + ], + [ + 1999, + 343 + ], + [ + 1984, + 345 + ], + [ + 1951, + 349 + ], + [ + 1935, + 357 + ], + [ + 1908, + 355 + ], + [ + 1886, + 349 + ], + [ + 1851, + 361 + ], + [ + 1826, + 377 + ], + [ + 1794, + 385 + ], + [ + 1791, + 368 + ], + [ + 1780, + 375 + ], + [ + 1776, + 383 + ], + [ + 1764, + 380 + ], + [ + 1742, + 370 + ], + [ + 1724, + 362 + ], + [ + 1712, + 371 + ], + [ + 1690, + 375 + ], + [ + 1675, + 378 + ], + [ + 1671, + 382 + ], + [ + 1650, + 377 + ], + [ + 1639, + 386 + ], + [ + 1624, + 378 + ], + [ + 1596, + 349 + ], + [ + 1562, + 353 + ], + [ + 1540, + 379 + ], + [ + 1529, + 390 + ], + [ + 1496, + 370 + ], + [ + 1489, + 364 + ], + [ + 1451, + 360 + ], + [ + 1398, + 349 + ], + [ + 1219, + 358 + ], + [ + 1194, + 372 + ], + [ + 1204, + 410 + ], + [ + 1219, + 405 + ], + [ + 1219, + 392 + ], + [ + 1224, + 389 + ], + [ + 1231, + 405 + ], + [ + 1232, + 413 + ], + [ + 1240, + 409 + ], + [ + 1247, + 408 + ], + [ + 1246, + 441 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1087, + 459 + ], + [ + 1087, + 350 + ], + [ + 1089, + 350 + ], + [ + 1089, + 459 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1100, + 355 + ], + [ + 1087, + 335 + ], + [ + 1075, + 355 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1096, + 373 + ], + [ + 1095, + 354 + ], + [ + 1080, + 355 + ], + [ + 1080, + 374 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1106, + 372 + ], + [ + 1101, + 369 + ], + [ + 1097, + 370 + ], + [ + 1093, + 373 + ], + [ + 1092, + 380 + ], + [ + 1095, + 386 + ], + [ + 1102, + 386 + ], + [ + 1106, + 384 + ], + [ + 1108, + 378 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1107, + 392 + ], + [ + 1099, + 384 + ], + [ + 1093, + 392 + ], + [ + 1100, + 398 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1082, + 402 + ], + [ + 1082, + 375 + ], + [ + 1093, + 375 + ], + [ + 1094, + 402 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1150, + 201 + ], + [ + 1152, + 459 + ], + [ + 1155, + 459 + ], + [ + 1152, + 201 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1139, + 195 + ], + [ + 1134, + 197 + ], + [ + 1137, + 202 + ], + [ + 1143, + 203 + ], + [ + 1145, + 
202 + ], + [ + 1154, + 202 + ], + [ + 1154, + 199 + ], + [ + 1145, + 195 + ], + [ + 1136, + 196 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1141, + 369 + ], + [ + 1141, + 386 + ], + [ + 1157, + 385 + ], + [ + 1160, + 377 + ], + [ + 1156, + 368 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1188, + 391 + ], + [ + 1180, + 387 + ], + [ + 1175, + 387 + ], + [ + 1177, + 411 + ], + [ + 1185, + 411 + ], + [ + 1188, + 402 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1168, + 392 + ], + [ + 1169, + 410 + ], + [ + 1172, + 411 + ], + [ + 1179, + 411 + ], + [ + 1177, + 384 + ], + [ + 1170, + 388 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1175, + 381 + ], + [ + 1176, + 414 + ], + [ + 1179, + 414 + ], + [ + 1178, + 381 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1198, + 408 + ], + [ + 1168, + 409 + ], + [ + 1169, + 458 + ], + [ + 1198, + 457 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1181, + 313 + ], + [ + 1153, + 315 + ], + [ + 1154, + 386 + ], + [ + 1184, + 386 + ], + [ + 1182, + 318 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1232, + 466 + ], + [ + 1233, + 387 + ], + [ + 1231, + 386 + ], + [ + 1230, + 467 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1245, + 363 + ], + [ + 1219, + 364 + ], + [ + 1227, + 350 + ], + [ + 1240, + 351 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1241, + 378 + ], + [ + 1240, + 364 + ], + [ + 1223, + 364 + ], + [ + 1223, + 379 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1241, + 388 + ], + [ + 1241, + 378 + ], + [ + 1222, + 378 + ], + [ + 1222, + 389 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1235, + 420 + ], + [ + 1234, + 416 + ], + [ + 1227, + 416 + ], + [ + 1226, + 420 + ], + [ + 1226, + 422 + ], + [ + 1224, + 428 + ], + [ + 1221, + 441 + ], + [ + 1223, + 445 + ], + [ + 1227, + 444 + ], + [ + 1228, + 446 + ], + [ + 1226, + 463 + ], + [ + 1235, + 461 + ], + [ + 1235, + 449 + ], + [ + 1239, + 444 + ], + [ + 1239, + 440 + ], + [ + 1240, + 436 + ], + [ + 1239, + 423 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1283, + 466 + ], + [ + 1276, + 465 + ], + [ + 1268, + 463 + ], + [ + 1247, + 459 + ], + [ + 1230, + 458 + ], + [ + 1222, + 460 + ], + [ + 1208, + 469 + ], + [ + 1242, + 468 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1213, + 460 + ], + [ + 1199, + 455 + ], + [ + 1168, + 455 + ], + [ + 1154, + 462 + ], + [ + 1201, + 461 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1267, + 234 + ], + [ + 1256, + 234 + ], + [ + 1246, + 226 + ], + [ + 1234, + 229 + ], + [ + 1230, + 239 + ], + [ + 1216, + 228 + ], + [ + 1211, + 226 + ], + [ + 1200, + 234 + ], + [ + 1200, + 241 + ], + [ + 1195, + 246 + ], + [ + 1193, + 264 + ], + [ + 1197, + 275 + ], + [ + 1205, + 277 + ], + [ + 1203, + 287 + ], + [ + 1196, + 293 + ], + [ + 1194, + 291 + ], + [ + 1188, + 281 + ], + [ + 1173, + 283 + ], + [ + 1160, + 271 + ], + [ + 1142, + 273 + ], + [ + 1135, + 281 + ], + [ + 1131, + 281 + ], + [ + 1133, + 289 + ], + [ + 1135, + 300 + ], + [ + 1139, + 300 + ], + [ + 1139, + 310 + ], + [ + 1142, + 330 + ], + [ + 1142, + 345 + ], + [ + 1138, + 350 + ], + [ + 1143, + 363 + ], + [ + 1148, + 369 + ], + [ + 1155, + 371 + ], + [ + 1156, + 315 + ], + [ + 1170, + 314 + ], + [ + 1176, + 318 + ], + [ + 1179, + 320 + ], + [ + 1180, + 328 + ], + [ + 1177, + 332 + ], + [ + 1181, + 341 + ], + [ + 1181, + 360 + ], + [ + 1182, + 376 + ], + [ + 1189, + 375 + ], + [ + 1191, + 373 + ], + [ 
+ 1195, + 374 + ], + [ + 1218, + 360 + ], + [ + 1229, + 358 + ], + [ + 1242, + 362 + ], + [ + 1247, + 373 + ], + [ + 1244, + 462 + ], + [ + 1250, + 460 + ], + [ + 1251, + 404 + ], + [ + 1255, + 401 + ], + [ + 1267, + 418 + ], + [ + 1288, + 420 + ], + [ + 1305, + 414 + ], + [ + 1313, + 414 + ], + [ + 1312, + 467 + ], + [ + 1320, + 467 + ], + [ + 1320, + 398 + ], + [ + 1333, + 378 + ], + [ + 1349, + 365 + ], + [ + 1359, + 356 + ], + [ + 1375, + 367 + ], + [ + 1383, + 367 + ], + [ + 1403, + 352 + ], + [ + 1415, + 351 + ], + [ + 1427, + 358 + ], + [ + 1432, + 360 + ], + [ + 1437, + 483 + ], + [ + 1444, + 483 + ], + [ + 1441, + 398 + ], + [ + 1459, + 386 + ], + [ + 1480, + 369 + ], + [ + 1488, + 355 + ], + [ + 1506, + 348 + ], + [ + 1505, + 318 + ], + [ + 1509, + 296 + ], + [ + 1496, + 196 + ], + [ + 1494, + 182 + ], + [ + 1490, + 151 + ], + [ + 1470, + 159 + ], + [ + 1450, + 143 + ], + [ + 1433, + 155 + ], + [ + 1419, + 158 + ], + [ + 1415, + 152 + ], + [ + 1421, + 144 + ], + [ + 1402, + 132 + ], + [ + 1385, + 139 + ], + [ + 1373, + 145 + ], + [ + 1360, + 142 + ], + [ + 1357, + 134 + ], + [ + 1351, + 145 + ], + [ + 1343, + 142 + ], + [ + 1332, + 144 + ], + [ + 1326, + 148 + ], + [ + 1334, + 154 + ], + [ + 1323, + 159 + ], + [ + 1323, + 168 + ], + [ + 1314, + 174 + ], + [ + 1319, + 177 + ], + [ + 1328, + 177 + ], + [ + 1319, + 189 + ], + [ + 1323, + 199 + ], + [ + 1313, + 206 + ], + [ + 1318, + 229 + ], + [ + 1320, + 236 + ], + [ + 1309, + 245 + ], + [ + 1297, + 252 + ], + [ + 1296, + 259 + ], + [ + 1289, + 254 + ], + [ + 1280, + 239 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1482, + 486 + ], + [ + 1439, + 480 + ], + [ + 1402, + 480 + ], + [ + 1388, + 484 + ], + [ + 1392, + 487 + ], + [ + 1430, + 489 + ], + [ + 1458, + 490 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1447, + 441 + ], + [ + 1442, + 442 + ], + [ + 1439, + 446 + ], + [ + 1433, + 442 + ], + [ + 1425, + 443 + ], + [ + 1422, + 449 + ], + [ + 1419, + 455 + ], + [ + 1419, + 458 + ], + [ + 1427, + 448 + ], + [ + 1431, + 445 + ], + [ + 1436, + 447 + ], + [ + 1436, + 449 + ], + [ + 1429, + 481 + ], + [ + 1433, + 481 + ], + [ + 1440, + 453 + ], + [ + 1442, + 449 + ], + [ + 1446, + 445 + ], + [ + 1450, + 447 + ], + [ + 1453, + 451 + ], + [ + 1460, + 477 + ], + [ + 1463, + 474 + ], + [ + 1455, + 448 + ], + [ + 1452, + 443 + ], + [ + 1449, + 441 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1478, + 471 + ], + [ + 1446, + 472 + ], + [ + 1446, + 490 + ], + [ + 1479, + 490 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1262, + 119 + ], + [ + 1263, + 240 + ], + [ + 1268, + 247 + ], + [ + 1265, + 121 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1262, + 338 + ], + [ + 1262, + 472 + ], + [ + 1269, + 472 + ], + [ + 1267, + 345 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1277, + 373 + ], + [ + 1275, + 348 + ], + [ + 1255, + 348 + ], + [ + 1257, + 374 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1240, + 114 + ], + [ + 1245, + 120 + ], + [ + 1251, + 121 + ], + [ + 1254, + 120 + ], + [ + 1265, + 122 + ], + [ + 1269, + 119 + ], + [ + 1263, + 113 + ], + [ + 1252, + 110 + ], + [ + 1244, + 111 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1016, + 477 + ], + [ + 1022, + 474 + ], + [ + 1024, + 460 + ], + [ + 1024, + 448 + ], + [ + 1023, + 432 + ], + [ + 1014, + 412 + ], + [ + 973, + 411 + ], + [ + 966, + 412 + ], + [ + 962, + 416 + ], + [ + 956, + 428 + ], + [ + 951, + 429 + ], + [ + 949, + 431 + ], + [ + 950, + 438 + ], + [ + 955, + 439 + ], + [ + 952, + 
453 + ], + [ + 952, + 467 + ], + [ + 955, + 473 + ], + [ + 962, + 475 + ], + [ + 966, + 475 + ], + [ + 968, + 469 + ], + [ + 1005, + 470 + ], + [ + 1008, + 477 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1514, + 498 + ], + [ + 1516, + 347 + ], + [ + 1511, + 339 + ], + [ + 1505, + 349 + ], + [ + 1504, + 498 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1504, + 301 + ], + [ + 1499, + 307 + ], + [ + 1496, + 316 + ], + [ + 1497, + 324 + ], + [ + 1503, + 332 + ], + [ + 1506, + 333 + ], + [ + 1517, + 334 + ], + [ + 1521, + 332 + ], + [ + 1528, + 324 + ], + [ + 1528, + 315 + ], + [ + 1524, + 304 + ], + [ + 1512, + 299 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1524, + 350 + ], + [ + 1523, + 333 + ], + [ + 1500, + 332 + ], + [ + 1500, + 350 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1710, + 506 + ], + [ + 1709, + 403 + ], + [ + 1703, + 300 + ], + [ + 1716, + 300 + ], + [ + 1726, + 293 + ], + [ + 1740, + 292 + ], + [ + 1750, + 302 + ], + [ + 1757, + 314 + ], + [ + 1768, + 317 + ], + [ + 1797, + 314 + ], + [ + 1813, + 303 + ], + [ + 1822, + 289 + ], + [ + 1832, + 288 + ], + [ + 1833, + 276 + ], + [ + 1845, + 281 + ], + [ + 1858, + 276 + ], + [ + 1884, + 263 + ], + [ + 1905, + 233 + ], + [ + 1910, + 197 + ], + [ + 1890, + 197 + ], + [ + 1890, + 179 + ], + [ + 1878, + 173 + ], + [ + 1879, + 158 + ], + [ + 1889, + 148 + ], + [ + 1890, + 125 + ], + [ + 1872, + 139 + ], + [ + 1865, + 133 + ], + [ + 1859, + 127 + ], + [ + 1873, + 113 + ], + [ + 1871, + 93 + ], + [ + 1861, + 89 + ], + [ + 1853, + 76 + ], + [ + 1863, + 54 + ], + [ + 1858, + 50 + ], + [ + 1850, + 39 + ], + [ + 1854, + 23 + ], + [ + 1830, + 47 + ], + [ + 1820, + 35 + ], + [ + 1814, + 19 + ], + [ + 1810, + 5 + ], + [ + 1810, + 1 + ], + [ + 1587, + 1 + ], + [ + 1588, + 5 + ], + [ + 1594, + 9 + ], + [ + 1598, + 17 + ], + [ + 1597, + 39 + ], + [ + 1571, + 41 + ], + [ + 1559, + 23 + ], + [ + 1544, + 41 + ], + [ + 1526, + 73 + ], + [ + 1517, + 101 + ], + [ + 1509, + 124 + ], + [ + 1508, + 137 + ], + [ + 1529, + 141 + ], + [ + 1537, + 156 + ], + [ + 1516, + 162 + ], + [ + 1508, + 179 + ], + [ + 1501, + 193 + ], + [ + 1483, + 204 + ], + [ + 1471, + 254 + ], + [ + 1466, + 269 + ], + [ + 1485, + 289 + ], + [ + 1501, + 309 + ], + [ + 1513, + 318 + ], + [ + 1528, + 319 + ], + [ + 1538, + 322 + ], + [ + 1547, + 329 + ], + [ + 1560, + 326 + ], + [ + 1596, + 308 + ], + [ + 1593, + 304 + ], + [ + 1596, + 299 + ], + [ + 1593, + 293 + ], + [ + 1603, + 270 + ], + [ + 1615, + 292 + ], + [ + 1608, + 302 + ], + [ + 1632, + 309 + ], + [ + 1646, + 303 + ], + [ + 1649, + 311 + ], + [ + 1657, + 307 + ], + [ + 1665, + 315 + ], + [ + 1676, + 335 + ], + [ + 1685, + 343 + ], + [ + 1692, + 348 + ], + [ + 1694, + 497 + ], + [ + 1686, + 500 + ], + [ + 1700, + 508 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1783, + 507 + ], + [ + 1732, + 503 + ], + [ + 1709, + 493 + ], + [ + 1681, + 498 + ], + [ + 1651, + 501 + ], + [ + 1629, + 503 + ], + [ + 1613, + 508 + ], + [ + 1616, + 512 + ], + [ + 1697, + 516 + ], + [ + 1750, + 519 + ], + [ + 1807, + 513 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1343, + 447 + ], + [ + 1343, + 495 + ], + [ + 1348, + 495 + ], + [ + 1348, + 447 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1368, + 449 + ], + [ + 1369, + 501 + ], + [ + 1375, + 501 + ], + [ + 1374, + 449 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1393, + 451 + ], + [ + 1393, + 508 + ], + [ + 1399, + 508 + ], + [ + 1398, + 451 + ] + ] + }, + { + "label": "pole", + 
"polygon": [ + [ + 1423, + 516 + ], + [ + 1422, + 454 + ], + [ + 1416, + 453 + ], + [ + 1416, + 516 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1447, + 523 + ], + [ + 1444, + 455 + ], + [ + 1436, + 455 + ], + [ + 1439, + 523 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1491, + 546 + ], + [ + 1490, + 463 + ], + [ + 1481, + 463 + ], + [ + 1482, + 547 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1522, + 560 + ], + [ + 1522, + 468 + ], + [ + 1513, + 468 + ], + [ + 1513, + 561 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1732, + 563 + ], + [ + 1728, + 465 + ], + [ + 1719, + 465 + ], + [ + 1720, + 563 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1865, + 463 + ], + [ + 1866, + 559 + ], + [ + 1878, + 559 + ], + [ + 1875, + 464 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1946, + 186 + ], + [ + 1954, + 204 + ], + [ + 1952, + 210 + ], + [ + 1956, + 217 + ], + [ + 1962, + 222 + ], + [ + 1969, + 223 + ], + [ + 1962, + 233 + ], + [ + 1978, + 233 + ], + [ + 1970, + 254 + ], + [ + 1979, + 260 + ], + [ + 2000, + 272 + ], + [ + 2047, + 265 + ], + [ + 2047, + 170 + ], + [ + 2044, + 174 + ], + [ + 2028, + 170 + ], + [ + 2018, + 176 + ], + [ + 2025, + 185 + ], + [ + 2016, + 189 + ], + [ + 2013, + 206 + ], + [ + 2000, + 194 + ], + [ + 1996, + 194 + ], + [ + 1989, + 200 + ], + [ + 1979, + 203 + ], + [ + 1971, + 201 + ], + [ + 1976, + 195 + ], + [ + 1971, + 190 + ], + [ + 1961, + 190 + ], + [ + 1950, + 183 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1587, + 566 + ], + [ + 1590, + 1 + ], + [ + 1575, + 1 + ], + [ + 1565, + 564 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1700, + 112 + ], + [ + 1701, + 92 + ], + [ + 1694, + 87 + ], + [ + 1590, + 88 + ], + [ + 1589, + 118 + ], + [ + 1696, + 116 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1687, + 128 + ], + [ + 1529, + 129 + ], + [ + 1523, + 134 + ], + [ + 1519, + 285 + ], + [ + 1524, + 292 + ], + [ + 1678, + 292 + ], + [ + 1688, + 286 + ], + [ + 1692, + 133 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000098_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000098_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d668e51a8df172aaca9a3bea4ada0750516302f1 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000098_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000098_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000098_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..9d0cfbaa97a793f5275971b0420ac3e703881f73 --- /dev/null +++ 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000098_000019_gtFine_polygons.json @@ -0,0 +1,2795 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 2048, + 186 + ], + [ + 2048, + 0 + ], + [ + 874, + 0 + ], + [ + 879, + 206 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2049, + 316 + ], + [ + 1607, + 336 + ], + [ + 1361, + 332 + ], + [ + 863, + 356 + ], + [ + 0, + 373 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1169, + 413 + ], + [ + 1112, + 410 + ], + [ + 1020, + 408 + ], + [ + 928, + 408 + ], + [ + 888, + 408 + ], + [ + 876, + 389 + ], + [ + 1184, + 386 + ], + [ + 1184, + 407 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1815, + 398 + ], + [ + 1750, + 405 + ], + [ + 1722, + 414 + ], + [ + 1681, + 413 + ], + [ + 1662, + 418 + ], + [ + 1578, + 421 + ], + [ + 1498, + 415 + ], + [ + 1427, + 413 + ], + [ + 1410, + 399 + ], + [ + 1405, + 379 + ], + [ + 1398, + 255 + ], + [ + 1666, + 263 + ], + [ + 1828, + 256 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 971, + 420 + ], + [ + 906, + 416 + ], + [ + 892, + 414 + ], + [ + 245, + 423 + ], + [ + 1, + 431 + ], + [ + 0, + 490 + ], + [ + 204, + 480 + ], + [ + 383, + 475 + ], + [ + 533, + 468 + ], + [ + 710, + 458 + ], + [ + 807, + 458 + ], + [ + 875, + 455 + ], + [ + 943, + 448 + ], + [ + 1001, + 440 + ], + [ + 1020, + 437 + ], + [ + 1032, + 434 + ], + [ + 1035, + 429 + ], + [ + 1024, + 424 + ], + [ + 1006, + 421 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 178, + 868 + ], + [ + 71, + 884 + ], + [ + 0, + 888 + ], + [ + 1, + 556 + ], + [ + 594, + 583 + ], + [ + 719, + 651 + ], + [ + 533, + 729 + ], + [ + 408, + 779 + ], + [ + 317, + 818 + ], + [ + 248, + 846 + ], + [ + 213, + 859 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1174, + 435 + ], + [ + 1142, + 440 + ], + [ + 1103, + 442 + ], + [ + 1088, + 450 + ], + [ + 1001, + 460 + ], + [ + 872, + 476 + ], + [ + 817, + 469 + ], + [ + 615, + 475 + ], + [ + 602, + 458 + ], + [ + 592, + 439 + ], + [ + 569, + 464 + ], + [ + 547, + 428 + ], + [ + 538, + 480 + ], + [ + 515, + 450 + ], + [ + 511, + 490 + ], + [ + 481, + 451 + ], + [ + 472, + 474 + ], + [ + 433, + 464 + ], + [ + 408, + 472 + ], + [ + 393, + 477 + ], + [ + 401, + 492 + ], + [ + 320, + 498 + ], + [ + 242, + 502 + ], + [ + 230, + 490 + ], + [ + 204, + 479 + ], + [ + 0, + 501 + ], + [ + 0, + 524 + ], + [ + 1, + 641 + ], + [ + 255, + 642 + ], + [ + 500, + 644 + ], + [ + 691, + 647 + ], + [ + 721, + 651 + ], + [ + 816, + 616 + ], + [ + 866, + 596 + ], + [ + 973, + 569 + ], + [ + 1064, + 548 + ], + [ + 1144, + 533 + ], + [ + 1210, + 520 + ], + [ + 1300, + 505 + ], + [ + 1317, + 503 + ], + [ + 1389, + 480 + ], + [ + 1447, + 464 + ], + [ + 1472, + 456 + ], + [ + 1495, + 446 + ], + [ + 1508, + 439 + ], + [ + 1510, + 436 + ], + [ + 1510, + 430 + ], + [ + 1501, + 427 + ], + [ + 1482, + 427 + ], + [ + 1453, + 424 + ], + [ + 1438, + 422 + ], + [ + 1432, + 419 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1805, + 416 + ], + [ + 1768, + 422 + ], + [ + 1740, + 428 + ], + [ + 1702, + 442 + ], + [ + 1683, + 454 + ], + [ + 1667, + 481 + ], + [ + 1661, + 503 + ], + [ + 1668, + 519 + ], + [ + 1681, + 531 + ], + [ + 1692, + 539 + ], + [ + 1737, + 562 + ], + [ + 1807, + 589 + ], + [ + 1912, + 621 + ], + [ + 2048, + 659 + ], + [ + 2048, + 438 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1063, + 167 + ], + [ + 1062, + 39 + ], + [ + 1057, + 35 + ], + [ + 1057, + 
33 + ], + [ + 1107, + 25 + ], + [ + 1106, + 8 + ], + [ + 1101, + 1 + ], + [ + 964, + 2 + ], + [ + 940, + 6 + ], + [ + 940, + 3 + ], + [ + 930, + 7 + ], + [ + 926, + 9 + ], + [ + 918, + 11 + ], + [ + 917, + 1 + ], + [ + 0, + 1 + ], + [ + 0, + 243 + ], + [ + 773, + 444 + ], + [ + 862, + 437 + ], + [ + 896, + 427 + ], + [ + 898, + 423 + ], + [ + 899, + 391 + ], + [ + 1394, + 303 + ], + [ + 1511, + 300 + ], + [ + 1587, + 308 + ], + [ + 1712, + 298 + ], + [ + 1814, + 312 + ], + [ + 1937, + 322 + ], + [ + 2048, + 141 + ], + [ + 2047, + 29 + ], + [ + 2034, + 31 + ], + [ + 2026, + 35 + ], + [ + 2015, + 29 + ], + [ + 1946, + 20 + ], + [ + 1928, + 24 + ], + [ + 1925, + 28 + ], + [ + 1918, + 26 + ], + [ + 1881, + 31 + ], + [ + 1871, + 38 + ], + [ + 1870, + 34 + ], + [ + 1817, + 30 + ], + [ + 1735, + 45 + ], + [ + 1736, + 67 + ], + [ + 1647, + 81 + ], + [ + 1640, + 76 + ], + [ + 1638, + 76 + ], + [ + 1635, + 85 + ], + [ + 1633, + 97 + ], + [ + 1568, + 106 + ], + [ + 1569, + 129 + ], + [ + 1475, + 115 + ], + [ + 1419, + 121 + ], + [ + 1420, + 144 + ], + [ + 1386, + 163 + ], + [ + 1063, + 179 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 661, + 143 + ], + [ + 652, + 133 + ], + [ + 626, + 131 + ], + [ + 602, + 127 + ], + [ + 575, + 132 + ], + [ + 547, + 136 + ], + [ + 519, + 144 + ], + [ + 427, + 144 + ], + [ + 417, + 117 + ], + [ + 401, + 98 + ], + [ + 400, + 74 + ], + [ + 384, + 57 + ], + [ + 368, + 35 + ], + [ + 337, + 19 + ], + [ + 323, + 5 + ], + [ + 313, + 1 + ], + [ + 0, + 1 + ], + [ + 0, + 218 + ], + [ + 698, + 187 + ], + [ + 670, + 151 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1965, + 117 + ], + [ + 1977, + 100 + ], + [ + 2015, + 104 + ], + [ + 2019, + 88 + ], + [ + 2027, + 71 + ], + [ + 2038, + 52 + ], + [ + 2048, + 42 + ], + [ + 2047, + 127 + ], + [ + 2004, + 130 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1351, + 146 + ], + [ + 1350, + 1 + ], + [ + 1341, + 1 + ], + [ + 1342, + 152 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1418, + 158 + ], + [ + 1415, + 1 + ], + [ + 1408, + 1 + ], + [ + 1409, + 152 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1450, + 154 + ], + [ + 1447, + 1 + ], + [ + 1438, + 1 + ], + [ + 1441, + 7 + ], + [ + 1444, + 30 + ], + [ + 1446, + 160 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1647, + 399 + ], + [ + 1643, + 274 + ], + [ + 1637, + 282 + ], + [ + 1636, + 401 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1750, + 270 + ], + [ + 1748, + 432 + ], + [ + 1752, + 432 + ], + [ + 1753, + 266 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1558, + 368 + ], + [ + 1539, + 349 + ], + [ + 1510, + 347 + ], + [ + 1471, + 344 + ], + [ + 1429, + 353 + ], + [ + 1428, + 408 + ], + [ + 1560, + 394 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1482, + 423 + ], + [ + 1483, + 298 + ], + [ + 1487, + 290 + ], + [ + 1493, + 282 + ], + [ + 1502, + 281 + ], + [ + 1495, + 286 + ], + [ + 1490, + 290 + ], + [ + 1486, + 299 + ], + [ + 1485, + 430 + ], + [ + 1481, + 429 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1495, + 348 + ], + [ + 1486, + 348 + ], + [ + 1486, + 373 + ], + [ + 1495, + 373 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1578, + 291 + ], + [ + 1578, + 278 + ], + [ + 1566, + 276 + ], + [ + 1567, + 292 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1178, + 319 + ], + [ + 1155, + 313 + ], + [ + 1142, + 315 + ], + [ + 1128, + 310 + ], + [ + 1120, + 328 + ], + [ + 1103, + 345 + ], + [ + 1082, + 337 + ], + [ 
+ 1067, + 311 + ], + [ + 1067, + 311 + ], + [ + 1062, + 311 + ], + [ + 1055, + 337 + ], + [ + 1048, + 338 + ], + [ + 1045, + 294 + ], + [ + 1030, + 280 + ], + [ + 911, + 288 + ], + [ + 914, + 344 + ], + [ + 895, + 347 + ], + [ + 898, + 398 + ], + [ + 1050, + 402 + ], + [ + 1123, + 402 + ], + [ + 1179, + 404 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 996, + 270 + ], + [ + 998, + 435 + ], + [ + 1003, + 435 + ], + [ + 1000, + 270 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 967, + 348 + ], + [ + 969, + 422 + ], + [ + 972, + 421 + ], + [ + 969, + 348 + ] + ] + }, + { + "label": "bridge", + "polygon": [ + [ + 1805, + 92 + ], + [ + 1420, + 110 + ], + [ + 1375, + 112 + ], + [ + 1291, + 107 + ], + [ + 867, + 128 + ], + [ + 513, + 143 + ], + [ + 309, + 149 + ], + [ + 0, + 161 + ], + [ + 1, + 464 + ], + [ + 213, + 465 + ], + [ + 396, + 461 + ], + [ + 690, + 451 + ], + [ + 774, + 444 + ], + [ + 772, + 405 + ], + [ + 740, + 407 + ], + [ + 739, + 302 + ], + [ + 1159, + 296 + ], + [ + 1169, + 308 + ], + [ + 1169, + 467 + ], + [ + 1267, + 486 + ], + [ + 1318, + 486 + ], + [ + 1435, + 458 + ], + [ + 1433, + 284 + ], + [ + 1769, + 278 + ], + [ + 1809, + 315 + ], + [ + 1807, + 448 + ], + [ + 1820, + 472 + ], + [ + 1850, + 489 + ], + [ + 2047, + 484 + ], + [ + 2048, + 80 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 662, + 318 + ], + [ + 652, + 312 + ], + [ + 643, + 312 + ], + [ + 643, + 360 + ], + [ + 653, + 358 + ], + [ + 661, + 351 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 639, + 301 + ], + [ + 638, + 494 + ], + [ + 649, + 493 + ], + [ + 644, + 300 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 3, + 702 + ], + [ + 99, + 702 + ], + [ + 386, + 700 + ], + [ + 498, + 709 + ], + [ + 501, + 742 + ], + [ + 385, + 789 + ], + [ + 248, + 846 + ], + [ + 212, + 862 + ], + [ + 173, + 868 + ], + [ + 62, + 887 + ], + [ + 0, + 891 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 147, + 303 + ], + [ + 137, + 287 + ], + [ + 89, + 298 + ], + [ + 117, + 257 + ], + [ + 104, + 247 + ], + [ + 88, + 254 + ], + [ + 63, + 248 + ], + [ + 21, + 239 + ], + [ + 0, + 242 + ], + [ + 0, + 420 + ], + [ + 177, + 381 + ], + [ + 143, + 363 + ], + [ + 131, + 353 + ], + [ + 121, + 347 + ], + [ + 141, + 329 + ], + [ + 147, + 316 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 49, + 391 + ], + [ + 0, + 389 + ], + [ + 0, + 523 + ], + [ + 44, + 521 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 309, + 554 + ], + [ + 303, + 305 + ], + [ + 314, + 276 + ], + [ + 363, + 282 + ], + [ + 411, + 286 + ], + [ + 427, + 258 + ], + [ + 392, + 232 + ], + [ + 356, + 235 + ], + [ + 309, + 210 + ], + [ + 316, + 194 + ], + [ + 357, + 145 + ], + [ + 374, + 118 + ], + [ + 375, + 78 + ], + [ + 382, + 50 + ], + [ + 368, + 31 + ], + [ + 348, + 19 + ], + [ + 339, + 6 + ], + [ + 337, + 0 + ], + [ + 182, + 0 + ], + [ + 179, + 26 + ], + [ + 182, + 114 + ], + [ + 213, + 161 + ], + [ + 225, + 187 + ], + [ + 271, + 203 + ], + [ + 277, + 245 + ], + [ + 240, + 255 + ], + [ + 214, + 247 + ], + [ + 201, + 256 + ], + [ + 209, + 268 + ], + [ + 233, + 281 + ], + [ + 181, + 300 + ], + [ + 191, + 325 + ], + [ + 218, + 310 + ], + [ + 252, + 298 + ], + [ + 290, + 304 + ], + [ + 297, + 552 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 371, + 341 + ], + [ + 284, + 341 + ], + [ + 240, + 345 + ], + [ + 245, + 372 + ], + [ + 248, + 560 + ], + [ + 260, + 560 + ], + [ + 259, + 366 + ], + [ + 312, + 367 + ], + [ + 323, + 550 + ], + [ + 332, + 550 + ], + [ + 327, + 367 + 
], + [ + 351, + 366 + ], + [ + 359, + 552 + ], + [ + 372, + 551 + ], + [ + 366, + 364 + ], + [ + 374, + 347 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 239, + 131 + ], + [ + 189, + 146 + ], + [ + 185, + 162 + ], + [ + 155, + 166 + ], + [ + 130, + 175 + ], + [ + 122, + 175 + ], + [ + 124, + 1 + ], + [ + 236, + 0 + ], + [ + 236, + 25 + ], + [ + 193, + 35 + ], + [ + 194, + 54 + ], + [ + 235, + 57 + ], + [ + 232, + 83 + ], + [ + 193, + 90 + ], + [ + 193, + 108 + ], + [ + 238, + 111 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 14, + 24 + ], + [ + 5, + 18 + ], + [ + 5, + 8 + ], + [ + 6, + 0 + ], + [ + 66, + 2 + ], + [ + 67, + 166 + ], + [ + 21, + 157 + ], + [ + 6, + 138 + ], + [ + 3, + 122 + ], + [ + 7, + 108 + ], + [ + 12, + 103 + ], + [ + 4, + 85 + ], + [ + 4, + 57 + ], + [ + 8, + 41 + ], + [ + 16, + 37 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 130, + 176 + ], + [ + 98, + 174 + ], + [ + 61, + 159 + ], + [ + 65, + 130 + ], + [ + 97, + 129 + ], + [ + 93, + 112 + ], + [ + 65, + 102 + ], + [ + 65, + 69 + ], + [ + 100, + 74 + ], + [ + 97, + 62 + ], + [ + 60, + 46 + ], + [ + 63, + 21 + ], + [ + 100, + 24 + ], + [ + 131, + 35 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 138, + 662 + ], + [ + 132, + 168 + ], + [ + 129, + 2 + ], + [ + 118, + 2 + ], + [ + 97, + 662 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 93, + 365 + ], + [ + 80, + 785 + ], + [ + 105, + 785 + ], + [ + 122, + 367 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 99, + 11 + ], + [ + 98, + 154 + ], + [ + 153, + 156 + ], + [ + 159, + 1 + ], + [ + 100, + 1 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 200, + 382 + ], + [ + 21, + 383 + ], + [ + 15, + 651 + ], + [ + 207, + 643 + ], + [ + 206, + 385 + ], + [ + 203, + 381 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 807, + 15 + ], + [ + 775, + 40 + ], + [ + 778, + 58 + ], + [ + 760, + 76 + ], + [ + 761, + 91 + ], + [ + 761, + 101 + ], + [ + 758, + 117 + ], + [ + 763, + 133 + ], + [ + 768, + 165 + ], + [ + 761, + 206 + ], + [ + 760, + 222 + ], + [ + 775, + 234 + ], + [ + 801, + 240 + ], + [ + 801, + 279 + ], + [ + 814, + 293 + ], + [ + 826, + 327 + ], + [ + 830, + 367 + ], + [ + 836, + 508 + ], + [ + 841, + 508 + ], + [ + 837, + 378 + ], + [ + 849, + 339 + ], + [ + 860, + 300 + ], + [ + 881, + 298 + ], + [ + 906, + 276 + ], + [ + 898, + 255 + ], + [ + 845, + 248 + ], + [ + 858, + 228 + ], + [ + 841, + 214 + ], + [ + 834, + 199 + ], + [ + 839, + 176 + ], + [ + 853, + 190 + ], + [ + 875, + 198 + ], + [ + 864, + 168 + ], + [ + 851, + 141 + ], + [ + 855, + 123 + ], + [ + 848, + 93 + ], + [ + 858, + 86 + ], + [ + 856, + 50 + ], + [ + 827, + 23 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 882, + 375 + ], + [ + 799, + 376 + ], + [ + 801, + 503 + ], + [ + 809, + 503 + ], + [ + 810, + 385 + ], + [ + 846, + 390 + ], + [ + 851, + 504 + ], + [ + 857, + 504 + ], + [ + 857, + 398 + ], + [ + 870, + 398 + ], + [ + 871, + 504 + ], + [ + 878, + 503 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1783, + 435 + ], + [ + 1779, + 430 + ], + [ + 1779, + 418 + ], + [ + 1780, + 413 + ], + [ + 1789, + 410 + ], + [ + 1786, + 407 + ], + [ + 1788, + 401 + ], + [ + 1792, + 401 + ], + [ + 1795, + 404 + ], + [ + 1795, + 408 + ], + [ + 1802, + 412 + ], + [ + 1802, + 416 + ], + [ + 1803, + 430 + ], + [ + 1799, + 436 + ], + [ + 1797, + 466 + ], + [ + 1791, + 467 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 2036, + 509 + ], + [ + 1953, + 503 + ], 
+ [ + 1921, + 505 + ], + [ + 1917, + 499 + ], + [ + 1870, + 496 + ], + [ + 1875, + 485 + ], + [ + 1950, + 468 + ], + [ + 2047, + 455 + ], + [ + 2048, + 510 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 2013, + 275 + ], + [ + 2003, + 277 + ], + [ + 1978, + 270 + ], + [ + 1953, + 278 + ], + [ + 1963, + 285 + ], + [ + 1963, + 300 + ], + [ + 1953, + 296 + ], + [ + 1952, + 311 + ], + [ + 1951, + 330 + ], + [ + 1936, + 337 + ], + [ + 1926, + 360 + ], + [ + 1918, + 373 + ], + [ + 1930, + 396 + ], + [ + 1938, + 417 + ], + [ + 1937, + 434 + ], + [ + 1920, + 437 + ], + [ + 1919, + 460 + ], + [ + 1898, + 460 + ], + [ + 1886, + 477 + ], + [ + 1875, + 483 + ], + [ + 1874, + 488 + ], + [ + 1883, + 491 + ], + [ + 1920, + 487 + ], + [ + 1940, + 495 + ], + [ + 1968, + 491 + ], + [ + 2003, + 484 + ], + [ + 2023, + 484 + ], + [ + 2048, + 480 + ], + [ + 2048, + 276 + ], + [ + 2044, + 276 + ], + [ + 2036, + 288 + ], + [ + 2027, + 291 + ], + [ + 2019, + 291 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1889, + 350 + ], + [ + 1889, + 492 + ], + [ + 1899, + 492 + ], + [ + 1898, + 348 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1877, + 368 + ], + [ + 1877, + 384 + ], + [ + 1882, + 391 + ], + [ + 1889, + 394 + ], + [ + 1900, + 394 + ], + [ + 1907, + 392 + ], + [ + 1912, + 382 + ], + [ + 1911, + 370 + ], + [ + 1902, + 360 + ], + [ + 1886, + 360 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1825, + 215 + ], + [ + 1823, + 361 + ], + [ + 1925, + 361 + ], + [ + 1928, + 214 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1995, + 510 + ], + [ + 1995, + 0 + ], + [ + 1989, + 0 + ], + [ + 1986, + 509 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000099_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000099_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..668ac6c508588199732eb4f16d9466af1c412d20 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000099_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000100_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000100_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..86e3b76cc37bd3cf8dda33dd7e75f8cebc4f1fbd Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000100_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000100_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000100_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..92a275fcd8d6dce12f091a6172b54548627ca33c 
Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000100_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000101_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000101_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..c42112e682ef6b6095090f4c469e8282bf494bc1 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000101_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000101_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000101_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..8696f58860a8515fae5d76f6511a4cdc0779957d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000101_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000101_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000101_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..97afb465f3fca2eb7dc20545f56693357cc4d723 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000101_000019_gtFine_polygons.json @@ -0,0 +1,1817 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sidewalk", + "polygon": [ + [ + 366, + 1009 + ], + [ + 455, + 919 + ], + [ + 514, + 845 + ], + [ + 534, + 799 + ], + [ + 534, + 746 + ], + [ + 508, + 696 + ], + [ + 503, + 679 + ], + [ + 500, + 669 + ], + [ + 496, + 668 + ], + [ + 464, + 661 + ], + [ + 431, + 657 + ], + [ + 372, + 653 + ], + [ + 308, + 652 + ], + [ + 244, + 655 + ], + [ + 159, + 659 + ], + [ + 81, + 666 + ], + [ + 47, + 667 + ], + [ + 10, + 674 + ], + [ + 0, + 679 + ], + [ + 0, + 1024 + ], + [ + 345, + 1024 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 272, + 436 + ], + [ + 2048, + 406 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 433 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 22, + 573 + ], + [ + 536, + 550 + ], + [ + 1152, + 526 + ], + [ + 1705, + 502 + ], + [ + 1896, + 497 + ], + [ + 2048, + 493 + ], + [ + 2048, + 492 + ], + [ + 2048, + 477 + ], + [ + 2048, + 447 + ], + [ + 0, + 514 + ], + [ + 0, + 574 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1563, + 491 + ], + [ + 1737, + 491 + ], + [ + 1859, + 491 + ], + [ + 2028, + 492 + ], + [ + 2048, + 492 + ], + [ + 2048, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 540 + ], + [ + 0, + 543 + ], + [ + 14, + 546 + ], + [ + 254, + 536 + ], + [ + 543, + 526 + ], + [ + 827, + 515 + ], + [ + 1204, + 505 + ], + [ + 1330, + 500 + ], + [ + 1444, + 494 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 23, + 194 + ], + [ + 35, + 194 + ], + [ + 50, + 554 + ], + [ + 38, + 554 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 25, + 368 + ], + [ + 23, + 372 + ], + [ + 23, + 379 + ], + [ + 25, + 385 + ], + [ + 28, + 389 + ], + [ + 33, + 394 + ], + [ + 43, + 395 + ], + [ + 52, + 394 + ], + [ + 54, + 384 + ], + [ + 56, + 377 + ], + [ + 53, + 371 + ], + [ + 48, + 367 + ], + [ + 45, + 364 + ], + [ + 37, + 363 + ], + [ + 30, + 366 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 156, + 554 + ], + [ + 174, + 554 + ], + [ + 171, + 480 + ], + [ + 155, + 480 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 16, + 193 + ], + [ + 5, + 192 + ], + [ + 0, + 186 + ], + [ + 0, + 55 + ], + [ + 2, + 53 + ], + [ + 24, + 51 + ], 
+ [ + 50, + 58 + ], + [ + 66, + 87 + ], + [ + 68, + 147 + ], + [ + 62, + 178 + ], + [ + 53, + 191 + ], + [ + 42, + 198 + ], + [ + 27, + 198 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 101, + 384 + ], + [ + 113, + 397 + ], + [ + 120, + 413 + ], + [ + 120, + 433 + ], + [ + 117, + 453 + ], + [ + 105, + 464 + ], + [ + 74, + 466 + ], + [ + 49, + 463 + ], + [ + 36, + 452 + ], + [ + 26, + 440 + ], + [ + 23, + 420 + ], + [ + 28, + 404 + ], + [ + 36, + 391 + ], + [ + 48, + 381 + ], + [ + 61, + 381 + ], + [ + 79, + 381 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 19, + 82 + ], + [ + 54, + 325 + ], + [ + 63, + 397 + ], + [ + 79, + 448 + ], + [ + 95, + 424 + ], + [ + 41, + 134 + ], + [ + 33, + 58 + ], + [ + 26, + 39 + ], + [ + 13, + 40 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 89, + 61 + ], + [ + 105, + 56 + ], + [ + 119, + 61 + ], + [ + 142, + 64 + ], + [ + 153, + 61 + ], + [ + 156, + 61 + ], + [ + 156, + 51 + ], + [ + 173, + 50 + ], + [ + 173, + 33 + ], + [ + 222, + 19 + ], + [ + 231, + 14 + ], + [ + 235, + 6 + ], + [ + 235, + 0 + ], + [ + 106, + 0 + ], + [ + 108, + 33 + ], + [ + 108, + 41 + ], + [ + 105, + 45 + ], + [ + 92, + 48 + ], + [ + 83, + 46 + ], + [ + 76, + 46 + ], + [ + 76, + 66 + ], + [ + 80, + 73 + ], + [ + 90, + 73 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 273, + 550 + ], + [ + 273, + 475 + ], + [ + 286, + 475 + ], + [ + 290, + 549 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 393, + 543 + ], + [ + 391, + 472 + ], + [ + 405, + 472 + ], + [ + 408, + 545 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 497, + 541 + ], + [ + 496, + 471 + ], + [ + 510, + 472 + ], + [ + 514, + 544 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 607, + 535 + ], + [ + 606, + 466 + ], + [ + 619, + 466 + ], + [ + 622, + 537 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 709, + 532 + ], + [ + 709, + 464 + ], + [ + 722, + 465 + ], + [ + 725, + 533 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 946, + 136 + ], + [ + 935, + 142 + ], + [ + 930, + 132 + ], + [ + 925, + 132 + ], + [ + 923, + 110 + ], + [ + 923, + 78 + ], + [ + 925, + 66 + ], + [ + 934, + 64 + ], + [ + 938, + 58 + ], + [ + 944, + 58 + ], + [ + 948, + 62 + ], + [ + 950, + 72 + ], + [ + 953, + 75 + ], + [ + 960, + 77 + ], + [ + 960, + 81 + ], + [ + 954, + 84 + ], + [ + 949, + 85 + ], + [ + 950, + 95 + ], + [ + 957, + 95 + ], + [ + 959, + 96 + ], + [ + 959, + 102 + ], + [ + 953, + 104 + ], + [ + 949, + 106 + ], + [ + 950, + 111 + ], + [ + 959, + 115 + ], + [ + 961, + 118 + ], + [ + 955, + 122 + ], + [ + 949, + 124 + ], + [ + 948, + 132 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 977, + 44 + ], + [ + 964, + 48 + ], + [ + 959, + 50 + ], + [ + 958, + 50 + ], + [ + 957, + 62 + ], + [ + 956, + 84 + ], + [ + 955, + 112 + ], + [ + 961, + 121 + ], + [ + 963, + 124 + ], + [ + 968, + 129 + ], + [ + 976, + 128 + ], + [ + 979, + 118 + ], + [ + 979, + 111 + ], + [ + 989, + 109 + ], + [ + 989, + 107 + ], + [ + 989, + 102 + ], + [ + 980, + 101 + ], + [ + 980, + 93 + ], + [ + 989, + 90 + ], + [ + 990, + 88 + ], + [ + 990, + 85 + ], + [ + 990, + 81 + ], + [ + 979, + 81 + ], + [ + 980, + 72 + ], + [ + 985, + 69 + ], + [ + 989, + 69 + ], + [ + 989, + 64 + ], + [ + 989, + 61 + ], + [ + 979, + 58 + ], + [ + 978, + 49 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 856, + 526 + ], + [ + 846, + 289 + ], + [ + 843, + 179 + ], + [ + 847, + 153 + ], + [ + 855, + 137 + ], + [ + 889, + 99 + ], + [ + 917, + 80 + ], + [ + 948, + 64 + 
], + [ + 950, + 70 + ], + [ + 893, + 103 + ], + [ + 868, + 129 + ], + [ + 855, + 147 + ], + [ + 851, + 176 + ], + [ + 850, + 210 + ], + [ + 867, + 524 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 863, + 306 + ], + [ + 865, + 367 + ], + [ + 874, + 367 + ], + [ + 875, + 362 + ], + [ + 889, + 357 + ], + [ + 889, + 353 + ], + [ + 887, + 350 + ], + [ + 875, + 349 + ], + [ + 875, + 342 + ], + [ + 886, + 339 + ], + [ + 886, + 335 + ], + [ + 886, + 332 + ], + [ + 884, + 331 + ], + [ + 874, + 329 + ], + [ + 874, + 324 + ], + [ + 887, + 319 + ], + [ + 887, + 315 + ], + [ + 887, + 313 + ], + [ + 875, + 312 + ], + [ + 874, + 307 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 915, + 525 + ], + [ + 914, + 460 + ], + [ + 926, + 460 + ], + [ + 931, + 524 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1001, + 520 + ], + [ + 999, + 458 + ], + [ + 1013, + 458 + ], + [ + 1013, + 522 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1086, + 519 + ], + [ + 1085, + 455 + ], + [ + 1100, + 455 + ], + [ + 1100, + 519 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1169, + 516 + ], + [ + 1166, + 453 + ], + [ + 1181, + 452 + ], + [ + 1181, + 516 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1255, + 511 + ], + [ + 1253, + 450 + ], + [ + 1267, + 451 + ], + [ + 1268, + 511 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1322, + 508 + ], + [ + 1314, + 21 + ], + [ + 1318, + 21 + ], + [ + 1332, + 506 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1307, + 27 + ], + [ + 1308, + 16 + ], + [ + 1327, + 8 + ], + [ + 1347, + 10 + ], + [ + 1345, + 18 + ], + [ + 1327, + 27 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1683, + 426 + ], + [ + 1682, + 493 + ], + [ + 1687, + 493 + ], + [ + 1683, + 347 + ], + [ + 1681, + 347 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1679, + 338 + ], + [ + 1683, + 341 + ], + [ + 1685, + 352 + ], + [ + 1685, + 373 + ], + [ + 1685, + 395 + ], + [ + 1681, + 403 + ], + [ + 1678, + 402 + ], + [ + 1680, + 360 + ], + [ + 1678, + 346 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1930, + 129 + ], + [ + 1934, + 129 + ], + [ + 1940, + 493 + ], + [ + 1930, + 493 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1962, + 123 + ], + [ + 1952, + 120 + ], + [ + 1930, + 126 + ], + [ + 1930, + 132 + ], + [ + 1938, + 135 + ], + [ + 1962, + 128 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 366, + 1009 + ], + [ + 455, + 919 + ], + [ + 514, + 845 + ], + [ + 534, + 799 + ], + [ + 534, + 746 + ], + [ + 508, + 696 + ], + [ + 503, + 679 + ], + [ + 500, + 669 + ], + [ + 496, + 668 + ], + [ + 464, + 661 + ], + [ + 431, + 657 + ], + [ + 372, + 653 + ], + [ + 308, + 652 + ], + [ + 244, + 655 + ], + [ + 159, + 659 + ], + [ + 81, + 666 + ], + [ + 47, + 667 + ], + [ + 10, + 674 + ], + [ + 0, + 679 + ], + [ + 0, + 1024 + ], + [ + 345, + 1024 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 0, + 721 + ], + [ + 71, + 718 + ], + [ + 171, + 709 + ], + [ + 505, + 697 + ], + [ + 522, + 718 + ], + [ + 535, + 751 + ], + [ + 534, + 796 + ], + [ + 523, + 827 + ], + [ + 512, + 849 + ], + [ + 480, + 889 + ], + [ + 397, + 980 + ], + [ + 353, + 1023 + ], + [ + 327, + 1023 + ], + [ + 312, + 984 + ], + [ + 0, + 1000 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 134, + 801 + ], + [ + 76, + 806 + ], + [ + 62, + 526 + ], + [ + 69, + 493 + ], + [ + 48, + 76 + ], + [ + 48, + 0 + ], + [ + 77, + 0 + ], + [ + 103, + 491 + ], + [ + 120, + 519 + ] + ] + }, + { + "label": "static", + "polygon": 
[ + [ + 66, + 37 + ], + [ + 66, + 88 + ], + [ + 45, + 89 + ], + [ + 42, + 76 + ], + [ + 35, + 66 + ], + [ + 12, + 60 + ], + [ + 0, + 53 + ], + [ + 0, + 0 + ], + [ + 78, + 0 + ], + [ + 82, + 29 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 124, + 486 + ], + [ + 96, + 485 + ], + [ + 94, + 432 + ], + [ + 104, + 432 + ], + [ + 117, + 441 + ], + [ + 125, + 464 + ], + [ + 127, + 482 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 64, + 489 + ], + [ + 48, + 491 + ], + [ + 47, + 465 + ], + [ + 48, + 451 + ], + [ + 56, + 442 + ], + [ + 64, + 436 + ], + [ + 66, + 436 + ], + [ + 70, + 491 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000102_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000102_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d32e9a857852974ea9aa97b434820049cdc41d30 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000102_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000102_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000102_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..2c51980da1b6f9740c99fde35a4f8eeb17b5025f Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000102_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000103_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000103_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..e1581ccb315a84178c08921665c74a901a7bf706 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000103_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000103_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000103_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..360a0ed0f16a6fb6142b4ce12c1d844fc9c02cc7 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000103_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000103_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000103_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..8d8e21e925f41fd156a7e2acfd1e594aaaeadd1c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000103_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000104_000019_gtFine_color.png 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000104_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..fd89b655207052e86e264fe027df544542a8bce3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000104_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000105_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000105_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..7e69745d8f1491e1b78df4bd5097367111c3d643 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000105_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000105_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000105_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..9325a0472a4a20cfa2ca751a8e8a9d9e4dfa91e7 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000105_000019_gtFine_polygons.json @@ -0,0 +1,3998 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 494, + 22 + ], + [ + 723, + 391 + ], + [ + 1252, + 398 + ], + [ + 2048, + 363 + ], + [ + 2048, + 0 + ], + [ + 470, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 471, + 538 + ], + [ + 802, + 438 + ], + [ + 1045, + 440 + ], + [ + 2048, + 884 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 565 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1489, + 453 + ], + [ + 1045, + 463 + ], + [ + 1020, + 481 + ], + [ + 1193, + 568 + ], + [ + 1740, + 786 + ], + [ + 2048, + 825 + ], + [ + 2048, + 486 + ], + [ + 2048, + 466 + ], + [ + 1628, + 435 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1904, + 990 + ], + [ + 1333, + 656 + ], + [ + 1133, + 556 + ], + [ + 1035, + 517 + ], + [ + 999, + 491 + ], + [ + 1031, + 482 + ], + [ + 1081, + 504 + ], + [ + 1174, + 533 + ], + [ + 1249, + 551 + ], + [ + 1502, + 553 + ], + [ + 1930, + 552 + ], + [ + 1920, + 566 + ], + [ + 1614, + 570 + ], + [ + 1612, + 582 + ], + [ + 1418, + 587 + ], + [ + 1399, + 591 + ], + [ + 1486, + 633 + ], + [ + 1582, + 619 + ], + [ + 2048, + 774 + ], + [ + 2048, + 1024 + ], + [ + 1960, + 1024 + ], + [ + 1931, + 1008 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 75, + 797 + ], + [ + 304, + 704 + ], + [ + 430, + 652 + ], + [ + 589, + 585 + ], + [ + 671, + 545 + ], + [ + 710, + 517 + ], + [ + 746, + 500 + ], + [ + 763, + 472 + ], + [ + 738, + 466 + ], + [ + 688, + 473 + ], + [ + 0, + 609 + ], + [ + 0, + 830 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 2037, + 218 + ], + [ + 2012, + 219 + ], + [ + 2012, + 205 + ], + [ + 1999, + 205 + ], + [ + 1999, + 221 + ], + [ + 1970, + 225 + ], + [ + 1969, + 221 + ], + [ + 1953, + 222 + ], + [ + 1955, + 227 + ], + [ + 1937, + 228 + ], + [ + 1939, + 259 + ], + [ + 1836, + 265 + ], + [ + 1838, + 255 + ], + [ + 1797, + 242 + ], + [ + 1714, + 268 + ], + [ + 1671, + 278 + ], + [ + 1600, + 269 + ], + [ + 1583, + 262 + ], + [ + 1535, + 267 + ], + [ + 1533, + 219 + ], + [ + 1530, + 219 + ], + [ + 1531, + 266 + ], + [ + 1476, + 269 + ], + [ + 1474, + 250 + ], + [ + 1471, + 250 + ], + [ + 1472, + 269 + ], + [ + 1389, + 275 + ], + [ + 1388, + 232 + ], + [ + 1385, + 232 + ], + [ + 1385, + 274 + ], + [ + 1357, + 278 + ], + [ + 1305, + 283 + ], + [ + 1296, + 282 + ], + [ + 1294, + 238 + ], + [ + 1291, + 238 + ], + [ + 1290, + 286 + ], + [ + 
1072, + 308 + ], + [ + 1039, + 308 + ], + [ + 1031, + 303 + ], + [ + 1020, + 303 + ], + [ + 1001, + 305 + ], + [ + 1000, + 313 + ], + [ + 968, + 311 + ], + [ + 968, + 356 + ], + [ + 956, + 356 + ], + [ + 956, + 351 + ], + [ + 950, + 351 + ], + [ + 950, + 356 + ], + [ + 934, + 356 + ], + [ + 934, + 353 + ], + [ + 928, + 353 + ], + [ + 926, + 358 + ], + [ + 893, + 358 + ], + [ + 856, + 356 + ], + [ + 809, + 357 + ], + [ + 776, + 358 + ], + [ + 751, + 357 + ], + [ + 729, + 355 + ], + [ + 726, + 465 + ], + [ + 819, + 458 + ], + [ + 835, + 453 + ], + [ + 883, + 451 + ], + [ + 920, + 450 + ], + [ + 1224, + 442 + ], + [ + 1902, + 414 + ], + [ + 2048, + 412 + ], + [ + 2048, + 207 + ], + [ + 2037, + 207 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 2038, + 328 + ], + [ + 2021, + 323 + ], + [ + 2001, + 322 + ], + [ + 1988, + 322 + ], + [ + 1979, + 307 + ], + [ + 1979, + 288 + ], + [ + 1973, + 272 + ], + [ + 1958, + 273 + ], + [ + 1948, + 294 + ], + [ + 1945, + 310 + ], + [ + 1941, + 296 + ], + [ + 1923, + 291 + ], + [ + 1903, + 305 + ], + [ + 1891, + 309 + ], + [ + 1890, + 327 + ], + [ + 1894, + 339 + ], + [ + 1882, + 352 + ], + [ + 1866, + 355 + ], + [ + 1849, + 355 + ], + [ + 1849, + 337 + ], + [ + 1839, + 311 + ], + [ + 1842, + 298 + ], + [ + 1848, + 284 + ], + [ + 1849, + 272 + ], + [ + 1837, + 256 + ], + [ + 1832, + 235 + ], + [ + 1810, + 218 + ], + [ + 1772, + 203 + ], + [ + 1751, + 196 + ], + [ + 1719, + 193 + ], + [ + 1689, + 201 + ], + [ + 1677, + 219 + ], + [ + 1678, + 238 + ], + [ + 1686, + 253 + ], + [ + 1667, + 259 + ], + [ + 1668, + 239 + ], + [ + 1668, + 224 + ], + [ + 1650, + 212 + ], + [ + 1626, + 215 + ], + [ + 1631, + 229 + ], + [ + 1632, + 249 + ], + [ + 1624, + 248 + ], + [ + 1596, + 231 + ], + [ + 1595, + 251 + ], + [ + 1581, + 271 + ], + [ + 1569, + 288 + ], + [ + 1568, + 303 + ], + [ + 1569, + 314 + ], + [ + 1559, + 299 + ], + [ + 1539, + 299 + ], + [ + 1523, + 302 + ], + [ + 1528, + 313 + ], + [ + 1512, + 297 + ], + [ + 1505, + 282 + ], + [ + 1495, + 278 + ], + [ + 1490, + 289 + ], + [ + 1474, + 305 + ], + [ + 1472, + 324 + ], + [ + 1455, + 331 + ], + [ + 1432, + 329 + ], + [ + 1418, + 332 + ], + [ + 1419, + 311 + ], + [ + 1420, + 299 + ], + [ + 1405, + 308 + ], + [ + 1399, + 341 + ], + [ + 1404, + 365 + ], + [ + 1396, + 386 + ], + [ + 1398, + 361 + ], + [ + 1391, + 335 + ], + [ + 1375, + 314 + ], + [ + 1357, + 293 + ], + [ + 1326, + 293 + ], + [ + 1294, + 284 + ], + [ + 1280, + 270 + ], + [ + 1261, + 263 + ], + [ + 1240, + 260 + ], + [ + 1230, + 246 + ], + [ + 1220, + 237 + ], + [ + 1197, + 232 + ], + [ + 1196, + 242 + ], + [ + 1190, + 247 + ], + [ + 1169, + 247 + ], + [ + 1152, + 256 + ], + [ + 1142, + 270 + ], + [ + 1122, + 270 + ], + [ + 1114, + 270 + ], + [ + 1109, + 270 + ], + [ + 1102, + 246 + ], + [ + 1092, + 226 + ], + [ + 1080, + 211 + ], + [ + 1065, + 195 + ], + [ + 1058, + 213 + ], + [ + 1059, + 250 + ], + [ + 1053, + 289 + ], + [ + 1048, + 315 + ], + [ + 1044, + 327 + ], + [ + 1028, + 317 + ], + [ + 998, + 317 + ], + [ + 989, + 332 + ], + [ + 979, + 350 + ], + [ + 947, + 380 + ], + [ + 941, + 388 + ], + [ + 935, + 415 + ], + [ + 934, + 451 + ], + [ + 977, + 477 + ], + [ + 1037, + 484 + ], + [ + 1068, + 491 + ], + [ + 1107, + 491 + ], + [ + 1149, + 490 + ], + [ + 1211, + 483 + ], + [ + 1265, + 481 + ], + [ + 1314, + 482 + ], + [ + 1396, + 484 + ], + [ + 1505, + 480 + ], + [ + 1536, + 484 + ], + [ + 1621, + 486 + ], + [ + 1678, + 481 + ], + [ + 1745, + 475 + ], + [ + 1882, + 473 + ], + [ + 2048, + 450 + ], + [ + 2048, + 323 + ] + ] + }, + { + 
"label": "building", + "polygon": [ + [ + 1824, + 390 + ], + [ + 1826, + 477 + ], + [ + 2026, + 482 + ], + [ + 2048, + 482 + ], + [ + 2048, + 334 + ], + [ + 2002, + 341 + ], + [ + 1913, + 356 + ], + [ + 1894, + 359 + ], + [ + 1877, + 366 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1912, + 533 + ], + [ + 1825, + 528 + ], + [ + 1821, + 525 + ], + [ + 1826, + 522 + ], + [ + 1820, + 512 + ], + [ + 1825, + 502 + ], + [ + 1829, + 496 + ], + [ + 1820, + 495 + ], + [ + 1822, + 486 + ], + [ + 1836, + 486 + ], + [ + 1861, + 454 + ], + [ + 1871, + 455 + ], + [ + 1895, + 479 + ], + [ + 1929, + 476 + ], + [ + 1929, + 466 + ], + [ + 1943, + 466 + ], + [ + 1947, + 475 + ], + [ + 1957, + 480 + ], + [ + 1960, + 495 + ], + [ + 1960, + 506 + ], + [ + 1986, + 508 + ], + [ + 2002, + 510 + ], + [ + 1992, + 523 + ], + [ + 1976, + 526 + ], + [ + 1958, + 528 + ], + [ + 1953, + 534 + ], + [ + 1935, + 534 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2005, + 725 + ], + [ + 1987, + 722 + ], + [ + 1991, + 479 + ], + [ + 2008, + 478 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1756, + 655 + ], + [ + 1767, + 654 + ], + [ + 1763, + 476 + ], + [ + 1753, + 475 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1611, + 618 + ], + [ + 1604, + 616 + ], + [ + 1603, + 473 + ], + [ + 1612, + 472 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1362, + 602 + ], + [ + 1386, + 603 + ], + [ + 1377, + 181 + ], + [ + 1377, + 71 + ], + [ + 1371, + 39 + ], + [ + 1364, + 6 + ], + [ + 1359, + 0 + ], + [ + 1348, + 0 + ], + [ + 1351, + 7 + ], + [ + 1354, + 25 + ], + [ + 1360, + 52 + ], + [ + 1361, + 83 + ], + [ + 1362, + 127 + ], + [ + 1362, + 165 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1349, + 252 + ], + [ + 1342, + 261 + ], + [ + 1340, + 272 + ], + [ + 1340, + 284 + ], + [ + 1346, + 295 + ], + [ + 1356, + 299 + ], + [ + 1367, + 300 + ], + [ + 1377, + 297 + ], + [ + 1389, + 287 + ], + [ + 1391, + 271 + ], + [ + 1387, + 256 + ], + [ + 1383, + 249 + ], + [ + 1375, + 243 + ], + [ + 1367, + 242 + ], + [ + 1357, + 243 + ], + [ + 1353, + 247 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1325, + 466 + ], + [ + 1334, + 467 + ], + [ + 1338, + 557 + ], + [ + 1331, + 557 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 878, + 310 + ], + [ + 871, + 326 + ], + [ + 866, + 350 + ], + [ + 868, + 359 + ], + [ + 893, + 361 + ], + [ + 893, + 353 + ], + [ + 889, + 337 + ], + [ + 881, + 313 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 891, + 302 + ], + [ + 896, + 302 + ], + [ + 902, + 393 + ], + [ + 897, + 393 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 746, + 307 + ], + [ + 742, + 307 + ], + [ + 745, + 389 + ], + [ + 749, + 389 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 837, + 341 + ], + [ + 823, + 335 + ], + [ + 818, + 336 + ], + [ + 816, + 347 + ], + [ + 812, + 348 + ], + [ + 811, + 341 + ], + [ + 806, + 336 + ], + [ + 801, + 335 + ], + [ + 798, + 343 + ], + [ + 796, + 346 + ], + [ + 787, + 349 + ], + [ + 784, + 355 + ], + [ + 779, + 366 + ], + [ + 773, + 377 + ], + [ + 763, + 375 + ], + [ + 754, + 375 + ], + [ + 734, + 370 + ], + [ + 735, + 401 + ], + [ + 745, + 396 + ], + [ + 786, + 395 + ], + [ + 817, + 395 + ], + [ + 831, + 399 + ], + [ + 850, + 394 + ], + [ + 864, + 397 + ], + [ + 886, + 406 + ], + [ + 891, + 395 + ], + [ + 930, + 400 + ], + [ + 946, + 408 + ], + [ + 958, + 420 + ], + [ + 972, + 411 + ], + [ + 971, + 392 + ], + [ + 957, + 381 + ], + [ + 943, + 381 + ], + [ + 932, + 380 + ], + [ + 919, + 376 
+ ], + [ + 903, + 376 + ], + [ + 893, + 378 + ], + [ + 879, + 369 + ], + [ + 865, + 360 + ], + [ + 853, + 349 + ], + [ + 841, + 344 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 843, + 299 + ], + [ + 848, + 300 + ], + [ + 852, + 394 + ], + [ + 848, + 394 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 835, + 415 + ], + [ + 836, + 430 + ], + [ + 824, + 430 + ], + [ + 828, + 455 + ], + [ + 871, + 453 + ], + [ + 883, + 436 + ], + [ + 881, + 417 + ], + [ + 870, + 415 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 845, + 408 + ], + [ + 846, + 455 + ], + [ + 850, + 455 + ], + [ + 848, + 367 + ], + [ + 844, + 367 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 845, + 363 + ], + [ + 842, + 367 + ], + [ + 842, + 368 + ], + [ + 846, + 368 + ], + [ + 853, + 366 + ], + [ + 850, + 362 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 776, + 437 + ], + [ + 776, + 431 + ], + [ + 783, + 425 + ], + [ + 789, + 417 + ], + [ + 789, + 408 + ], + [ + 787, + 402 + ], + [ + 784, + 392 + ], + [ + 779, + 381 + ], + [ + 757, + 393 + ], + [ + 748, + 395 + ], + [ + 746, + 402 + ], + [ + 745, + 416 + ], + [ + 751, + 424 + ], + [ + 758, + 429 + ], + [ + 772, + 430 + ], + [ + 772, + 452 + ], + [ + 776, + 454 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 780, + 311 + ], + [ + 766, + 329 + ], + [ + 780, + 328 + ], + [ + 760, + 334 + ], + [ + 749, + 339 + ], + [ + 747, + 314 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 801, + 309 + ], + [ + 843, + 308 + ], + [ + 842, + 341 + ], + [ + 823, + 328 + ], + [ + 794, + 332 + ], + [ + 796, + 327 + ], + [ + 814, + 325 + ], + [ + 796, + 311 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 622, + 44 + ], + [ + 673, + 125 + ], + [ + 678, + 126 + ], + [ + 681, + 121 + ], + [ + 685, + 121 + ], + [ + 688, + 121 + ], + [ + 690, + 149 + ], + [ + 694, + 155 + ], + [ + 692, + 136 + ], + [ + 695, + 136 + ], + [ + 697, + 155 + ], + [ + 701, + 152 + ], + [ + 705, + 152 + ], + [ + 706, + 174 + ], + [ + 711, + 181 + ], + [ + 711, + 177 + ], + [ + 715, + 176 + ], + [ + 717, + 180 + ], + [ + 717, + 190 + ], + [ + 727, + 198 + ], + [ + 738, + 216 + ], + [ + 742, + 226 + ], + [ + 734, + 230 + ], + [ + 740, + 236 + ], + [ + 734, + 240 + ], + [ + 736, + 254 + ], + [ + 746, + 263 + ], + [ + 736, + 275 + ], + [ + 744, + 449 + ], + [ + 744, + 485 + ], + [ + 704, + 494 + ], + [ + 613, + 527 + ], + [ + 593, + 532 + ], + [ + 434, + 547 + ], + [ + 226, + 587 + ], + [ + 0, + 629 + ], + [ + 0, + 0 + ], + [ + 597, + 0 + ], + [ + 602, + 22 + ], + [ + 613, + 26 + ], + [ + 618, + 38 + ], + [ + 616, + 42 + ], + [ + 616, + 44 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 588, + 412 + ], + [ + 764, + 432 + ], + [ + 769, + 468 + ], + [ + 722, + 466 + ], + [ + 634, + 476 + ], + [ + 585, + 480 + ], + [ + 405, + 494 + ], + [ + 403, + 413 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 320, + 399 + ], + [ + 319, + 528 + ], + [ + 187, + 529 + ], + [ + 176, + 384 + ], + [ + 264, + 397 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 210, + 406 + ], + [ + 209, + 416 + ], + [ + 212, + 436 + ], + [ + 215, + 444 + ], + [ + 223, + 444 + ], + [ + 225, + 436 + ], + [ + 227, + 422 + ], + [ + 225, + 407 + ], + [ + 223, + 397 + ], + [ + 217, + 396 + ], + [ + 213, + 399 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 925, + 415 + ], + [ + 934, + 415 + ], + [ + 934, + 440 + ], + [ + 922, + 440 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 941, + 344 + ], + [ + 943, + 445 + ], + [ + 
947, + 445 + ], + [ + 942, + 340 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 933, + 338 + ], + [ + 942, + 340 + ], + [ + 942, + 342 + ], + [ + 934, + 343 + ], + [ + 929, + 344 + ], + [ + 927, + 341 + ], + [ + 926, + 338 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 979, + 383 + ], + [ + 978, + 316 + ], + [ + 981, + 316 + ], + [ + 983, + 387 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 976, + 311 + ], + [ + 980, + 314 + ], + [ + 976, + 317 + ], + [ + 966, + 316 + ], + [ + 965, + 311 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 969, + 436 + ], + [ + 965, + 312 + ], + [ + 969, + 312 + ], + [ + 973, + 441 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 952, + 309 + ], + [ + 963, + 309 + ], + [ + 966, + 314 + ], + [ + 962, + 316 + ], + [ + 956, + 316 + ], + [ + 950, + 313 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 948, + 400 + ], + [ + 945, + 405 + ], + [ + 940, + 404 + ], + [ + 939, + 399 + ], + [ + 942, + 394 + ], + [ + 945, + 395 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 998, + 343 + ], + [ + 1000, + 450 + ], + [ + 1004, + 450 + ], + [ + 1002, + 340 + ], + [ + 996, + 323 + ], + [ + 989, + 308 + ], + [ + 981, + 295 + ], + [ + 969, + 284 + ], + [ + 958, + 277 + ], + [ + 944, + 272 + ], + [ + 942, + 277 + ], + [ + 953, + 281 + ], + [ + 972, + 291 + ], + [ + 980, + 301 + ], + [ + 990, + 319 + ], + [ + 997, + 334 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 994, + 407 + ], + [ + 993, + 399 + ], + [ + 993, + 395 + ], + [ + 998, + 393 + ], + [ + 1003, + 394 + ], + [ + 1004, + 398 + ], + [ + 1004, + 408 + ], + [ + 1002, + 408 + ], + [ + 998, + 408 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1082, + 319 + ], + [ + 1086, + 509 + ], + [ + 1095, + 510 + ], + [ + 1088, + 303 + ], + [ + 1086, + 277 + ], + [ + 1076, + 244 + ], + [ + 1060, + 214 + ], + [ + 1032, + 187 + ], + [ + 1014, + 173 + ], + [ + 988, + 164 + ], + [ + 987, + 169 + ], + [ + 1007, + 175 + ], + [ + 1029, + 191 + ], + [ + 1049, + 209 + ], + [ + 1064, + 233 + ], + [ + 1075, + 262 + ], + [ + 1081, + 295 + ], + [ + 1082, + 308 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 920, + 265 + ], + [ + 916, + 272 + ], + [ + 919, + 273 + ], + [ + 920, + 276 + ], + [ + 923, + 279 + ], + [ + 928, + 278 + ], + [ + 934, + 275 + ], + [ + 943, + 277 + ], + [ + 944, + 273 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 952, + 145 + ], + [ + 943, + 157 + ], + [ + 949, + 160 + ], + [ + 950, + 166 + ], + [ + 955, + 169 + ], + [ + 965, + 169 + ], + [ + 970, + 164 + ], + [ + 989, + 170 + ], + [ + 991, + 164 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1059, + 368 + ], + [ + 1085, + 323 + ], + [ + 1112, + 367 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1135, + 519 + ], + [ + 1134, + 462 + ], + [ + 1130, + 462 + ], + [ + 1130, + 520 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1174, + 530 + ], + [ + 1179, + 530 + ], + [ + 1179, + 463 + ], + [ + 1174, + 464 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1212, + 533 + ], + [ + 1216, + 531 + ], + [ + 1211, + 468 + ], + [ + 1208, + 469 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1245, + 538 + ], + [ + 1249, + 538 + ], + [ + 1248, + 467 + ], + [ + 1244, + 467 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 818, + 436 + ], + [ + 824, + 438 + ], + [ + 831, + 444 + ], + [ + 833, + 453 + ], + [ + 830, + 456 + ], + [ + 806, + 458 + ], + [ + 798, + 446 + ], + [ + 796, + 443 + ], + [ + 797, + 440 + ], + [ + 
802, + 436 + ], + [ + 808, + 436 + ], + [ + 814, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 833, + 469 + ], + [ + 833, + 477 + ], + [ + 833, + 479 + ], + [ + 826, + 479 + ], + [ + 824, + 476 + ], + [ + 811, + 476 + ], + [ + 802, + 460 + ], + [ + 803, + 450 + ], + [ + 804, + 444 + ], + [ + 809, + 443 + ], + [ + 818, + 443 + ], + [ + 823, + 445 + ], + [ + 827, + 449 + ], + [ + 829, + 451 + ], + [ + 832, + 451 + ], + [ + 833, + 454 + ], + [ + 830, + 456 + ], + [ + 833, + 461 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 788, + 441 + ], + [ + 801, + 440 + ], + [ + 805, + 444 + ], + [ + 810, + 452 + ], + [ + 816, + 451 + ], + [ + 819, + 452 + ], + [ + 818, + 456 + ], + [ + 815, + 458 + ], + [ + 819, + 469 + ], + [ + 820, + 479 + ], + [ + 820, + 483 + ], + [ + 819, + 489 + ], + [ + 814, + 491 + ], + [ + 809, + 480 + ], + [ + 799, + 463 + ], + [ + 792, + 449 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 814, + 504 + ], + [ + 810, + 504 + ], + [ + 793, + 473 + ], + [ + 784, + 449 + ], + [ + 791, + 438 + ], + [ + 800, + 445 + ], + [ + 803, + 450 + ], + [ + 805, + 456 + ], + [ + 809, + 457 + ], + [ + 815, + 460 + ], + [ + 815, + 464 + ], + [ + 810, + 467 + ], + [ + 812, + 476 + ], + [ + 814, + 484 + ], + [ + 815, + 493 + ], + [ + 815, + 500 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 795, + 502 + ], + [ + 742, + 507 + ], + [ + 737, + 511 + ], + [ + 728, + 512 + ], + [ + 724, + 499 + ], + [ + 725, + 479 + ], + [ + 728, + 470 + ], + [ + 724, + 470 + ], + [ + 720, + 467 + ], + [ + 720, + 461 + ], + [ + 727, + 462 + ], + [ + 730, + 461 + ], + [ + 737, + 444 + ], + [ + 745, + 440 + ], + [ + 784, + 439 + ], + [ + 788, + 439 + ], + [ + 796, + 446 + ], + [ + 800, + 458 + ], + [ + 805, + 456 + ], + [ + 808, + 456 + ], + [ + 810, + 459 + ], + [ + 809, + 461 + ], + [ + 804, + 464 + ], + [ + 807, + 476 + ], + [ + 808, + 483 + ], + [ + 809, + 495 + ], + [ + 808, + 502 + ], + [ + 807, + 507 + ], + [ + 801, + 508 + ], + [ + 797, + 505 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 902, + 525 + ], + [ + 898, + 516 + ], + [ + 897, + 500 + ], + [ + 897, + 489 + ], + [ + 898, + 480 + ], + [ + 903, + 470 + ], + [ + 904, + 466 + ], + [ + 891, + 466 + ], + [ + 890, + 463 + ], + [ + 893, + 458 + ], + [ + 901, + 457 + ], + [ + 906, + 462 + ], + [ + 910, + 455 + ], + [ + 914, + 443 + ], + [ + 920, + 436 + ], + [ + 927, + 432 + ], + [ + 934, + 429 + ], + [ + 995, + 430 + ], + [ + 1001, + 432 + ], + [ + 1014, + 450 + ], + [ + 1020, + 463 + ], + [ + 1028, + 487 + ], + [ + 1027, + 509 + ], + [ + 1026, + 532 + ], + [ + 1010, + 531 + ], + [ + 1010, + 526 + ], + [ + 1000, + 524 + ], + [ + 999, + 520 + ], + [ + 982, + 519 + ], + [ + 967, + 519 + ], + [ + 937, + 519 + ], + [ + 931, + 520 + ], + [ + 925, + 521 + ], + [ + 919, + 519 + ], + [ + 918, + 533 + ], + [ + 904, + 533 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 475, + 434 + ], + [ + 510, + 434 + ], + [ + 522, + 436 + ], + [ + 526, + 433 + ], + [ + 538, + 434 + ], + [ + 550, + 438 + ], + [ + 560, + 442 + ], + [ + 568, + 457 + ], + [ + 575, + 477 + ], + [ + 583, + 491 + ], + [ + 582, + 507 + ], + [ + 582, + 526 + ], + [ + 584, + 550 + ], + [ + 582, + 571 + ], + [ + 580, + 574 + ], + [ + 565, + 577 + ], + [ + 556, + 576 + ], + [ + 554, + 561 + ], + [ + 542, + 563 + ], + [ + 542, + 571 + ], + [ + 538, + 579 + ], + [ + 537, + 586 + ], + [ + 530, + 587 + ], + [ + 509, + 588 + ], + [ + 505, + 586 + ], + [ + 500, + 576 + ], + [ + 477, + 576 + ], + [ + 449, + 575 + ], + [ + 412, + 575 + ], + [ + 384, + 576 + ], + [ 
+ 374, + 557 + ], + [ + 367, + 503 + ], + [ + 376, + 475 + ], + [ + 386, + 464 + ], + [ + 395, + 454 + ], + [ + 410, + 445 + ], + [ + 421, + 436 + ], + [ + 428, + 432 + ], + [ + 439, + 432 + ], + [ + 456, + 435 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 343, + 450 + ], + [ + 369, + 451 + ], + [ + 382, + 455 + ], + [ + 387, + 460 + ], + [ + 388, + 472 + ], + [ + 391, + 480 + ], + [ + 398, + 468 + ], + [ + 404, + 464 + ], + [ + 411, + 465 + ], + [ + 415, + 469 + ], + [ + 408, + 475 + ], + [ + 403, + 474 + ], + [ + 397, + 477 + ], + [ + 390, + 486 + ], + [ + 394, + 491 + ], + [ + 403, + 492 + ], + [ + 410, + 497 + ], + [ + 412, + 500 + ], + [ + 416, + 507 + ], + [ + 416, + 516 + ], + [ + 417, + 533 + ], + [ + 418, + 540 + ], + [ + 424, + 544 + ], + [ + 419, + 547 + ], + [ + 415, + 559 + ], + [ + 416, + 563 + ], + [ + 409, + 568 + ], + [ + 403, + 575 + ], + [ + 404, + 585 + ], + [ + 403, + 593 + ], + [ + 399, + 599 + ], + [ + 401, + 600 + ], + [ + 408, + 604 + ], + [ + 402, + 605 + ], + [ + 391, + 607 + ], + [ + 388, + 601 + ], + [ + 379, + 598 + ], + [ + 377, + 595 + ], + [ + 371, + 597 + ], + [ + 367, + 609 + ], + [ + 359, + 616 + ], + [ + 351, + 616 + ], + [ + 332, + 590 + ], + [ + 328, + 552 + ], + [ + 327, + 518 + ], + [ + 327, + 491 + ], + [ + 327, + 480 + ], + [ + 328, + 473 + ], + [ + 330, + 470 + ], + [ + 324, + 461 + ], + [ + 313, + 458 + ], + [ + 311, + 454 + ], + [ + 313, + 448 + ], + [ + 326, + 452 + ], + [ + 331, + 455 + ], + [ + 329, + 463 + ], + [ + 334, + 472 + ], + [ + 337, + 462 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 101, + 442 + ], + [ + 162, + 438 + ], + [ + 192, + 437 + ], + [ + 220, + 439 + ], + [ + 266, + 443 + ], + [ + 303, + 458 + ], + [ + 323, + 477 + ], + [ + 351, + 513 + ], + [ + 354, + 529 + ], + [ + 355, + 555 + ], + [ + 355, + 567 + ], + [ + 354, + 597 + ], + [ + 352, + 605 + ], + [ + 351, + 631 + ], + [ + 346, + 656 + ], + [ + 341, + 663 + ], + [ + 317, + 663 + ], + [ + 304, + 663 + ], + [ + 301, + 651 + ], + [ + 299, + 636 + ], + [ + 238, + 657 + ], + [ + 235, + 679 + ], + [ + 224, + 699 + ], + [ + 201, + 710 + ], + [ + 186, + 711 + ], + [ + 167, + 711 + ], + [ + 159, + 709 + ], + [ + 150, + 700 + ], + [ + 141, + 684 + ], + [ + 140, + 683 + ], + [ + 100, + 685 + ], + [ + 65, + 687 + ], + [ + 32, + 690 + ], + [ + 0, + 689 + ], + [ + 0, + 469 + ], + [ + 0, + 464 + ], + [ + 28, + 450 + ], + [ + 54, + 446 + ], + [ + 80, + 441 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 948, + 487 + ], + [ + 988, + 488 + ], + [ + 989, + 479 + ], + [ + 949, + 478 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000106_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000106_000019_gtFine_labelTrainIds.png 
new file mode 100644 index 0000000000000000000000000000000000000000..d7f1fa943a2362c88999603bdf3bd6af8e760d91 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000106_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000107_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000107_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..5ba23d04a11c8210c0bd09ca9994744bb84c9703 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000107_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000107_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000107_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..dda88dffe483a8524035173a39f29540d904543e --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000107_000019_gtFine_polygons.json @@ -0,0 +1,7386 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 544, + 6 + ], + [ + 952, + 341 + ], + [ + 1333, + 367 + ], + [ + 1635, + 352 + ], + [ + 1805, + 0 + ], + [ + 535, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1970, + 798 + ], + [ + 1254, + 487 + ], + [ + 915, + 500 + ], + [ + 519, + 507 + ], + [ + 0, + 525 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 830 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1965, + 871 + ], + [ + 1608, + 698 + ], + [ + 1540, + 666 + ], + [ + 1497, + 639 + ], + [ + 1446, + 610 + ], + [ + 1394, + 589 + ], + [ + 1327, + 571 + ], + [ + 1252, + 558 + ], + [ + 1182, + 552 + ], + [ + 1123, + 542 + ], + [ + 1103, + 537 + ], + [ + 1083, + 535 + ], + [ + 1169, + 531 + ], + [ + 1106, + 532 + ], + [ + 1109, + 523 + ], + [ + 1280, + 513 + ], + [ + 1413, + 505 + ], + [ + 1430, + 503 + ], + [ + 1584, + 497 + ], + [ + 2003, + 518 + ], + [ + 2048, + 519 + ], + [ + 2048, + 906 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 558, + 536 + ], + [ + 463, + 536 + ], + [ + 374, + 539 + ], + [ + 317, + 540 + ], + [ + 0, + 546 + ], + [ + 0, + 497 + ], + [ + 213, + 498 + ], + [ + 581, + 513 + ], + [ + 907, + 506 + ], + [ + 1004, + 506 + ], + [ + 982, + 522 + ], + [ + 947, + 523 + ], + [ + 882, + 522 + ], + [ + 831, + 524 + ], + [ + 792, + 528 + ], + [ + 758, + 529 + ], + [ + 697, + 533 + ], + [ + 620, + 535 + ], + [ + 593, + 533 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1059, + 144 + ], + [ + 1054, + 291 + ], + [ + 1086, + 299 + ], + [ + 1077, + 143 + ], + [ + 1108, + 77 + ], + [ + 1106, + 74 + ], + [ + 1089, + 71 + ], + [ + 1088, + 67 + ], + [ + 1102, + 61 + ], + [ + 1102, + 58 + ], + [ + 1100, + 55 + ], + [ + 1077, + 53 + ], + [ + 1076, + 47 + ], + [ + 1098, + 42 + ], + [ + 1098, + 39 + ], + [ + 1094, + 36 + ], + [ + 1072, + 32 + ], + [ + 1071, + 0 + ], + [ + 1058, + 0 + ], + [ + 1059, + 35 + ], + [ + 1041, + 35 + ], + [ + 1036, + 39 + ], + [ + 1034, + 41 + ], + [ + 1039, + 47 + ], + [ + 1054, + 49 + ], + [ + 1054, + 54 + ], + [ + 1040, + 55 + ], + [ + 1031, + 59 + ], + [ + 1028, + 62 + ], + [ + 1031, + 64 + ], + [ + 1042, + 69 + ], + [ + 1045, + 72 + ], + [ + 1031, + 74 + ], + [ + 1025, + 77 + ], + [ + 1022, + 79 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 950, + 258 + ], + [ + 1021, + 256 + ], + [ + 1078, + 289 + ], + [ + 1114, + 287 + ], + [ + 1115, + 310 + ], + [ + 1122, + 311 + ], + [ + 1136, + 319 + ], + [ + 1172, + 316 + ], 
+ [ + 1467, + 318 + ], + [ + 1496, + 311 + ], + [ + 1514, + 304 + ], + [ + 1516, + 275 + ], + [ + 1543, + 264 + ], + [ + 1567, + 261 + ], + [ + 1571, + 261 + ], + [ + 1581, + 104 + ], + [ + 1618, + 97 + ], + [ + 1618, + 83 + ], + [ + 1602, + 78 + ], + [ + 1618, + 8 + ], + [ + 1616, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 489 + ], + [ + 1867, + 515 + ], + [ + 1543, + 491 + ], + [ + 1536, + 508 + ], + [ + 1513, + 507 + ], + [ + 1487, + 506 + ], + [ + 1463, + 506 + ], + [ + 1423, + 506 + ], + [ + 1405, + 508 + ], + [ + 1312, + 496 + ], + [ + 1216, + 508 + ], + [ + 1182, + 507 + ], + [ + 1148, + 507 + ], + [ + 1091, + 500 + ], + [ + 1033, + 496 + ], + [ + 919, + 503 + ], + [ + 923, + 258 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1365, + 165 + ], + [ + 1375, + 175 + ], + [ + 1381, + 173 + ], + [ + 1386, + 174 + ], + [ + 1397, + 180 + ], + [ + 1409, + 184 + ], + [ + 1418, + 189 + ], + [ + 1428, + 199 + ], + [ + 1432, + 196 + ], + [ + 1438, + 199 + ], + [ + 1446, + 207 + ], + [ + 1451, + 207 + ], + [ + 1454, + 213 + ], + [ + 1460, + 214 + ], + [ + 1462, + 211 + ], + [ + 1468, + 211 + ], + [ + 1475, + 217 + ], + [ + 1477, + 221 + ], + [ + 1485, + 228 + ], + [ + 1489, + 238 + ], + [ + 1490, + 244 + ], + [ + 1488, + 249 + ], + [ + 1480, + 249 + ], + [ + 1469, + 249 + ], + [ + 1470, + 255 + ], + [ + 1475, + 256 + ], + [ + 1484, + 256 + ], + [ + 1491, + 254 + ], + [ + 1498, + 252 + ], + [ + 1504, + 256 + ], + [ + 1515, + 264 + ], + [ + 1535, + 270 + ], + [ + 1536, + 285 + ], + [ + 1531, + 288 + ], + [ + 1527, + 290 + ], + [ + 1538, + 299 + ], + [ + 1538, + 311 + ], + [ + 1522, + 317 + ], + [ + 1515, + 323 + ], + [ + 1515, + 338 + ], + [ + 1515, + 351 + ], + [ + 1502, + 356 + ], + [ + 1504, + 367 + ], + [ + 1500, + 377 + ], + [ + 1493, + 390 + ], + [ + 1489, + 406 + ], + [ + 1492, + 424 + ], + [ + 1484, + 435 + ], + [ + 1470, + 437 + ], + [ + 1457, + 448 + ], + [ + 1450, + 466 + ], + [ + 1442, + 476 + ], + [ + 1436, + 483 + ], + [ + 1431, + 490 + ], + [ + 1417, + 487 + ], + [ + 1335, + 477 + ], + [ + 1202, + 483 + ], + [ + 1181, + 483 + ], + [ + 1171, + 485 + ], + [ + 1171, + 478 + ], + [ + 1155, + 477 + ], + [ + 1127, + 462 + ], + [ + 1118, + 453 + ], + [ + 1100, + 450 + ], + [ + 1102, + 422 + ], + [ + 1104, + 407 + ], + [ + 1114, + 406 + ], + [ + 1124, + 411 + ], + [ + 1124, + 404 + ], + [ + 1119, + 394 + ], + [ + 1121, + 378 + ], + [ + 1105, + 381 + ], + [ + 1109, + 373 + ], + [ + 1134, + 367 + ], + [ + 1148, + 363 + ], + [ + 1147, + 353 + ], + [ + 1134, + 342 + ], + [ + 1137, + 326 + ], + [ + 1139, + 312 + ], + [ + 1141, + 304 + ], + [ + 1149, + 288 + ], + [ + 1169, + 278 + ], + [ + 1192, + 270 + ], + [ + 1199, + 266 + ], + [ + 1213, + 264 + ], + [ + 1230, + 268 + ], + [ + 1234, + 259 + ], + [ + 1237, + 251 + ], + [ + 1232, + 238 + ], + [ + 1260, + 208 + ], + [ + 1275, + 193 + ], + [ + 1279, + 179 + ], + [ + 1295, + 172 + ], + [ + 1308, + 167 + ], + [ + 1315, + 160 + ], + [ + 1327, + 164 + ], + [ + 1340, + 159 + ], + [ + 1348, + 162 + ], + [ + 1356, + 162 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1471, + 478 + ], + [ + 1469, + 507 + ], + [ + 1430, + 508 + ], + [ + 1431, + 479 + ], + [ + 1441, + 475 + ], + [ + 1455, + 475 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1053, + 319 + ], + [ + 1053, + 354 + ], + [ + 1070, + 354 + ], + [ + 1070, + 321 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1422, + 463 + ], + [ + 1422, + 476 + ], + [ + 1405, + 468 + ], + [ + 1403, + 459 + ], + [ + 1398, + 456 + ], + [ + 1394, + 464 + ], + [ + 1393, 
+ 465 + ], + [ + 1392, + 450 + ], + [ + 1353, + 450 + ], + [ + 1357, + 510 + ], + [ + 1378, + 510 + ], + [ + 1398, + 507 + ], + [ + 1418, + 505 + ], + [ + 1430, + 505 + ], + [ + 1428, + 462 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1371, + 500 + ], + [ + 1365, + 495 + ], + [ + 1360, + 494 + ], + [ + 1357, + 506 + ], + [ + 1360, + 512 + ], + [ + 1370, + 512 + ], + [ + 1377, + 511 + ], + [ + 1377, + 505 + ], + [ + 1377, + 504 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1388, + 464 + ], + [ + 1384, + 467 + ], + [ + 1384, + 474 + ], + [ + 1383, + 482 + ], + [ + 1386, + 507 + ], + [ + 1391, + 512 + ], + [ + 1391, + 504 + ], + [ + 1391, + 494 + ], + [ + 1392, + 477 + ], + [ + 1393, + 472 + ], + [ + 1393, + 467 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1395, + 482 + ], + [ + 1401, + 486 + ], + [ + 1402, + 493 + ], + [ + 1401, + 501 + ], + [ + 1399, + 505 + ], + [ + 1399, + 509 + ], + [ + 1398, + 513 + ], + [ + 1392, + 513 + ], + [ + 1391, + 506 + ], + [ + 1391, + 495 + ], + [ + 1392, + 488 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1396, + 475 + ], + [ + 1397, + 482 + ], + [ + 1400, + 484 + ], + [ + 1401, + 488 + ], + [ + 1397, + 491 + ], + [ + 1394, + 492 + ], + [ + 1394, + 498 + ], + [ + 1396, + 507 + ], + [ + 1397, + 513 + ], + [ + 1392, + 516 + ], + [ + 1391, + 513 + ], + [ + 1389, + 501 + ], + [ + 1390, + 511 + ], + [ + 1390, + 513 + ], + [ + 1383, + 513 + ], + [ + 1383, + 502 + ], + [ + 1382, + 493 + ], + [ + 1383, + 484 + ], + [ + 1385, + 477 + ], + [ + 1388, + 473 + ], + [ + 1391, + 470 + ], + [ + 1394, + 469 + ], + [ + 1397, + 469 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1056, + 438 + ], + [ + 1051, + 433 + ], + [ + 1051, + 421 + ], + [ + 1052, + 410 + ], + [ + 1057, + 407 + ], + [ + 1064, + 407 + ], + [ + 1065, + 418 + ], + [ + 1068, + 421 + ], + [ + 1075, + 422 + ], + [ + 1074, + 446 + ], + [ + 1065, + 445 + ], + [ + 1063, + 437 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1405, + 517 + ], + [ + 1397, + 516 + ], + [ + 1397, + 458 + ], + [ + 1406, + 456 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 1084, + 462 + ], + [ + 1084, + 507 + ], + [ + 1142, + 506 + ], + [ + 1157, + 507 + ], + [ + 1156, + 454 + ], + [ + 1132, + 454 + ], + [ + 1129, + 457 + ], + [ + 1104, + 458 + ], + [ + 1104, + 461 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1161, + 447 + ], + [ + 1154, + 447 + ], + [ + 1155, + 520 + ], + [ + 1163, + 521 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1231, + 459 + ], + [ + 1225, + 464 + ], + [ + 1214, + 472 + ], + [ + 1206, + 477 + ], + [ + 1190, + 477 + ], + [ + 1179, + 475 + ], + [ + 1169, + 467 + ], + [ + 1161, + 460 + ], + [ + 1158, + 458 + ], + [ + 1157, + 461 + ], + [ + 1162, + 471 + ], + [ + 1169, + 475 + ], + [ + 1176, + 478 + ], + [ + 1195, + 482 + ], + [ + 1206, + 479 + ], + [ + 1218, + 477 + ], + [ + 1223, + 469 + ], + [ + 1228, + 465 + ], + [ + 1233, + 465 + ], + [ + 1238, + 470 + ], + [ + 1245, + 475 + ], + [ + 1254, + 477 + ], + [ + 1263, + 479 + ], + [ + 1269, + 481 + ], + [ + 1353, + 483 + ], + [ + 1366, + 482 + ], + [ + 1383, + 478 + ], + [ + 1395, + 473 + ], + [ + 1400, + 470 + ], + [ + 1407, + 475 + ], + [ + 1423, + 482 + ], + [ + 1452, + 489 + ], + [ + 1464, + 483 + ], + [ + 1478, + 473 + ], + [ + 1477, + 470 + ], + [ + 1471, + 474 + ], + [ + 1457, + 478 + ], + [ + 1437, + 483 + ], + [ + 1424, + 480 + ], + [ + 1409, + 472 + ], + [ + 1399, + 463 + ], + [ + 1387, + 469 + ], + [ + 1376, + 477 + ], + [ + 1365, + 478 + ], + [ + 
1343, + 479 + ], + [ + 1266, + 476 + ], + [ + 1258, + 475 + ], + [ + 1246, + 469 + ], + [ + 1236, + 463 + ] + ] + }, + { + "label": "bus", + "polygon": [ + [ + 1217, + 450 + ], + [ + 1213, + 455 + ], + [ + 1214, + 512 + ], + [ + 1257, + 512 + ], + [ + 1274, + 511 + ], + [ + 1265, + 448 + ], + [ + 1258, + 450 + ], + [ + 1256, + 446 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1209, + 441 + ], + [ + 1179, + 439 + ], + [ + 1179, + 508 + ], + [ + 1182, + 508 + ], + [ + 1184, + 450 + ], + [ + 1185, + 447 + ], + [ + 1204, + 446 + ], + [ + 1205, + 510 + ], + [ + 1208, + 510 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1020, + 449 + ], + [ + 1017, + 469 + ], + [ + 1041, + 473 + ], + [ + 1041, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1030, + 377 + ], + [ + 1031, + 483 + ], + [ + 1032, + 483 + ], + [ + 1032, + 374 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1043, + 374 + ], + [ + 1033, + 374 + ], + [ + 1030, + 376 + ], + [ + 1030, + 378 + ], + [ + 1036, + 378 + ], + [ + 1046, + 377 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1002, + 448 + ], + [ + 1001, + 464 + ], + [ + 1016, + 466 + ], + [ + 1015, + 447 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 890, + 214 + ], + [ + 952, + 245 + ], + [ + 958, + 512 + ], + [ + 865, + 512 + ], + [ + 792, + 514 + ], + [ + 718, + 520 + ], + [ + 671, + 520 + ], + [ + 616, + 521 + ], + [ + 564, + 524 + ], + [ + 541, + 524 + ], + [ + 349, + 527 + ], + [ + 260, + 527 + ], + [ + 124, + 518 + ], + [ + 95, + 515 + ], + [ + 72, + 513 + ], + [ + 31, + 514 + ], + [ + 0, + 516 + ], + [ + 0, + 0 + ], + [ + 806, + 0 + ], + [ + 812, + 3 + ], + [ + 900, + 68 + ], + [ + 896, + 103 + ], + [ + 902, + 104 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 967, + 489 + ], + [ + 958, + 489 + ], + [ + 943, + 488 + ], + [ + 927, + 488 + ], + [ + 919, + 488 + ], + [ + 919, + 495 + ], + [ + 930, + 505 + ], + [ + 945, + 512 + ], + [ + 955, + 514 + ], + [ + 961, + 513 + ], + [ + 961, + 505 + ], + [ + 965, + 496 + ], + [ + 963, + 491 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 906, + 490 + ], + [ + 927, + 490 + ], + [ + 935, + 492 + ], + [ + 941, + 502 + ], + [ + 939, + 509 + ], + [ + 938, + 514 + ], + [ + 930, + 515 + ], + [ + 920, + 512 + ], + [ + 905, + 509 + ], + [ + 893, + 506 + ], + [ + 889, + 493 + ], + [ + 892, + 490 + ], + [ + 898, + 490 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 896, + 515 + ], + [ + 884, + 515 + ], + [ + 867, + 505 + ], + [ + 858, + 491 + ], + [ + 867, + 488 + ], + [ + 879, + 493 + ], + [ + 892, + 493 + ], + [ + 902, + 503 + ], + [ + 906, + 509 + ], + [ + 907, + 514 + ], + [ + 900, + 516 + ], + [ + 892, + 516 + ], + [ + 890, + 516 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 863, + 517 + ], + [ + 858, + 517 + ], + [ + 850, + 512 + ], + [ + 842, + 497 + ], + [ + 853, + 491 + ], + [ + 864, + 492 + ], + [ + 869, + 500 + ], + [ + 879, + 501 + ], + [ + 882, + 509 + ], + [ + 879, + 517 + ], + [ + 868, + 517 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 985, + 478 + ], + [ + 966, + 479 + ], + [ + 961, + 490 + ], + [ + 957, + 491 + ], + [ + 955, + 494 + ], + [ + 959, + 496 + ], + [ + 954, + 505 + ], + [ + 954, + 511 + ], + [ + 958, + 516 + ], + [ + 970, + 516 + ], + [ + 990, + 511 + ], + [ + 992, + 493 + ], + [ + 989, + 479 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 976, + 460 + ], + [ + 1017, + 460 + ], + [ + 1025, + 464 + ], + [ + 1041, + 464 + ], + [ + 1042, + 509 + ], + [ + 978, + 519 + ] + ] + }, + { + 
"label": "pole", + "polygon": [ + [ + 1069, + 504 + ], + [ + 1060, + 504 + ], + [ + 1058, + 159 + ], + [ + 1062, + 159 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 1072, + 482 + ], + [ + 1085, + 481 + ], + [ + 1084, + 507 + ], + [ + 1040, + 506 + ], + [ + 1043, + 478 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1050, + 523 + ], + [ + 1019, + 523 + ], + [ + 993, + 524 + ], + [ + 968, + 524 + ], + [ + 963, + 519 + ], + [ + 972, + 514 + ], + [ + 987, + 511 + ], + [ + 1002, + 510 + ], + [ + 1022, + 506 + ], + [ + 1033, + 505 + ], + [ + 1044, + 505 + ], + [ + 1059, + 503 + ], + [ + 1076, + 504 + ], + [ + 1090, + 505 + ], + [ + 1106, + 506 + ], + [ + 1125, + 506 + ], + [ + 1144, + 507 + ], + [ + 1159, + 505 + ], + [ + 1169, + 505 + ], + [ + 1183, + 508 + ], + [ + 1198, + 508 + ], + [ + 1211, + 506 + ], + [ + 1221, + 506 + ], + [ + 1245, + 506 + ], + [ + 1263, + 506 + ], + [ + 1274, + 506 + ], + [ + 1274, + 512 + ], + [ + 1264, + 518 + ], + [ + 1243, + 518 + ], + [ + 1223, + 518 + ], + [ + 1208, + 519 + ], + [ + 1187, + 519 + ], + [ + 1161, + 519 + ], + [ + 1122, + 521 + ], + [ + 1102, + 522 + ], + [ + 1081, + 522 + ], + [ + 1057, + 522 + ], + [ + 1021, + 524 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1245, + 190 + ], + [ + 1247, + 212 + ], + [ + 1261, + 215 + ], + [ + 1262, + 522 + ], + [ + 1365, + 525 + ], + [ + 1357, + 210 + ], + [ + 1367, + 208 + ], + [ + 1369, + 185 + ], + [ + 1335, + 181 + ], + [ + 1301, + 180 + ], + [ + 1270, + 184 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1048, + 410 + ], + [ + 1032, + 435 + ], + [ + 1059, + 435 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1056, + 392 + ], + [ + 1049, + 395 + ], + [ + 1045, + 399 + ], + [ + 1045, + 406 + ], + [ + 1046, + 409 + ], + [ + 1053, + 409 + ], + [ + 1059, + 397 + ], + [ + 1059, + 394 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1071, + 407 + ], + [ + 1070, + 436 + ], + [ + 1077, + 436 + ], + [ + 1078, + 435 + ], + [ + 1083, + 433 + ], + [ + 1082, + 431 + ], + [ + 1078, + 428 + ], + [ + 1078, + 426 + ], + [ + 1078, + 424 + ], + [ + 1082, + 423 + ], + [ + 1082, + 421 + ], + [ + 1077, + 418 + ], + [ + 1077, + 416 + ], + [ + 1078, + 413 + ], + [ + 1083, + 413 + ], + [ + 1083, + 410 + ], + [ + 1076, + 408 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1182, + 165 + ], + [ + 1187, + 436 + ], + [ + 1185, + 442 + ], + [ + 1185, + 518 + ], + [ + 1203, + 520 + ], + [ + 1201, + 439 + ], + [ + 1197, + 436 + ], + [ + 1190, + 166 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1506, + 474 + ], + [ + 1492, + 481 + ], + [ + 1488, + 489 + ], + [ + 1474, + 503 + ], + [ + 1480, + 507 + ], + [ + 1502, + 505 + ], + [ + 1518, + 505 + ], + [ + 1519, + 479 + ], + [ + 1515, + 477 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1506, + 474 + ], + [ + 1492, + 481 + ], + [ + 1488, + 489 + ], + [ + 1474, + 503 + ], + [ + 1480, + 507 + ], + [ + 1502, + 505 + ], + [ + 1518, + 505 + ], + [ + 1519, + 479 + ], + [ + 1515, + 477 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1494, + 516 + ], + [ + 1493, + 487 + ], + [ + 1498, + 487 + ], + [ + 1499, + 513 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1984, + 367 + ], + [ + 1982, + 429 + ], + [ + 1987, + 430 + ], + [ + 1987, + 367 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1970, + 352 + ], + [ + 2005, + 349 + ], + [ + 1988, + 378 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1973, + 381 + ], + [ + 1966, + 387 + ], + [ + 
1968, + 389 + ], + [ + 1973, + 392 + ], + [ + 1973, + 394 + ], + [ + 1970, + 394 + ], + [ + 1969, + 397 + ], + [ + 1972, + 400 + ], + [ + 1975, + 402 + ], + [ + 1973, + 405 + ], + [ + 1972, + 405 + ], + [ + 1969, + 408 + ], + [ + 1970, + 411 + ], + [ + 1973, + 413 + ], + [ + 1982, + 415 + ], + [ + 1984, + 415 + ], + [ + 1983, + 379 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1841, + 182 + ], + [ + 1816, + 176 + ], + [ + 1794, + 174 + ], + [ + 1774, + 175 + ], + [ + 1755, + 179 + ], + [ + 1746, + 190 + ], + [ + 1746, + 201 + ], + [ + 1752, + 211 + ], + [ + 1745, + 215 + ], + [ + 1724, + 208 + ], + [ + 1695, + 208 + ], + [ + 1672, + 209 + ], + [ + 1665, + 238 + ], + [ + 1670, + 260 + ], + [ + 1682, + 270 + ], + [ + 1682, + 279 + ], + [ + 1675, + 293 + ], + [ + 1664, + 318 + ], + [ + 1658, + 345 + ], + [ + 1643, + 359 + ], + [ + 1636, + 374 + ], + [ + 1644, + 389 + ], + [ + 1659, + 393 + ], + [ + 1666, + 405 + ], + [ + 1685, + 410 + ], + [ + 1704, + 413 + ], + [ + 1721, + 420 + ], + [ + 1741, + 415 + ], + [ + 1742, + 397 + ], + [ + 1821, + 402 + ], + [ + 1908, + 400 + ], + [ + 1924, + 381 + ], + [ + 1937, + 339 + ], + [ + 1960, + 269 + ], + [ + 1972, + 256 + ], + [ + 1972, + 243 + ], + [ + 1969, + 223 + ], + [ + 1952, + 207 + ], + [ + 1919, + 193 + ], + [ + 1864, + 184 + ], + [ + 1861, + 183 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1765, + 389 + ], + [ + 1761, + 131 + ], + [ + 1774, + 133 + ], + [ + 1777, + 393 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1839, + 395 + ], + [ + 1843, + 137 + ], + [ + 1868, + 133 + ], + [ + 1861, + 397 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2015, + 429 + ], + [ + 2012, + 123 + ], + [ + 2026, + 106 + ], + [ + 2027, + 431 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1902, + 452 + ], + [ + 1895, + 416 + ], + [ + 1887, + 385 + ], + [ + 1872, + 381 + ], + [ + 1852, + 380 + ], + [ + 1847, + 363 + ], + [ + 1847, + 350 + ], + [ + 1832, + 339 + ], + [ + 1789, + 337 + ], + [ + 1778, + 331 + ], + [ + 1758, + 320 + ], + [ + 1753, + 317 + ], + [ + 1736, + 294 + ], + [ + 1714, + 253 + ], + [ + 1732, + 235 + ], + [ + 1759, + 214 + ], + [ + 1768, + 217 + ], + [ + 1778, + 212 + ], + [ + 1792, + 193 + ], + [ + 1790, + 176 + ], + [ + 1788, + 162 + ], + [ + 1777, + 149 + ], + [ + 1768, + 145 + ], + [ + 1757, + 146 + ], + [ + 1736, + 152 + ], + [ + 1722, + 161 + ], + [ + 1712, + 159 + ], + [ + 1718, + 145 + ], + [ + 1705, + 147 + ], + [ + 1685, + 150 + ], + [ + 1667, + 149 + ], + [ + 1636, + 126 + ], + [ + 1626, + 97 + ], + [ + 1608, + 83 + ], + [ + 1597, + 80 + ], + [ + 1588, + 66 + ], + [ + 1588, + 51 + ], + [ + 1596, + 42 + ], + [ + 1603, + 30 + ], + [ + 1559, + 8 + ], + [ + 1571, + 7 + ], + [ + 1595, + 15 + ], + [ + 1612, + 7 + ], + [ + 1621, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 292 + ], + [ + 2046, + 292 + ], + [ + 2027, + 286 + ], + [ + 2038, + 271 + ], + [ + 2027, + 258 + ], + [ + 2041, + 243 + ], + [ + 2038, + 233 + ], + [ + 2021, + 236 + ], + [ + 2002, + 236 + ], + [ + 2001, + 248 + ], + [ + 1986, + 253 + ], + [ + 1965, + 256 + ], + [ + 1952, + 263 + ], + [ + 1943, + 267 + ], + [ + 1944, + 353 + ], + [ + 1943, + 369 + ], + [ + 1953, + 367 + ], + [ + 1954, + 376 + ], + [ + 1944, + 382 + ], + [ + 1938, + 409 + ], + [ + 1931, + 427 + ], + [ + 1936, + 458 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1743, + 466 + ], + [ + 1743, + 381 + ], + [ + 1778, + 375 + ], + [ + 1907, + 373 + ], + [ + 1909, + 373 + ], + [ + 1912, + 457 + ], + [ + 1912, + 463 + ] + ] + }, + { + "label": 
"pole", + "polygon": [ + [ + 1646, + 421 + ], + [ + 1646, + 477 + ], + [ + 1651, + 476 + ], + [ + 1648, + 414 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1633, + 407 + ], + [ + 1661, + 405 + ], + [ + 1648, + 434 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1684, + 420 + ], + [ + 1683, + 474 + ], + [ + 1689, + 474 + ], + [ + 1687, + 416 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1691, + 439 + ], + [ + 1695, + 439 + ], + [ + 1697, + 438 + ], + [ + 1703, + 436 + ], + [ + 1696, + 434 + ], + [ + 1695, + 430 + ], + [ + 1698, + 427 + ], + [ + 1700, + 424 + ], + [ + 1697, + 423 + ], + [ + 1695, + 421 + ], + [ + 1698, + 419 + ], + [ + 1701, + 419 + ], + [ + 1700, + 415 + ], + [ + 1693, + 414 + ], + [ + 1688, + 414 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1701, + 387 + ], + [ + 1672, + 384 + ], + [ + 1674, + 375 + ], + [ + 1700, + 378 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1672, + 352 + ], + [ + 1683, + 353 + ], + [ + 1689, + 354 + ], + [ + 1687, + 358 + ], + [ + 1681, + 357 + ], + [ + 1682, + 362 + ], + [ + 1687, + 363 + ], + [ + 1689, + 366 + ], + [ + 1681, + 369 + ], + [ + 1681, + 372 + ], + [ + 1671, + 373 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1675, + 427 + ], + [ + 1681, + 427 + ], + [ + 1681, + 426 + ], + [ + 1680, + 423 + ], + [ + 1675, + 421 + ], + [ + 1674, + 418 + ], + [ + 1678, + 419 + ], + [ + 1682, + 416 + ], + [ + 1681, + 412 + ], + [ + 1672, + 411 + ], + [ + 1674, + 408 + ], + [ + 1678, + 409 + ], + [ + 1681, + 409 + ], + [ + 1681, + 405 + ], + [ + 1678, + 403 + ], + [ + 1668, + 400 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1690, + 395 + ], + [ + 1690, + 400 + ], + [ + 1695, + 400 + ], + [ + 1698, + 400 + ], + [ + 1698, + 403 + ], + [ + 1692, + 404 + ], + [ + 1690, + 408 + ], + [ + 1695, + 409 + ], + [ + 1696, + 409 + ], + [ + 1696, + 412 + ], + [ + 1692, + 414 + ], + [ + 1690, + 414 + ], + [ + 1690, + 418 + ], + [ + 1695, + 418 + ], + [ + 1698, + 420 + ], + [ + 1693, + 423 + ], + [ + 1686, + 426 + ], + [ + 1683, + 426 + ], + [ + 1681, + 394 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1657, + 470 + ], + [ + 1651, + 171 + ], + [ + 1673, + 174 + ], + [ + 1678, + 480 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1640, + 530 + ], + [ + 1624, + 530 + ], + [ + 1585, + 527 + ], + [ + 1554, + 525 + ], + [ + 1543, + 521 + ], + [ + 1542, + 498 + ], + [ + 1542, + 478 + ], + [ + 1553, + 470 + ], + [ + 1565, + 467 + ], + [ + 1575, + 458 + ], + [ + 1594, + 458 + ], + [ + 1599, + 464 + ], + [ + 1613, + 452 + ], + [ + 1626, + 447 + ], + [ + 1648, + 442 + ], + [ + 1664, + 450 + ], + [ + 1676, + 458 + ], + [ + 1684, + 448 + ], + [ + 1699, + 448 + ], + [ + 1719, + 450 + ], + [ + 1737, + 445 + ], + [ + 1747, + 439 + ], + [ + 1766, + 450 + ], + [ + 1782, + 450 + ], + [ + 1801, + 450 + ], + [ + 1813, + 450 + ], + [ + 1827, + 449 + ], + [ + 1854, + 448 + ], + [ + 1870, + 450 + ], + [ + 1889, + 449 + ], + [ + 1913, + 441 + ], + [ + 1934, + 436 + ], + [ + 1940, + 425 + ], + [ + 1948, + 420 + ], + [ + 1968, + 413 + ], + [ + 1977, + 411 + ], + [ + 1985, + 410 + ], + [ + 1996, + 385 + ], + [ + 2013, + 375 + ], + [ + 2003, + 364 + ], + [ + 1988, + 365 + ], + [ + 1978, + 347 + ], + [ + 1987, + 330 + ], + [ + 2013, + 328 + ], + [ + 2026, + 335 + ], + [ + 2043, + 339 + ], + [ + 2048, + 339 + ], + [ + 2048, + 571 + ], + [ + 1800, + 547 + ], + [ + 1745, + 541 + ], + [ + 1680, + 537 + ], + [ + 1664, + 533 + ], + [ + 1672, + 528 + ], + [ 
+ 1668, + 525 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1509, + 188 + ], + [ + 1517, + 188 + ], + [ + 1520, + 390 + ], + [ + 1523, + 393 + ], + [ + 1524, + 513 + ], + [ + 1511, + 514 + ], + [ + 1507, + 394 + ], + [ + 1511, + 390 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1640, + 530 + ], + [ + 1624, + 530 + ], + [ + 1585, + 527 + ], + [ + 1554, + 525 + ], + [ + 1543, + 521 + ], + [ + 1542, + 498 + ], + [ + 1542, + 478 + ], + [ + 1553, + 470 + ], + [ + 1565, + 467 + ], + [ + 1575, + 458 + ], + [ + 1594, + 458 + ], + [ + 1599, + 464 + ], + [ + 1613, + 452 + ], + [ + 1626, + 447 + ], + [ + 1648, + 442 + ], + [ + 1664, + 450 + ], + [ + 1676, + 458 + ], + [ + 1684, + 448 + ], + [ + 1699, + 448 + ], + [ + 1719, + 450 + ], + [ + 1737, + 445 + ], + [ + 1747, + 439 + ], + [ + 1766, + 450 + ], + [ + 1782, + 450 + ], + [ + 1801, + 450 + ], + [ + 1813, + 450 + ], + [ + 1827, + 449 + ], + [ + 1854, + 448 + ], + [ + 1870, + 450 + ], + [ + 1889, + 449 + ], + [ + 1913, + 441 + ], + [ + 1934, + 436 + ], + [ + 1940, + 425 + ], + [ + 1948, + 420 + ], + [ + 1968, + 413 + ], + [ + 1977, + 411 + ], + [ + 1985, + 410 + ], + [ + 1996, + 385 + ], + [ + 2013, + 375 + ], + [ + 2003, + 364 + ], + [ + 1988, + 365 + ], + [ + 1978, + 347 + ], + [ + 1987, + 330 + ], + [ + 2013, + 328 + ], + [ + 2026, + 335 + ], + [ + 2043, + 339 + ], + [ + 2048, + 339 + ], + [ + 2048, + 571 + ], + [ + 1800, + 547 + ], + [ + 1745, + 541 + ], + [ + 1680, + 537 + ], + [ + 1664, + 533 + ], + [ + 1672, + 528 + ], + [ + 1668, + 525 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1550, + 203 + ], + [ + 1540, + 198 + ], + [ + 1530, + 197 + ], + [ + 1522, + 198 + ], + [ + 1511, + 206 + ], + [ + 1507, + 219 + ], + [ + 1506, + 235 + ], + [ + 1511, + 254 + ], + [ + 1519, + 264 + ], + [ + 1535, + 264 + ], + [ + 1549, + 260 + ], + [ + 1557, + 254 + ], + [ + 1563, + 237 + ], + [ + 1561, + 220 + ], + [ + 1556, + 207 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1534, + 548 + ], + [ + 1525, + 2 + ], + [ + 1525, + 0 + ], + [ + 1539, + 0 + ], + [ + 1541, + 3 + ], + [ + 1557, + 550 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1650, + 32 + ], + [ + 1637, + 35 + ], + [ + 1626, + 41 + ], + [ + 1613, + 54 + ], + [ + 1609, + 70 + ], + [ + 1610, + 91 + ], + [ + 1616, + 109 + ], + [ + 1626, + 118 + ], + [ + 1650, + 126 + ], + [ + 1670, + 124 + ], + [ + 1683, + 119 + ], + [ + 1697, + 100 + ], + [ + 1699, + 81 + ], + [ + 1698, + 62 + ], + [ + 1684, + 46 + ], + [ + 1669, + 35 + ], + [ + 1659, + 32 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1653, + 109 + ], + [ + 1659, + 107 + ], + [ + 1662, + 108 + ], + [ + 1665, + 112 + ], + [ + 1728, + 211 + ], + [ + 1728, + 216 + ], + [ + 1727, + 220 + ], + [ + 1720, + 222 + ], + [ + 1607, + 227 + ], + [ + 1600, + 227 + ], + [ + 1596, + 225 + ], + [ + 1594, + 216 + ], + [ + 1596, + 212 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1565, + 22 + ], + [ + 1565, + 39 + ], + [ + 1560, + 48 + ], + [ + 1574, + 436 + ], + [ + 1569, + 447 + ], + [ + 1574, + 583 + ], + [ + 1568, + 595 + ], + [ + 1578, + 597 + ], + [ + 1601, + 597 + ], + [ + 1607, + 595 + ], + [ + 1600, + 583 + ], + [ + 1598, + 448 + ], + [ + 1591, + 432 + ], + [ + 1581, + 48 + ], + [ + 1577, + 37 + ], + [ + 1577, + 3 + ], + [ + 1577, + 0 + ], + [ + 1565, + 0 + ], + [ + 1565, + 3 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1604, + 119 + ], + [ + 1606, + 210 + ], + [ + 1559, + 211 + ], + [ + 1560, + 76 + ], + [ + 1602, + 75 + ] + ] + }, + { + 
"label": "pole", + "polygon": [ + [ + 1680, + 646 + ], + [ + 1671, + 469 + ], + [ + 1691, + 469 + ], + [ + 1702, + 650 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1931, + 734 + ], + [ + 1924, + 488 + ], + [ + 1927, + 483 + ], + [ + 1935, + 479 + ], + [ + 1943, + 479 + ], + [ + 1949, + 485 + ], + [ + 1956, + 495 + ], + [ + 1961, + 735 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1237, + 492 + ], + [ + 1237, + 518 + ], + [ + 1228, + 519 + ], + [ + 1228, + 456 + ], + [ + 1237, + 458 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1280, + 456 + ], + [ + 1289, + 461 + ], + [ + 1299, + 467 + ], + [ + 1316, + 472 + ], + [ + 1332, + 473 + ], + [ + 1347, + 472 + ], + [ + 1363, + 465 + ], + [ + 1377, + 459 + ], + [ + 1378, + 461 + ], + [ + 1370, + 467 + ], + [ + 1355, + 473 + ], + [ + 1339, + 476 + ], + [ + 1322, + 478 + ], + [ + 1306, + 475 + ], + [ + 1285, + 466 + ], + [ + 1276, + 459 + ], + [ + 1273, + 460 + ], + [ + 1260, + 466 + ], + [ + 1249, + 471 + ], + [ + 1232, + 474 + ], + [ + 1205, + 476 + ], + [ + 1183, + 472 + ], + [ + 1163, + 466 + ], + [ + 1156, + 460 + ], + [ + 1160, + 459 + ], + [ + 1174, + 465 + ], + [ + 1189, + 468 + ], + [ + 1205, + 472 + ], + [ + 1222, + 472 + ], + [ + 1248, + 469 + ], + [ + 1262, + 464 + ], + [ + 1274, + 458 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1483, + 461 + ], + [ + 1483, + 516 + ], + [ + 1477, + 516 + ], + [ + 1478, + 461 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1373, + 447 + ], + [ + 1374, + 533 + ], + [ + 1383, + 533 + ], + [ + 1382, + 446 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1280, + 445 + ], + [ + 1273, + 445 + ], + [ + 1274, + 523 + ], + [ + 1281, + 524 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1456, + 564 + ], + [ + 1442, + 564 + ], + [ + 1442, + 445 + ], + [ + 1454, + 448 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 878, + 447 + ], + [ + 878, + 425 + ], + [ + 862, + 424 + ], + [ + 864, + 449 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 873, + 463 + ], + [ + 873, + 451 + ], + [ + 864, + 452 + ], + [ + 864, + 461 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1173, + 247 + ], + [ + 1174, + 320 + ], + [ + 1197, + 317 + ], + [ + 1196, + 288 + ], + [ + 1198, + 284 + ], + [ + 1198, + 279 + ], + [ + 1195, + 276 + ], + [ + 1195, + 266 + ], + [ + 1198, + 260 + ], + [ + 1198, + 257 + ], + [ + 1195, + 253 + ], + [ + 1188, + 249 + ], + [ + 1185, + 247 + ], + [ + 1184, + 248 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1201, + 188 + ], + [ + 1200, + 173 + ], + [ + 1185, + 178 + ], + [ + 1185, + 199 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 930, + 342 + ], + [ + 934, + 518 + ], + [ + 939, + 518 + ], + [ + 934, + 343 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 932, + 338 + ], + [ + 947, + 339 + ], + [ + 945, + 344 + ], + [ + 936, + 347 + ], + [ + 930, + 346 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 893, + 329 + ], + [ + 899, + 330 + ], + [ + 904, + 522 + ], + [ + 893, + 524 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 912, + 519 + ], + [ + 912, + 501 + ], + [ + 916, + 501 + ], + [ + 916, + 519 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 886, + 518 + ], + [ + 886, + 502 + ], + [ + 889, + 502 + ], + [ + 890, + 517 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 875, + 499 + ], + [ + 871, + 499 + ], + [ + 872, + 519 + ], + [ + 875, + 519 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 831, + 470 + ], + [ + 
843, + 472 + ], + [ + 856, + 473 + ], + [ + 865, + 473 + ], + [ + 872, + 475 + ], + [ + 873, + 480 + ], + [ + 869, + 488 + ], + [ + 866, + 493 + ], + [ + 862, + 498 + ], + [ + 859, + 505 + ], + [ + 858, + 513 + ], + [ + 855, + 516 + ], + [ + 841, + 517 + ], + [ + 828, + 517 + ], + [ + 817, + 517 + ], + [ + 803, + 514 + ], + [ + 802, + 503 + ], + [ + 805, + 485 + ], + [ + 811, + 478 + ], + [ + 820, + 476 + ], + [ + 827, + 472 + ], + [ + 836, + 471 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 859, + 521 + ], + [ + 859, + 500 + ], + [ + 863, + 500 + ], + [ + 863, + 519 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 846, + 519 + ], + [ + 845, + 497 + ], + [ + 850, + 497 + ], + [ + 851, + 520 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 840, + 437 + ], + [ + 837, + 439 + ], + [ + 834, + 441 + ], + [ + 832, + 446 + ], + [ + 834, + 450 + ], + [ + 836, + 454 + ], + [ + 843, + 454 + ], + [ + 846, + 450 + ], + [ + 848, + 444 + ], + [ + 845, + 440 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 838, + 460 + ], + [ + 837, + 519 + ], + [ + 840, + 519 + ], + [ + 841, + 442 + ], + [ + 838, + 441 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 780, + 478 + ], + [ + 783, + 486 + ], + [ + 788, + 491 + ], + [ + 793, + 488 + ], + [ + 793, + 478 + ], + [ + 796, + 477 + ], + [ + 805, + 479 + ], + [ + 805, + 495 + ], + [ + 803, + 502 + ], + [ + 797, + 505 + ], + [ + 787, + 505 + ], + [ + 775, + 505 + ], + [ + 768, + 499 + ], + [ + 770, + 483 + ], + [ + 772, + 479 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 821, + 496 + ], + [ + 821, + 518 + ], + [ + 816, + 519 + ], + [ + 817, + 493 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 796, + 138 + ], + [ + 799, + 519 + ], + [ + 813, + 519 + ], + [ + 806, + 138 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 784, + 520 + ], + [ + 784, + 497 + ], + [ + 790, + 496 + ], + [ + 790, + 519 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 774, + 443 + ], + [ + 777, + 521 + ], + [ + 772, + 520 + ], + [ + 771, + 450 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 718, + 483 + ], + [ + 704, + 485 + ], + [ + 692, + 482 + ], + [ + 677, + 450 + ], + [ + 667, + 433 + ], + [ + 659, + 430 + ], + [ + 647, + 431 + ], + [ + 646, + 445 + ], + [ + 652, + 460 + ], + [ + 645, + 473 + ], + [ + 639, + 481 + ], + [ + 643, + 493 + ], + [ + 650, + 508 + ], + [ + 666, + 511 + ], + [ + 683, + 519 + ], + [ + 698, + 516 + ], + [ + 722, + 513 + ], + [ + 737, + 504 + ], + [ + 737, + 490 + ], + [ + 729, + 486 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 725, + 451 + ], + [ + 726, + 522 + ], + [ + 729, + 522 + ], + [ + 729, + 449 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 728, + 443 + ], + [ + 773, + 443 + ], + [ + 771, + 510 + ], + [ + 729, + 512 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 760, + 492 + ], + [ + 753, + 492 + ], + [ + 755, + 496 + ], + [ + 761, + 499 + ], + [ + 763, + 501 + ], + [ + 758, + 508 + ], + [ + 754, + 511 + ], + [ + 754, + 519 + ], + [ + 759, + 520 + ], + [ + 764, + 516 + ], + [ + 765, + 516 + ], + [ + 769, + 519 + ], + [ + 771, + 512 + ], + [ + 770, + 505 + ], + [ + 770, + 499 + ], + [ + 773, + 497 + ], + [ + 774, + 497 + ], + [ + 774, + 492 + ], + [ + 768, + 492 + ], + [ + 766, + 491 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 735, + 525 + ], + [ + 731, + 525 + ], + [ + 731, + 494 + ], + [ + 735, + 494 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 750, + 524 + ], + [ + 750, + 493 + ], + [ + 753, + 493 
+ ], + [ + 754, + 523 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 717, + 399 + ], + [ + 718, + 429 + ], + [ + 698, + 431 + ], + [ + 700, + 401 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 708, + 526 + ], + [ + 706, + 402 + ], + [ + 709, + 403 + ], + [ + 712, + 528 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 906, + 368 + ], + [ + 910, + 371 + ], + [ + 915, + 373 + ], + [ + 919, + 377 + ], + [ + 913, + 378 + ], + [ + 911, + 381 + ], + [ + 911, + 386 + ], + [ + 917, + 387 + ], + [ + 917, + 391 + ], + [ + 915, + 394 + ], + [ + 911, + 394 + ], + [ + 911, + 398 + ], + [ + 916, + 399 + ], + [ + 917, + 401 + ], + [ + 916, + 407 + ], + [ + 913, + 407 + ], + [ + 912, + 411 + ], + [ + 906, + 412 + ], + [ + 901, + 411 + ], + [ + 900, + 402 + ], + [ + 900, + 369 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 687, + 7 + ], + [ + 691, + 416 + ], + [ + 694, + 517 + ], + [ + 694, + 523 + ], + [ + 665, + 522 + ], + [ + 662, + 178 + ], + [ + 660, + 7 + ], + [ + 660, + 0 + ], + [ + 690, + 0 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 648, + 524 + ], + [ + 643, + 524 + ], + [ + 644, + 491 + ], + [ + 647, + 492 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 631, + 502 + ], + [ + 632, + 496 + ], + [ + 632, + 489 + ], + [ + 630, + 485 + ], + [ + 637, + 484 + ], + [ + 636, + 480 + ], + [ + 632, + 479 + ], + [ + 622, + 481 + ], + [ + 615, + 484 + ], + [ + 613, + 488 + ], + [ + 620, + 491 + ], + [ + 621, + 494 + ], + [ + 625, + 506 + ], + [ + 625, + 517 + ], + [ + 630, + 524 + ], + [ + 636, + 523 + ], + [ + 636, + 517 + ], + [ + 632, + 508 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 741, + 305 + ], + [ + 712, + 256 + ], + [ + 683, + 305 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 675, + 408 + ], + [ + 673, + 469 + ], + [ + 654, + 465 + ], + [ + 657, + 410 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 699, + 479 + ], + [ + 699, + 523 + ], + [ + 687, + 524 + ], + [ + 688, + 482 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 693, + 71 + ], + [ + 707, + 69 + ], + [ + 713, + 75 + ], + [ + 713, + 76 + ], + [ + 1166, + 19 + ], + [ + 1170, + 22 + ], + [ + 1170, + 25 + ], + [ + 718, + 84 + ], + [ + 719, + 85 + ], + [ + 728, + 88 + ], + [ + 739, + 88 + ], + [ + 1036, + 51 + ], + [ + 1036, + 57 + ], + [ + 1024, + 57 + ], + [ + 786, + 87 + ], + [ + 715, + 99 + ], + [ + 704, + 122 + ], + [ + 687, + 135 + ], + [ + 688, + 107 + ], + [ + 688, + 99 + ], + [ + 681, + 94 + ], + [ + 655, + 105 + ], + [ + 640, + 111 + ], + [ + 616, + 118 + ], + [ + 474, + 154 + ], + [ + 476, + 148 + ], + [ + 626, + 108 + ], + [ + 604, + 108 + ], + [ + 450, + 150 + ], + [ + 449, + 145 + ], + [ + 638, + 94 + ], + [ + 638, + 89 + ], + [ + 633, + 76 + ], + [ + 630, + 54 + ], + [ + 643, + 54 + ], + [ + 657, + 62 + ], + [ + 665, + 79 + ], + [ + 674, + 87 + ], + [ + 677, + 87 + ], + [ + 684, + 81 + ], + [ + 689, + 81 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1173, + 97 + ], + [ + 1173, + 102 + ], + [ + 678, + 153 + ], + [ + 678, + 148 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 655, + 155 + ], + [ + 456, + 201 + ], + [ + 456, + 206 + ], + [ + 662, + 158 + ], + [ + 661, + 152 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 583, + 162 + ], + [ + 573, + 163 + ], + [ + 574, + 183 + ], + [ + 581, + 183 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 504, + 215 + ], + [ + 504, + 194 + ], + [ + 495, + 197 + ], + [ + 495, + 215 + ] + ] + }, + { + "label": "pole", + "polygon": 
[ + [ + 601, + 491 + ], + [ + 602, + 526 + ], + [ + 605, + 526 + ], + [ + 604, + 491 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 552, + 491 + ], + [ + 556, + 491 + ], + [ + 556, + 527 + ], + [ + 553, + 527 + ], + [ + 552, + 527 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 497, + 493 + ], + [ + 501, + 493 + ], + [ + 503, + 529 + ], + [ + 498, + 528 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 380, + 491 + ], + [ + 384, + 491 + ], + [ + 382, + 527 + ], + [ + 380, + 527 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 332, + 482 + ], + [ + 332, + 504 + ], + [ + 315, + 503 + ], + [ + 313, + 478 + ], + [ + 325, + 478 + ], + [ + 325, + 483 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 311, + 491 + ], + [ + 311, + 531 + ], + [ + 315, + 531 + ], + [ + 314, + 490 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 208, + 481 + ], + [ + 210, + 503 + ], + [ + 193, + 502 + ], + [ + 194, + 482 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 159, + 7 + ], + [ + 167, + 523 + ], + [ + 180, + 523 + ], + [ + 166, + 10 + ], + [ + 166, + 0 + ], + [ + 159, + 0 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 159, + 491 + ], + [ + 152, + 492 + ], + [ + 153, + 527 + ], + [ + 158, + 527 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 140, + 448 + ], + [ + 134, + 448 + ], + [ + 131, + 450 + ], + [ + 128, + 456 + ], + [ + 128, + 464 + ], + [ + 125, + 477 + ], + [ + 124, + 493 + ], + [ + 122, + 503 + ], + [ + 118, + 506 + ], + [ + 121, + 515 + ], + [ + 128, + 516 + ], + [ + 134, + 514 + ], + [ + 137, + 500 + ], + [ + 139, + 505 + ], + [ + 141, + 513 + ], + [ + 145, + 515 + ], + [ + 153, + 514 + ], + [ + 152, + 507 + ], + [ + 149, + 494 + ], + [ + 148, + 483 + ], + [ + 146, + 468 + ], + [ + 144, + 464 + ], + [ + 142, + 460 + ], + [ + 145, + 455 + ], + [ + 145, + 450 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 31, + 448 + ], + [ + 36, + 450 + ], + [ + 38, + 457 + ], + [ + 44, + 460 + ], + [ + 46, + 470 + ], + [ + 48, + 478 + ], + [ + 48, + 487 + ], + [ + 46, + 498 + ], + [ + 46, + 515 + ], + [ + 36, + 513 + ], + [ + 33, + 507 + ], + [ + 31, + 488 + ], + [ + 26, + 469 + ], + [ + 25, + 459 + ], + [ + 25, + 453 + ], + [ + 26, + 450 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 21, + 447 + ], + [ + 26, + 449 + ], + [ + 28, + 453 + ], + [ + 29, + 458 + ], + [ + 33, + 464 + ], + [ + 34, + 469 + ], + [ + 36, + 475 + ], + [ + 37, + 480 + ], + [ + 34, + 484 + ], + [ + 35, + 494 + ], + [ + 35, + 506 + ], + [ + 36, + 511 + ], + [ + 35, + 515 + ], + [ + 32, + 517 + ], + [ + 29, + 514 + ], + [ + 24, + 501 + ], + [ + 23, + 494 + ], + [ + 21, + 501 + ], + [ + 21, + 508 + ], + [ + 23, + 514 + ], + [ + 21, + 517 + ], + [ + 17, + 515 + ], + [ + 14, + 505 + ], + [ + 11, + 490 + ], + [ + 7, + 482 + ], + [ + 5, + 474 + ], + [ + 7, + 462 + ], + [ + 12, + 456 + ], + [ + 13, + 450 + ], + [ + 17, + 447 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 145, + 304 + ], + [ + 171, + 265 + ], + [ + 199, + 306 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 187, + 317 + ], + [ + 189, + 324 + ], + [ + 187, + 331 + ], + [ + 175, + 340 + ], + [ + 164, + 339 + ], + [ + 155, + 329 + ], + [ + 153, + 320 + ], + [ + 154, + 311 + ], + [ + 159, + 305 + ], + [ + 168, + 305 + ], + [ + 178, + 305 + ], + [ + 184, + 310 + ], + [ + 187, + 313 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 152, + 348 + ], + [ + 167, + 350 + ], + [ + 167, + 404 + ], + [ + 149, + 401 + ], + [ + 149, + 394 + ], + [ 
+ 145, + 392 + ], + [ + 145, + 387 + ], + [ + 149, + 385 + ], + [ + 149, + 380 + ], + [ + 146, + 378 + ], + [ + 145, + 375 + ], + [ + 147, + 371 + ], + [ + 149, + 367 + ], + [ + 150, + 365 + ], + [ + 146, + 361 + ], + [ + 145, + 354 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 210, + 261 + ], + [ + 215, + 265 + ], + [ + 220, + 270 + ], + [ + 221, + 278 + ], + [ + 222, + 289 + ], + [ + 219, + 298 + ], + [ + 214, + 306 + ], + [ + 209, + 309 + ], + [ + 197, + 309 + ], + [ + 184, + 305 + ], + [ + 177, + 294 + ], + [ + 174, + 275 + ], + [ + 174, + 267 + ], + [ + 185, + 260 + ], + [ + 192, + 258 + ], + [ + 200, + 258 + ], + [ + 206, + 260 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 195, + 264 + ], + [ + 194, + 529 + ], + [ + 201, + 528 + ], + [ + 202, + 261 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 63, + 493 + ], + [ + 65, + 533 + ], + [ + 70, + 533 + ], + [ + 70, + 493 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 11, + 532 + ], + [ + 5, + 533 + ], + [ + 5, + 477 + ], + [ + 10, + 476 + ], + [ + 11, + 476 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 11, + 849 + ], + [ + 356, + 704 + ], + [ + 529, + 632 + ], + [ + 589, + 593 + ], + [ + 612, + 574 + ], + [ + 608, + 563 + ], + [ + 583, + 556 + ], + [ + 556, + 547 + ], + [ + 532, + 542 + ], + [ + 517, + 541 + ], + [ + 498, + 540 + ], + [ + 477, + 544 + ], + [ + 457, + 544 + ], + [ + 432, + 544 + ], + [ + 416, + 542 + ], + [ + 399, + 541 + ], + [ + 381, + 538 + ], + [ + 357, + 537 + ], + [ + 343, + 535 + ], + [ + 331, + 529 + ], + [ + 319, + 525 + ], + [ + 296, + 525 + ], + [ + 285, + 526 + ], + [ + 268, + 519 + ], + [ + 243, + 519 + ], + [ + 218, + 519 + ], + [ + 205, + 515 + ], + [ + 193, + 513 + ], + [ + 185, + 508 + ], + [ + 171, + 512 + ], + [ + 160, + 519 + ], + [ + 147, + 519 + ], + [ + 131, + 519 + ], + [ + 98, + 520 + ], + [ + 64, + 522 + ], + [ + 44, + 522 + ], + [ + 28, + 527 + ], + [ + 1, + 526 + ], + [ + 0, + 527 + ], + [ + 0, + 856 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000108_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000108_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..e251fe2682cbac0786e918444af815d4d2b1bda6 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000108_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000109_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000109_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..183d0bfd0ead68b5097e9164527394154051b31e Binary files /dev/null and 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000109_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000109_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000109_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..56e6bed3469fc5d8b8ecab2d9e71bae3a29bdbd0 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000109_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000112_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000112_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..bf10951f298b20c391ee63d0a022d105f5070d16 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000112_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000113_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000113_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..16d576e61d7272025c617e6d6a1e1e51c652a286 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000113_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000113_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000113_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..5144f61b734b7e7800f4cf128314980399a0e60b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000113_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000114_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000114_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..87fb2e69ffddd865d61e0cff08655382b9021aed Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000114_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000114_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000114_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..5aa1d78945e5f7d114ed757be6c6cd562faaf95f Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000114_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000114_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000114_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d6c6cac40f14247f6b5513403ff30062c8279888 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000114_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000115_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000115_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..efa13eab3d5ec25f5f6289987c260f09cb4dc6b2 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000115_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000115_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000115_000019_gtFine_labelTrainIds.png new file mode 100644 index 
0000000000000000000000000000000000000000..dc890ed1317257073f45bf32f0127c6c10d29da7 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000115_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000116_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000116_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..7efbe944945aa6258196ed117e7c99421905be95 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000116_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000117_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000117_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..a7ade3f4b1d42c90f014c5d29b918d680247122d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000117_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000117_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000117_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..674d47ef8d2fe93c5a6c7ee1155d3d0a8ceaf52d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000117_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000118_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000118_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..9db9cdf0ebd245b680b38f4c38bf8860e0889378 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000118_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000119_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000119_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..c23c88516a625ee4928761556a603cbab0939f43 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000119_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000120_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000120_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..dea05ecf61755d4168c368d332b523c461958950 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000120_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000120_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000120_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..cb725829f518aeb4a5366f360da62eeb27bd6ff4 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000120_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000120_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000120_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d49652d1b3faa2913e827ff35caa8b73e47e0280 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000120_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000121_000019_gtFine_instanceIds.png 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000121_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..f8adb19b96fd7ecacf58e06daa0428fa40696b4c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000121_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000121_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000121_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..44decfc0de9d3064fc9277f6fb2c56befe08bafa Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000121_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000121_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000121_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..b730b12ffb7c552ecfabf2232820cec2d4c604f9 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000121_000019_gtFine_polygons.json @@ -0,0 +1,5393 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 551, + 25 + ], + [ + 655, + 273 + ], + [ + 822, + 277 + ], + [ + 990, + 273 + ], + [ + 1245, + 225 + ], + [ + 1242, + 0 + ], + [ + 538, + 0 + ], + [ + 538, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2019, + 512 + ], + [ + 1509, + 509 + ], + [ + 1288, + 499 + ], + [ + 1219, + 479 + ], + [ + 1130, + 474 + ], + [ + 885, + 473 + ], + [ + 677, + 484 + ], + [ + 582, + 517 + ], + [ + 161, + 537 + ], + [ + 54, + 613 + ], + [ + 0, + 649 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 501 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2034, + 564 + ], + [ + 1497, + 542 + ], + [ + 1310, + 530 + ], + [ + 1297, + 530 + ], + [ + 1285, + 519 + ], + [ + 1270, + 515 + ], + [ + 1243, + 509 + ], + [ + 1179, + 507 + ], + [ + 1130, + 507 + ], + [ + 1106, + 507 + ], + [ + 1086, + 503 + ], + [ + 1089, + 497 + ], + [ + 1130, + 493 + ], + [ + 1180, + 487 + ], + [ + 1274, + 486 + ], + [ + 2048, + 499 + ], + [ + 2048, + 566 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 2047, + 680 + ], + [ + 1553, + 707 + ], + [ + 1868, + 1023 + ], + [ + 2047, + 1023 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1551, + 709 + ], + [ + 1515, + 687 + ], + [ + 1406, + 592 + ], + [ + 1408, + 588 + ], + [ + 1590, + 601 + ], + [ + 1919, + 611 + ], + [ + 2048, + 616 + ], + [ + 2048, + 751 + ], + [ + 1644, + 723 + ], + [ + 1589, + 719 + ], + [ + 1564, + 715 + ], + [ + 1556, + 713 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 907, + 497 + ], + [ + 877, + 497 + ], + [ + 838, + 498 + ], + [ + 802, + 493 + ], + [ + 792, + 484 + ], + [ + 1120, + 482 + ], + [ + 1153, + 486 + ], + [ + 1142, + 492 + ], + [ + 1113, + 494 + ], + [ + 1017, + 493 + ], + [ + 989, + 495 + ], + [ + 955, + 496 + ], + [ + 927, + 496 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 275, + 551 + ], + [ + 527, + 549 + ], + [ + 629, + 542 + ], + [ + 701, + 543 + ], + [ + 743, + 540 + ], + [ + 761, + 533 + ], + [ + 779, + 531 + ], + [ + 796, + 527 + ], + [ + 806, + 517 + ], + [ + 814, + 501 + ], + [ + 796, + 498 + ], + [ + 730, + 488 + ], + [ + 382, + 485 + ], + [ + 88, + 511 + ], + [ + 105, + 547 + ], + [ + 220, + 559 + ], + [ + 236, + 553 + ], + [ + 252, + 552 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 20, + 665 + ], + [ + 81, + 656 + ], + [ + 126, + 646 + 
], + [ + 191, + 634 + ], + [ + 242, + 619 + ], + [ + 265, + 604 + ], + [ + 252, + 591 + ], + [ + 192, + 576 + ], + [ + 0, + 571 + ], + [ + 0, + 669 + ], + [ + 16, + 666 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1193, + 166 + ], + [ + 1051, + 166 + ], + [ + 1050, + 158 + ], + [ + 1016, + 158 + ], + [ + 1002, + 167 + ], + [ + 970, + 169 + ], + [ + 959, + 185 + ], + [ + 934, + 188 + ], + [ + 913, + 186 + ], + [ + 912, + 180 + ], + [ + 902, + 179 + ], + [ + 905, + 204 + ], + [ + 879, + 235 + ], + [ + 867, + 249 + ], + [ + 853, + 243 + ], + [ + 810, + 203 + ], + [ + 797, + 202 + ], + [ + 793, + 203 + ], + [ + 794, + 210 + ], + [ + 794, + 213 + ], + [ + 786, + 213 + ], + [ + 785, + 207 + ], + [ + 772, + 209 + ], + [ + 775, + 218 + ], + [ + 767, + 221 + ], + [ + 763, + 217 + ], + [ + 763, + 214 + ], + [ + 755, + 214 + ], + [ + 755, + 221 + ], + [ + 754, + 222 + ], + [ + 746, + 221 + ], + [ + 724, + 228 + ], + [ + 723, + 236 + ], + [ + 697, + 242 + ], + [ + 692, + 235 + ], + [ + 687, + 236 + ], + [ + 689, + 243 + ], + [ + 681, + 244 + ], + [ + 671, + 228 + ], + [ + 671, + 162 + ], + [ + 678, + 157 + ], + [ + 690, + 150 + ], + [ + 648, + 117 + ], + [ + 655, + 101 + ], + [ + 667, + 95 + ], + [ + 663, + 89 + ], + [ + 642, + 77 + ], + [ + 642, + 1 + ], + [ + 641, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 524 + ], + [ + 215, + 532 + ], + [ + 293, + 533 + ], + [ + 343, + 528 + ], + [ + 427, + 524 + ], + [ + 511, + 521 + ], + [ + 575, + 519 + ], + [ + 617, + 518 + ], + [ + 642, + 516 + ], + [ + 674, + 508 + ], + [ + 703, + 515 + ], + [ + 709, + 515 + ], + [ + 709, + 505 + ], + [ + 711, + 491 + ], + [ + 738, + 488 + ], + [ + 791, + 485 + ], + [ + 818, + 490 + ], + [ + 848, + 490 + ], + [ + 894, + 491 + ], + [ + 951, + 491 + ], + [ + 1006, + 490 + ], + [ + 1078, + 489 + ], + [ + 1179, + 484 + ], + [ + 1199, + 484 + ], + [ + 1218, + 418 + ], + [ + 1222, + 275 + ], + [ + 1212, + 167 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1055, + 396 + ], + [ + 1031, + 396 + ], + [ + 1018, + 397 + ], + [ + 1000, + 399 + ], + [ + 990, + 381 + ], + [ + 971, + 376 + ], + [ + 971, + 354 + ], + [ + 978, + 325 + ], + [ + 992, + 304 + ], + [ + 994, + 295 + ], + [ + 1002, + 291 + ], + [ + 1003, + 270 + ], + [ + 1005, + 267 + ], + [ + 1019, + 270 + ], + [ + 1025, + 274 + ], + [ + 1031, + 264 + ], + [ + 1031, + 253 + ], + [ + 1045, + 248 + ], + [ + 1056, + 256 + ], + [ + 1060, + 264 + ], + [ + 1063, + 274 + ], + [ + 1070, + 270 + ], + [ + 1078, + 257 + ], + [ + 1089, + 258 + ], + [ + 1094, + 272 + ], + [ + 1099, + 280 + ], + [ + 1103, + 278 + ], + [ + 1095, + 269 + ], + [ + 1108, + 256 + ], + [ + 1107, + 245 + ], + [ + 1114, + 242 + ], + [ + 1125, + 223 + ], + [ + 1130, + 220 + ], + [ + 1145, + 223 + ], + [ + 1146, + 207 + ], + [ + 1168, + 208 + ], + [ + 1175, + 207 + ], + [ + 1169, + 201 + ], + [ + 1163, + 196 + ], + [ + 1175, + 191 + ], + [ + 1183, + 192 + ], + [ + 1196, + 212 + ], + [ + 1207, + 427 + ], + [ + 1173, + 425 + ], + [ + 1163, + 420 + ], + [ + 1152, + 417 + ], + [ + 1127, + 416 + ], + [ + 1110, + 417 + ], + [ + 1096, + 409 + ], + [ + 1087, + 405 + ], + [ + 1081, + 411 + ], + [ + 1066, + 410 + ], + [ + 1062, + 402 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1115, + 436 + ], + [ + 1122, + 436 + ], + [ + 1122, + 495 + ], + [ + 1117, + 495 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1113, + 447 + ], + [ + 1114, + 424 + ], + [ + 1122, + 423 + ], + [ + 1123, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1129, + 431 + ], + [ + 1133, + 430 
+ ], + [ + 1135, + 480 + ], + [ + 1128, + 480 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1129, + 423 + ], + [ + 1125, + 423 + ], + [ + 1124, + 420 + ], + [ + 1124, + 415 + ], + [ + 1124, + 414 + ], + [ + 1130, + 411 + ], + [ + 1135, + 413 + ], + [ + 1137, + 417 + ], + [ + 1138, + 422 + ], + [ + 1133, + 425 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1135, + 433 + ], + [ + 1128, + 423 + ], + [ + 1122, + 428 + ], + [ + 1130, + 438 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1154, + 435 + ], + [ + 1154, + 418 + ], + [ + 1189, + 418 + ], + [ + 1187, + 434 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1172, + 396 + ], + [ + 1172, + 432 + ], + [ + 1195, + 434 + ], + [ + 1195, + 398 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1164, + 457 + ], + [ + 1141, + 460 + ], + [ + 1133, + 460 + ], + [ + 1128, + 465 + ], + [ + 1122, + 473 + ], + [ + 1122, + 484 + ], + [ + 1125, + 487 + ], + [ + 1128, + 491 + ], + [ + 1135, + 495 + ], + [ + 1148, + 492 + ], + [ + 1165, + 479 + ], + [ + 1169, + 462 + ], + [ + 1169, + 459 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1184, + 17 + ], + [ + 1185, + 131 + ], + [ + 1190, + 380 + ], + [ + 1191, + 387 + ], + [ + 1172, + 392 + ], + [ + 1174, + 403 + ], + [ + 1182, + 408 + ], + [ + 1196, + 410 + ], + [ + 1197, + 410 + ], + [ + 1197, + 475 + ], + [ + 1196, + 500 + ], + [ + 1259, + 505 + ], + [ + 1296, + 503 + ], + [ + 1333, + 505 + ], + [ + 1377, + 506 + ], + [ + 1400, + 509 + ], + [ + 1418, + 510 + ], + [ + 1452, + 510 + ], + [ + 1487, + 512 + ], + [ + 1571, + 518 + ], + [ + 1637, + 518 + ], + [ + 1695, + 521 + ], + [ + 1749, + 522 + ], + [ + 1782, + 518 + ], + [ + 1793, + 520 + ], + [ + 1870, + 525 + ], + [ + 1895, + 523 + ], + [ + 1920, + 519 + ], + [ + 2027, + 524 + ], + [ + 2048, + 524 + ], + [ + 2048, + 523 + ], + [ + 2048, + 0 + ], + [ + 1203, + 0 + ], + [ + 1201, + 7 + ], + [ + 1193, + 7 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1031, + 137 + ], + [ + 1028, + 128 + ], + [ + 1002, + 128 + ], + [ + 1000, + 130 + ], + [ + 1000, + 135 + ], + [ + 1002, + 137 + ], + [ + 1003, + 138 + ], + [ + 1011, + 138 + ], + [ + 1023, + 137 + ], + [ + 1028, + 137 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 969, + 441 + ], + [ + 971, + 497 + ], + [ + 974, + 497 + ], + [ + 972, + 437 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 978, + 436 + ], + [ + 963, + 436 + ], + [ + 964, + 447 + ], + [ + 979, + 449 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 895, + 441 + ], + [ + 895, + 496 + ], + [ + 899, + 496 + ], + [ + 898, + 436 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 887, + 454 + ], + [ + 887, + 441 + ], + [ + 895, + 441 + ], + [ + 894, + 457 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 801, + 404 + ], + [ + 813, + 407 + ], + [ + 817, + 417 + ], + [ + 817, + 426 + ], + [ + 803, + 432 + ], + [ + 792, + 440 + ], + [ + 791, + 460 + ], + [ + 798, + 465 + ], + [ + 809, + 475 + ], + [ + 809, + 484 + ], + [ + 805, + 499 + ], + [ + 798, + 505 + ], + [ + 768, + 502 + ], + [ + 758, + 494 + ], + [ + 762, + 478 + ], + [ + 765, + 471 + ], + [ + 771, + 465 + ], + [ + 778, + 464 + ], + [ + 779, + 450 + ], + [ + 776, + 441 + ], + [ + 780, + 432 + ], + [ + 768, + 421 + ], + [ + 771, + 408 + ], + [ + 785, + 406 + ], + [ + 792, + 402 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1161, + 512 + ], + [ + 1155, + 510 + ], + [ + 1155, + 504 + ], + [ + 1154, + 504 + ], + [ + 1149, + 504 + ], + [ + 
1148, + 510 + ], + [ + 1141, + 510 + ], + [ + 1138, + 506 + ], + [ + 1138, + 493 + ], + [ + 1138, + 485 + ], + [ + 1140, + 480 + ], + [ + 1146, + 476 + ], + [ + 1149, + 473 + ], + [ + 1153, + 469 + ], + [ + 1156, + 461 + ], + [ + 1160, + 455 + ], + [ + 1167, + 453 + ], + [ + 1175, + 455 + ], + [ + 1190, + 455 + ], + [ + 1205, + 456 + ], + [ + 1211, + 458 + ], + [ + 1215, + 464 + ], + [ + 1211, + 485 + ], + [ + 1183, + 495 + ], + [ + 1172, + 502 + ], + [ + 1168, + 506 + ], + [ + 1166, + 507 + ], + [ + 1166, + 508 + ], + [ + 1165, + 511 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1238, + 461 + ], + [ + 1244, + 471 + ], + [ + 1249, + 481 + ], + [ + 1252, + 493 + ], + [ + 1251, + 511 + ], + [ + 1248, + 513 + ], + [ + 1241, + 513 + ], + [ + 1238, + 509 + ], + [ + 1226, + 509 + ], + [ + 1200, + 510 + ], + [ + 1198, + 512 + ], + [ + 1188, + 514 + ], + [ + 1181, + 512 + ], + [ + 1180, + 508 + ], + [ + 1178, + 508 + ], + [ + 1175, + 513 + ], + [ + 1169, + 512 + ], + [ + 1166, + 510 + ], + [ + 1166, + 503 + ], + [ + 1167, + 492 + ], + [ + 1173, + 481 + ], + [ + 1179, + 475 + ], + [ + 1183, + 467 + ], + [ + 1188, + 463 + ], + [ + 1193, + 461 + ], + [ + 1210, + 460 + ], + [ + 1223, + 460 + ], + [ + 1232, + 460 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1275, + 507 + ], + [ + 1274, + 490 + ], + [ + 1257, + 489 + ], + [ + 1257, + 504 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1273, + 446 + ], + [ + 1262, + 449 + ], + [ + 1253, + 469 + ], + [ + 1254, + 481 + ], + [ + 1258, + 491 + ], + [ + 1266, + 491 + ], + [ + 1272, + 490 + ], + [ + 1275, + 485 + ], + [ + 1275, + 473 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1293, + 483 + ], + [ + 1316, + 483 + ], + [ + 1320, + 483 + ], + [ + 1318, + 505 + ], + [ + 1290, + 504 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1315, + 466 + ], + [ + 1304, + 467 + ], + [ + 1298, + 468 + ], + [ + 1290, + 469 + ], + [ + 1289, + 479 + ], + [ + 1296, + 490 + ], + [ + 1306, + 491 + ], + [ + 1318, + 483 + ], + [ + 1322, + 476 + ], + [ + 1322, + 473 + ], + [ + 1320, + 468 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1293, + 390 + ], + [ + 1301, + 390 + ], + [ + 1307, + 516 + ], + [ + 1304, + 517 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1329, + 49 + ], + [ + 1333, + 49 + ], + [ + 1338, + 521 + ], + [ + 1332, + 521 + ], + [ + 1331, + 521 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 1327, + 518 + ], + [ + 1321, + 520 + ], + [ + 1307, + 512 + ], + [ + 1305, + 499 + ], + [ + 1309, + 487 + ], + [ + 1322, + 482 + ], + [ + 1326, + 482 + ], + [ + 1326, + 475 + ], + [ + 1325, + 467 + ], + [ + 1333, + 464 + ], + [ + 1343, + 464 + ], + [ + 1346, + 463 + ], + [ + 1347, + 456 + ], + [ + 1365, + 456 + ], + [ + 1380, + 459 + ], + [ + 1376, + 471 + ], + [ + 1383, + 479 + ], + [ + 1376, + 493 + ], + [ + 1373, + 504 + ], + [ + 1372, + 514 + ], + [ + 1363, + 518 + ], + [ + 1349, + 518 + ], + [ + 1343, + 515 + ], + [ + 1338, + 519 + ], + [ + 1333, + 519 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1307, + 54 + ], + [ + 1300, + 48 + ], + [ + 1299, + 43 + ], + [ + 1327, + 42 + ], + [ + 1333, + 47 + ], + [ + 1331, + 52 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1326, + 410 + ], + [ + 1321, + 406 + ], + [ + 1321, + 401 + ], + [ + 1322, + 397 + ], + [ + 1323, + 396 + ], + [ + 1325, + 394 + ], + [ + 1331, + 391 + ], + [ + 1338, + 391 + ], + [ + 1344, + 397 + ], + [ + 1344, + 404 + ], + [ + 1343, + 410 + ], + [ + 1335, + 412 + ], + [ + 1332, + 412 + ] 
+ ] + }, + { + "label": "pole", + "polygon": [ + [ + 1370, + 344 + ], + [ + 1377, + 347 + ], + [ + 1380, + 527 + ], + [ + 1370, + 528 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1366, + 404 + ], + [ + 1355, + 403 + ], + [ + 1354, + 396 + ], + [ + 1365, + 396 + ], + [ + 1367, + 392 + ], + [ + 1364, + 387 + ], + [ + 1354, + 386 + ], + [ + 1354, + 381 + ], + [ + 1364, + 381 + ], + [ + 1365, + 379 + ], + [ + 1365, + 375 + ], + [ + 1355, + 370 + ], + [ + 1350, + 365 + ], + [ + 1359, + 364 + ], + [ + 1366, + 364 + ], + [ + 1372, + 365 + ], + [ + 1370, + 404 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1536, + 475 + ], + [ + 1572, + 475 + ], + [ + 1567, + 520 + ], + [ + 1434, + 512 + ], + [ + 1427, + 492 + ], + [ + 1441, + 476 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1396, + 376 + ], + [ + 1487, + 320 + ], + [ + 1557, + 307 + ], + [ + 1461, + 365 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 1475, + 471 + ], + [ + 1468, + 481 + ], + [ + 1464, + 477 + ], + [ + 1467, + 466 + ], + [ + 1474, + 463 + ], + [ + 1474, + 457 + ], + [ + 1461, + 461 + ], + [ + 1453, + 467 + ], + [ + 1444, + 475 + ], + [ + 1443, + 486 + ], + [ + 1455, + 491 + ], + [ + 1457, + 493 + ], + [ + 1453, + 511 + ], + [ + 1459, + 524 + ], + [ + 1470, + 531 + ], + [ + 1484, + 533 + ], + [ + 1490, + 530 + ], + [ + 1499, + 537 + ], + [ + 1501, + 533 + ], + [ + 1505, + 532 + ], + [ + 1519, + 533 + ], + [ + 1526, + 533 + ], + [ + 1522, + 525 + ], + [ + 1515, + 510 + ], + [ + 1514, + 497 + ], + [ + 1513, + 487 + ], + [ + 1513, + 481 + ], + [ + 1505, + 477 + ], + [ + 1498, + 476 + ], + [ + 1494, + 477 + ], + [ + 1499, + 472 + ], + [ + 1505, + 469 + ], + [ + 1507, + 463 + ], + [ + 1502, + 459 + ], + [ + 1489, + 464 + ], + [ + 1483, + 465 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1391, + 527 + ], + [ + 1386, + 529 + ], + [ + 1386, + 477 + ], + [ + 1391, + 478 + ], + [ + 1391, + 478 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1430, + 533 + ], + [ + 1423, + 533 + ], + [ + 1422, + 479 + ], + [ + 1429, + 480 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1466, + 533 + ], + [ + 1472, + 533 + ], + [ + 1472, + 502 + ], + [ + 1468, + 502 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1514, + 538 + ], + [ + 1508, + 538 + ], + [ + 1507, + 478 + ], + [ + 1514, + 478 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1554, + 533 + ], + [ + 1544, + 538 + ], + [ + 1531, + 530 + ], + [ + 1529, + 517 + ], + [ + 1529, + 505 + ], + [ + 1534, + 499 + ], + [ + 1543, + 493 + ], + [ + 1547, + 493 + ], + [ + 1548, + 478 + ], + [ + 1546, + 473 + ], + [ + 1536, + 477 + ], + [ + 1531, + 474 + ], + [ + 1541, + 467 + ], + [ + 1560, + 464 + ], + [ + 1572, + 465 + ], + [ + 1571, + 471 + ], + [ + 1564, + 475 + ], + [ + 1557, + 486 + ], + [ + 1556, + 498 + ], + [ + 1564, + 507 + ], + [ + 1566, + 500 + ], + [ + 1565, + 491 + ], + [ + 1561, + 485 + ], + [ + 1565, + 480 + ], + [ + 1572, + 480 + ], + [ + 1573, + 479 + ], + [ + 1589, + 477 + ], + [ + 1596, + 474 + ], + [ + 1603, + 476 + ], + [ + 1603, + 488 + ], + [ + 1597, + 492 + ], + [ + 1587, + 494 + ], + [ + 1594, + 500 + ], + [ + 1599, + 505 + ], + [ + 1602, + 514 + ], + [ + 1602, + 523 + ], + [ + 1599, + 533 + ], + [ + 1595, + 537 + ], + [ + 1584, + 538 + ], + [ + 1577, + 529 + ], + [ + 1575, + 525 + ], + [ + 1561, + 522 + ], + [ + 1559, + 524 + ], + [ + 1558, + 527 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1567, + 539 + ], + [ + 1560, + 540 + ], + [ + 1560, + 479 + ], + [ + 
1567, + 479 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1622, + 535 + ], + [ + 1615, + 535 + ], + [ + 1608, + 528 + ], + [ + 1604, + 516 + ], + [ + 1603, + 508 + ], + [ + 1607, + 498 + ], + [ + 1612, + 497 + ], + [ + 1616, + 496 + ], + [ + 1616, + 481 + ], + [ + 1616, + 478 + ], + [ + 1632, + 468 + ], + [ + 1637, + 488 + ], + [ + 1644, + 490 + ], + [ + 1657, + 488 + ], + [ + 1657, + 479 + ], + [ + 1647, + 478 + ], + [ + 1650, + 470 + ], + [ + 1669, + 466 + ], + [ + 1680, + 463 + ], + [ + 1678, + 470 + ], + [ + 1670, + 477 + ], + [ + 1668, + 488 + ], + [ + 1678, + 500 + ], + [ + 1691, + 522 + ], + [ + 1695, + 533 + ], + [ + 1695, + 541 + ], + [ + 1683, + 541 + ], + [ + 1676, + 520 + ], + [ + 1670, + 511 + ], + [ + 1667, + 501 + ], + [ + 1664, + 499 + ], + [ + 1650, + 527 + ], + [ + 1646, + 529 + ], + [ + 1634, + 532 + ], + [ + 1630, + 537 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1681, + 476 + ], + [ + 1687, + 477 + ], + [ + 1689, + 544 + ], + [ + 1681, + 543 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1816, + 536 + ], + [ + 1815, + 541 + ], + [ + 1808, + 546 + ], + [ + 1796, + 542 + ], + [ + 1782, + 537 + ], + [ + 1775, + 530 + ], + [ + 1765, + 515 + ], + [ + 1773, + 511 + ], + [ + 1780, + 510 + ], + [ + 1793, + 512 + ], + [ + 1806, + 518 + ], + [ + 1812, + 525 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1855, + 477 + ], + [ + 1864, + 475 + ], + [ + 1872, + 535 + ], + [ + 1862, + 536 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1961, + 527 + ], + [ + 1969, + 529 + ], + [ + 1969, + 468 + ], + [ + 1960, + 469 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1643, + 636 + ], + [ + 1620, + 636 + ], + [ + 1617, + 115 + ], + [ + 1624, + 104 + ], + [ + 1623, + 1 + ], + [ + 1623, + 0 + ], + [ + 1635, + 0 + ], + [ + 1634, + 2 + ], + [ + 1634, + 106 + ], + [ + 1638, + 121 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1603, + 143 + ], + [ + 1582, + 128 + ], + [ + 1594, + 116 + ], + [ + 1601, + 121 + ], + [ + 1608, + 119 + ], + [ + 1622, + 118 + ], + [ + 1615, + 137 + ], + [ + 1608, + 137 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1578, + 155 + ], + [ + 1576, + 164 + ], + [ + 1542, + 166 + ], + [ + 1546, + 190 + ], + [ + 1571, + 194 + ], + [ + 1578, + 201 + ], + [ + 1578, + 210 + ], + [ + 1543, + 212 + ], + [ + 1547, + 236 + ], + [ + 1569, + 237 + ], + [ + 1577, + 246 + ], + [ + 1576, + 254 + ], + [ + 1542, + 253 + ], + [ + 1545, + 275 + ], + [ + 1569, + 280 + ], + [ + 1578, + 291 + ], + [ + 1579, + 293 + ], + [ + 1608, + 294 + ], + [ + 1607, + 154 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1655, + 153 + ], + [ + 1656, + 288 + ], + [ + 1610, + 289 + ], + [ + 1609, + 153 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1715, + 322 + ], + [ + 1703, + 322 + ], + [ + 1698, + 323 + ], + [ + 1698, + 331 + ], + [ + 1698, + 367 + ], + [ + 1702, + 368 + ], + [ + 1716, + 368 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1697, + 309 + ], + [ + 1695, + 295 + ], + [ + 1696, + 276 + ], + [ + 1698, + 263 + ], + [ + 1718, + 255 + ], + [ + 1726, + 269 + ], + [ + 1723, + 309 + ], + [ + 1715, + 322 + ], + [ + 1707, + 322 + ], + [ + 1699, + 313 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1779, + 616 + ], + [ + 1709, + 615 + ], + [ + 1710, + 15 + ], + [ + 1709, + 0 + ], + [ + 1764, + 0 + ], + [ + 1766, + 1 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1780, + 110 + ], + [ + 1776, + 110 + ], + [ + 1771, + 110 + ], + [ + 1771, + 123 + ], + 
[ + 1779, + 123 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1783, + 178 + ], + [ + 1781, + 150 + ], + [ + 1769, + 147 + ], + [ + 1770, + 179 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1773, + 214 + ], + [ + 1781, + 202 + ], + [ + 1771, + 192 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1793, + 238 + ], + [ + 1771, + 238 + ], + [ + 1774, + 319 + ], + [ + 1794, + 317 + ], + [ + 1795, + 306 + ], + [ + 1795, + 303 + ], + [ + 1807, + 300 + ], + [ + 1809, + 296 + ], + [ + 1807, + 287 + ], + [ + 1805, + 283 + ], + [ + 1792, + 284 + ], + [ + 1792, + 266 + ], + [ + 1808, + 259 + ], + [ + 1811, + 256 + ], + [ + 1807, + 248 + ], + [ + 1804, + 244 + ], + [ + 1792, + 245 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1797, + 377 + ], + [ + 1800, + 436 + ], + [ + 1780, + 436 + ], + [ + 1779, + 449 + ], + [ + 1776, + 450 + ], + [ + 1773, + 375 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1693, + 383 + ], + [ + 1734, + 384 + ], + [ + 1735, + 490 + ], + [ + 1695, + 485 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1695, + 579 + ], + [ + 1696, + 515 + ], + [ + 1716, + 514 + ], + [ + 1713, + 585 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1667, + 242 + ], + [ + 1710, + 197 + ], + [ + 1757, + 242 + ], + [ + 1713, + 287 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 720, + 290 + ], + [ + 726, + 290 + ], + [ + 732, + 422 + ], + [ + 724, + 423 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 746, + 404 + ], + [ + 787, + 402 + ], + [ + 787, + 427 + ], + [ + 743, + 428 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 753, + 382 + ], + [ + 722, + 383 + ], + [ + 721, + 433 + ], + [ + 752, + 436 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 708, + 429 + ], + [ + 707, + 442 + ], + [ + 712, + 518 + ], + [ + 759, + 518 + ], + [ + 759, + 503 + ], + [ + 768, + 504 + ], + [ + 775, + 507 + ], + [ + 803, + 505 + ], + [ + 802, + 481 + ], + [ + 775, + 483 + ], + [ + 780, + 425 + ], + [ + 761, + 422 + ], + [ + 733, + 423 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 700, + 443 + ], + [ + 729, + 443 + ], + [ + 726, + 519 + ], + [ + 701, + 519 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 686, + 462 + ], + [ + 683, + 473 + ], + [ + 685, + 483 + ], + [ + 687, + 497 + ], + [ + 692, + 513 + ], + [ + 697, + 517 + ], + [ + 704, + 517 + ], + [ + 710, + 513 + ], + [ + 710, + 505 + ], + [ + 705, + 498 + ], + [ + 704, + 483 + ], + [ + 707, + 478 + ], + [ + 713, + 474 + ], + [ + 713, + 465 + ], + [ + 705, + 462 + ], + [ + 702, + 454 + ], + [ + 699, + 446 + ], + [ + 694, + 444 + ], + [ + 687, + 446 + ], + [ + 684, + 456 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 693, + 537 + ], + [ + 704, + 537 + ], + [ + 697, + 285 + ], + [ + 697, + 273 + ], + [ + 703, + 266 + ], + [ + 714, + 258 + ], + [ + 721, + 256 + ], + [ + 826, + 228 + ], + [ + 842, + 228 + ], + [ + 865, + 228 + ], + [ + 865, + 225 + ], + [ + 825, + 225 + ], + [ + 719, + 250 + ], + [ + 705, + 256 + ], + [ + 694, + 262 + ], + [ + 687, + 277 + ], + [ + 688, + 298 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 881, + 270 + ], + [ + 856, + 270 + ], + [ + 857, + 219 + ], + [ + 880, + 217 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 700, + 318 + ], + [ + 709, + 317 + ], + [ + 713, + 323 + ], + [ + 724, + 324 + ], + [ + 722, + 326 + ], + [ + 715, + 328 + ], + [ + 714, + 333 + ], + [ + 719, + 336 + ], + [ + 720, + 339 + ], + [ + 718, + 341 + ], + [ + 
713, + 341 + ], + [ + 713, + 348 + ], + [ + 718, + 352 + ], + [ + 719, + 353 + ], + [ + 718, + 356 + ], + [ + 714, + 356 + ], + [ + 714, + 365 + ], + [ + 718, + 366 + ], + [ + 718, + 369 + ], + [ + 715, + 373 + ], + [ + 713, + 376 + ], + [ + 699, + 375 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 669, + 426 + ], + [ + 654, + 411 + ], + [ + 670, + 397 + ], + [ + 683, + 410 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 677, + 373 + ], + [ + 671, + 369 + ], + [ + 662, + 375 + ], + [ + 657, + 385 + ], + [ + 659, + 396 + ], + [ + 663, + 403 + ], + [ + 673, + 403 + ], + [ + 683, + 396 + ], + [ + 685, + 387 + ], + [ + 683, + 377 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 656, + 350 + ], + [ + 656, + 334 + ], + [ + 695, + 335 + ], + [ + 696, + 349 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 687, + 376 + ], + [ + 688, + 423 + ], + [ + 703, + 423 + ], + [ + 703, + 421 + ], + [ + 709, + 417 + ], + [ + 709, + 413 + ], + [ + 704, + 410 + ], + [ + 702, + 408 + ], + [ + 703, + 406 + ], + [ + 707, + 405 + ], + [ + 708, + 401 + ], + [ + 705, + 397 + ], + [ + 700, + 396 + ], + [ + 700, + 391 + ], + [ + 706, + 391 + ], + [ + 707, + 384 + ], + [ + 702, + 381 + ], + [ + 698, + 381 + ], + [ + 698, + 377 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 634, + 522 + ], + [ + 640, + 492 + ], + [ + 634, + 488 + ], + [ + 618, + 487 + ], + [ + 611, + 493 + ], + [ + 616, + 519 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 600, + 537 + ], + [ + 594, + 248 + ], + [ + 594, + 242 + ], + [ + 597, + 241 + ], + [ + 601, + 244 + ], + [ + 601, + 260 + ], + [ + 604, + 261 + ], + [ + 604, + 249 + ], + [ + 604, + 239 + ], + [ + 607, + 240 + ], + [ + 610, + 249 + ], + [ + 611, + 257 + ], + [ + 611, + 263 + ], + [ + 616, + 264 + ], + [ + 616, + 253 + ], + [ + 614, + 242 + ], + [ + 607, + 234 + ], + [ + 601, + 232 + ], + [ + 599, + 230 + ], + [ + 592, + 230 + ], + [ + 588, + 238 + ], + [ + 588, + 239 + ], + [ + 581, + 232 + ], + [ + 573, + 232 + ], + [ + 564, + 235 + ], + [ + 559, + 242 + ], + [ + 552, + 253 + ], + [ + 552, + 263 + ], + [ + 555, + 264 + ], + [ + 556, + 264 + ], + [ + 556, + 251 + ], + [ + 560, + 244 + ], + [ + 565, + 240 + ], + [ + 574, + 239 + ], + [ + 581, + 241 + ], + [ + 586, + 247 + ], + [ + 590, + 256 + ], + [ + 591, + 539 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 570, + 278 + ], + [ + 557, + 265 + ], + [ + 552, + 265 + ], + [ + 544, + 279 + ], + [ + 547, + 282 + ], + [ + 557, + 282 + ], + [ + 563, + 281 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 606, + 263 + ], + [ + 617, + 265 + ], + [ + 626, + 278 + ], + [ + 618, + 283 + ], + [ + 610, + 284 + ], + [ + 605, + 278 + ], + [ + 604, + 269 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 599, + 263 + ], + [ + 606, + 263 + ], + [ + 613, + 275 + ], + [ + 607, + 281 + ], + [ + 602, + 282 + ], + [ + 592, + 280 + ], + [ + 589, + 274 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 379, + 360 + ], + [ + 386, + 361 + ], + [ + 391, + 546 + ], + [ + 378, + 546 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 375, + 375 + ], + [ + 364, + 375 + ], + [ + 359, + 376 + ], + [ + 360, + 381 + ], + [ + 367, + 385 + ], + [ + 368, + 390 + ], + [ + 362, + 390 + ], + [ + 356, + 392 + ], + [ + 357, + 394 + ], + [ + 361, + 398 + ], + [ + 367, + 400 + ], + [ + 367, + 403 + ], + [ + 362, + 405 + ], + [ + 359, + 409 + ], + [ + 361, + 414 + ], + [ + 367, + 416 + ], + [ + 377, + 419 + ], + [ + 380, + 419 + ] + ] + }, + { + "label": 
"traffic sign", + "polygon": [ + [ + 393, + 399 + ], + [ + 397, + 404 + ], + [ + 399, + 409 + ], + [ + 397, + 417 + ], + [ + 392, + 422 + ], + [ + 381, + 423 + ], + [ + 375, + 417 + ], + [ + 370, + 410 + ], + [ + 371, + 406 + ], + [ + 374, + 401 + ], + [ + 379, + 397 + ], + [ + 386, + 397 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 242, + 532 + ], + [ + 242, + 506 + ], + [ + 252, + 504 + ], + [ + 272, + 503 + ], + [ + 272, + 531 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 263, + 464 + ], + [ + 259, + 460 + ], + [ + 255, + 458 + ], + [ + 244, + 475 + ], + [ + 232, + 469 + ], + [ + 226, + 478 + ], + [ + 229, + 494 + ], + [ + 252, + 499 + ], + [ + 256, + 505 + ], + [ + 266, + 507 + ], + [ + 270, + 499 + ], + [ + 271, + 487 + ], + [ + 271, + 477 + ], + [ + 270, + 464 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 225, + 155 + ], + [ + 234, + 155 + ], + [ + 241, + 539 + ], + [ + 231, + 539 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 153, + 4 + ], + [ + 164, + 505 + ], + [ + 170, + 506 + ], + [ + 162, + 7 + ], + [ + 161, + 0 + ], + [ + 153, + 0 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 125, + 212 + ], + [ + 125, + 242 + ], + [ + 170, + 254 + ], + [ + 166, + 222 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 92, + 302 + ], + [ + 94, + 271 + ], + [ + 165, + 270 + ], + [ + 166, + 301 + ], + [ + 135, + 303 + ], + [ + 136, + 314 + ], + [ + 107, + 316 + ], + [ + 107, + 304 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 145, + 382 + ], + [ + 142, + 310 + ], + [ + 157, + 313 + ], + [ + 177, + 320 + ], + [ + 180, + 385 + ], + [ + 161, + 388 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 64, + 469 + ], + [ + 60, + 464 + ], + [ + 54, + 463 + ], + [ + 42, + 468 + ], + [ + 44, + 482 + ], + [ + 52, + 493 + ], + [ + 69, + 491 + ], + [ + 68, + 477 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 91, + 461 + ], + [ + 83, + 454 + ], + [ + 73, + 457 + ], + [ + 71, + 462 + ], + [ + 67, + 468 + ], + [ + 62, + 473 + ], + [ + 53, + 480 + ], + [ + 57, + 500 + ], + [ + 74, + 501 + ], + [ + 93, + 492 + ], + [ + 93, + 484 + ], + [ + 90, + 475 + ], + [ + 88, + 469 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 115, + 617 + ], + [ + 97, + 613 + ], + [ + 89, + 613 + ], + [ + 80, + 600 + ], + [ + 70, + 602 + ], + [ + 62, + 607 + ], + [ + 56, + 612 + ], + [ + 46, + 616 + ], + [ + 30, + 616 + ], + [ + 27, + 616 + ], + [ + 14, + 619 + ], + [ + 0, + 617 + ], + [ + 0, + 612 + ], + [ + 0, + 491 + ], + [ + 0, + 480 + ], + [ + 6, + 481 + ], + [ + 11, + 483 + ], + [ + 22, + 487 + ], + [ + 34, + 488 + ], + [ + 51, + 489 + ], + [ + 70, + 490 + ], + [ + 90, + 491 + ], + [ + 95, + 491 + ], + [ + 89, + 485 + ], + [ + 94, + 476 + ], + [ + 112, + 478 + ], + [ + 129, + 478 + ], + [ + 140, + 483 + ], + [ + 154, + 488 + ], + [ + 172, + 490 + ], + [ + 177, + 498 + ], + [ + 191, + 497 + ], + [ + 203, + 499 + ], + [ + 224, + 499 + ], + [ + 229, + 499 + ], + [ + 229, + 508 + ], + [ + 225, + 513 + ], + [ + 219, + 530 + ], + [ + 236, + 531 + ], + [ + 252, + 542 + ], + [ + 266, + 553 + ], + [ + 267, + 572 + ], + [ + 265, + 590 + ], + [ + 260, + 595 + ], + [ + 246, + 597 + ], + [ + 221, + 600 + ], + [ + 205, + 592 + ], + [ + 205, + 602 + ], + [ + 197, + 610 + ], + [ + 173, + 611 + ], + [ + 166, + 616 + ], + [ + 155, + 616 + ], + [ + 142, + 614 + ], + [ + 129, + 611 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 13, + 439 + ], + [ + 37, + 415 + ], + [ + 54, + 402 + ], + [ + 77, + 407 + ], + [ 
+ 87, + 390 + ], + [ + 122, + 381 + ], + [ + 131, + 368 + ], + [ + 94, + 361 + ], + [ + 112, + 356 + ], + [ + 133, + 347 + ], + [ + 140, + 339 + ], + [ + 123, + 340 + ], + [ + 44, + 331 + ], + [ + 25, + 326 + ], + [ + 39, + 302 + ], + [ + 64, + 278 + ], + [ + 76, + 270 + ], + [ + 73, + 256 + ], + [ + 56, + 257 + ], + [ + 63, + 249 + ], + [ + 76, + 245 + ], + [ + 66, + 235 + ], + [ + 39, + 245 + ], + [ + 13, + 251 + ], + [ + 0, + 250 + ], + [ + 0, + 438 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 125, + 212 + ], + [ + 125, + 242 + ], + [ + 170, + 254 + ], + [ + 166, + 222 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 92, + 302 + ], + [ + 94, + 271 + ], + [ + 165, + 270 + ], + [ + 166, + 301 + ], + [ + 135, + 303 + ], + [ + 136, + 314 + ], + [ + 107, + 316 + ], + [ + 107, + 304 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000123_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000123_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..bfc8c9f6bc2bf0c1e697bacc054fe95285599286 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000123_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000123_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000123_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..c811c1ee488449713f51d5bb4c84ce6963008130 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000123_000019_gtFine_polygons.json @@ -0,0 +1,4567 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 402, + 34 + ], + [ + 605, + 269 + ], + [ + 1084, + 266 + ], + [ + 1311, + 193 + ], + [ + 1325, + 0 + ], + [ + 383, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 726, + 468 + ], + [ + 774, + 462 + ], + [ + 852, + 456 + ], + [ + 954, + 454 + ], + [ + 1047, + 463 + ], + [ + 1315, + 477 + ], + [ + 2048, + 574 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 519 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 169, + 589 + ], + [ + 387, + 545 + ], + [ + 557, + 508 + ], + [ + 598, + 502 + ], + [ + 626, + 497 + ], + [ + 643, + 489 + ], + [ + 653, + 487 + ], + [ + 790, + 482 + ], + [ + 819, + 480 + ], + [ + 825, + 479 + ], + [ + 802, + 474 + ], + [ + 687, + 459 + ], + [ + 642, + 454 + ], + [ + 621, + 449 + ], + [ + 565, + 449 + ], + [ + 20, + 489 + ], + [ + 29, + 629 + ], + [ + 128, + 598 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 911, + 475 + ], + [ + 878, + 475 + ], + [ + 842, + 475 + ], + [ + 820, + 472 + ], + [ + 805, + 468 + ], + [ + 791, + 458 + ], + [ + 814, + 
457 + ], + [ + 920, + 458 + ], + [ + 991, + 456 + ], + [ + 1021, + 452 + ], + [ + 1018, + 472 + ], + [ + 999, + 473 + ], + [ + 974, + 477 + ], + [ + 940, + 477 + ], + [ + 922, + 476 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1948, + 746 + ], + [ + 1414, + 602 + ], + [ + 1235, + 548 + ], + [ + 1117, + 516 + ], + [ + 1088, + 505 + ], + [ + 1076, + 488 + ], + [ + 1105, + 477 + ], + [ + 1163, + 469 + ], + [ + 1196, + 466 + ], + [ + 1237, + 462 + ], + [ + 1287, + 463 + ], + [ + 2048, + 533 + ], + [ + 2048, + 775 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1238, + 163 + ], + [ + 1231, + 168 + ], + [ + 1211, + 168 + ], + [ + 1210, + 175 + ], + [ + 1170, + 159 + ], + [ + 1155, + 145 + ], + [ + 1151, + 147 + ], + [ + 1123, + 136 + ], + [ + 1120, + 137 + ], + [ + 1109, + 143 + ], + [ + 1109, + 149 + ], + [ + 1106, + 149 + ], + [ + 1086, + 148 + ], + [ + 1023, + 88 + ], + [ + 1022, + 76 + ], + [ + 1017, + 76 + ], + [ + 1017, + 55 + ], + [ + 1013, + 53 + ], + [ + 1011, + 55 + ], + [ + 1011, + 76 + ], + [ + 1007, + 77 + ], + [ + 1003, + 83 + ], + [ + 1001, + 84 + ], + [ + 998, + 84 + ], + [ + 997, + 78 + ], + [ + 991, + 79 + ], + [ + 991, + 60 + ], + [ + 988, + 61 + ], + [ + 986, + 82 + ], + [ + 982, + 83 + ], + [ + 974, + 83 + ], + [ + 972, + 83 + ], + [ + 967, + 90 + ], + [ + 964, + 93 + ], + [ + 963, + 96 + ], + [ + 958, + 95 + ], + [ + 955, + 90 + ], + [ + 948, + 91 + ], + [ + 946, + 94 + ], + [ + 944, + 95 + ], + [ + 941, + 96 + ], + [ + 936, + 98 + ], + [ + 935, + 104 + ], + [ + 934, + 110 + ], + [ + 931, + 114 + ], + [ + 926, + 116 + ], + [ + 898, + 115 + ], + [ + 891, + 119 + ], + [ + 877, + 120 + ], + [ + 877, + 110 + ], + [ + 868, + 110 + ], + [ + 867, + 120 + ], + [ + 861, + 120 + ], + [ + 859, + 120 + ], + [ + 861, + 144 + ], + [ + 850, + 166 + ], + [ + 848, + 173 + ], + [ + 815, + 198 + ], + [ + 814, + 220 + ], + [ + 812, + 232 + ], + [ + 777, + 239 + ], + [ + 685, + 276 + ], + [ + 616, + 291 + ], + [ + 616, + 201 + ], + [ + 616, + 175 + ], + [ + 602, + 114 + ], + [ + 605, + 108 + ], + [ + 605, + 95 + ], + [ + 591, + 98 + ], + [ + 581, + 54 + ], + [ + 580, + 30 + ], + [ + 582, + 18 + ], + [ + 574, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 549 + ], + [ + 95, + 558 + ], + [ + 137, + 538 + ], + [ + 150, + 536 + ], + [ + 177, + 533 + ], + [ + 201, + 528 + ], + [ + 334, + 508 + ], + [ + 382, + 501 + ], + [ + 413, + 497 + ], + [ + 431, + 497 + ], + [ + 452, + 493 + ], + [ + 459, + 492 + ], + [ + 473, + 493 + ], + [ + 487, + 488 + ], + [ + 506, + 483 + ], + [ + 519, + 479 + ], + [ + 534, + 477 + ], + [ + 557, + 470 + ], + [ + 580, + 468 + ], + [ + 588, + 463 + ], + [ + 595, + 460 + ], + [ + 601, + 454 + ], + [ + 613, + 450 + ], + [ + 624, + 447 + ], + [ + 640, + 445 + ], + [ + 662, + 446 + ], + [ + 694, + 446 + ], + [ + 741, + 450 + ], + [ + 778, + 455 + ], + [ + 814, + 460 + ], + [ + 840, + 462 + ], + [ + 860, + 464 + ], + [ + 870, + 465 + ], + [ + 887, + 465 + ], + [ + 895, + 467 + ], + [ + 906, + 466 + ], + [ + 921, + 465 + ], + [ + 935, + 466 + ], + [ + 957, + 468 + ], + [ + 985, + 468 + ], + [ + 1016, + 468 + ], + [ + 1136, + 463 + ], + [ + 1269, + 459 + ], + [ + 1256, + 158 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1235, + 442 + ], + [ + 1211, + 440 + ], + [ + 1204, + 447 + ], + [ + 1200, + 462 + ], + [ + 1203, + 473 + ], + [ + 1215, + 475 + ], + [ + 1228, + 475 + ], + [ + 1237, + 467 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1008, + 427 + ], + [ + 1008, + 417 + ], + [ + 1003, + 415 + ], + [ + 994, + 422 + ], + [ + 994, + 430 + 
], + [ + 992, + 448 + ], + [ + 994, + 460 + ], + [ + 1005, + 468 + ], + [ + 1007, + 458 + ], + [ + 1007, + 437 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1091, + 408 + ], + [ + 1085, + 405 + ], + [ + 1081, + 409 + ], + [ + 1078, + 421 + ], + [ + 1081, + 444 + ], + [ + 1086, + 444 + ], + [ + 1093, + 438 + ], + [ + 1092, + 421 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1199, + 442 + ], + [ + 1175, + 441 + ], + [ + 1168, + 440 + ], + [ + 1161, + 450 + ], + [ + 1161, + 460 + ], + [ + 1167, + 472 + ], + [ + 1180, + 475 + ], + [ + 1192, + 476 + ], + [ + 1198, + 479 + ], + [ + 1202, + 477 + ], + [ + 1212, + 477 + ], + [ + 1217, + 475 + ], + [ + 1215, + 466 + ], + [ + 1211, + 460 + ], + [ + 1210, + 457 + ], + [ + 1214, + 457 + ], + [ + 1215, + 453 + ], + [ + 1209, + 451 + ], + [ + 1205, + 451 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1142, + 439 + ], + [ + 1164, + 440 + ], + [ + 1169, + 444 + ], + [ + 1174, + 452 + ], + [ + 1178, + 453 + ], + [ + 1180, + 460 + ], + [ + 1181, + 470 + ], + [ + 1181, + 477 + ], + [ + 1171, + 479 + ], + [ + 1147, + 481 + ], + [ + 1119, + 474 + ], + [ + 1112, + 452 + ], + [ + 1116, + 445 + ], + [ + 1116, + 440 + ], + [ + 1122, + 438 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1178, + 339 + ], + [ + 1183, + 339 + ], + [ + 1183, + 480 + ], + [ + 1180, + 480 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1112, + 368 + ], + [ + 1110, + 410 + ], + [ + 1110, + 461 + ], + [ + 1116, + 439 + ], + [ + 1127, + 439 + ], + [ + 1124, + 357 + ], + [ + 1139, + 354 + ], + [ + 1157, + 352 + ], + [ + 1184, + 354 + ], + [ + 1204, + 352 + ], + [ + 1214, + 338 + ], + [ + 1208, + 327 + ], + [ + 1205, + 321 + ], + [ + 1188, + 324 + ], + [ + 1182, + 318 + ], + [ + 1196, + 311 + ], + [ + 1200, + 301 + ], + [ + 1205, + 291 + ], + [ + 1200, + 280 + ], + [ + 1210, + 270 + ], + [ + 1212, + 257 + ], + [ + 1204, + 257 + ], + [ + 1194, + 263 + ], + [ + 1181, + 263 + ], + [ + 1167, + 260 + ], + [ + 1169, + 252 + ], + [ + 1169, + 242 + ], + [ + 1169, + 228 + ], + [ + 1167, + 218 + ], + [ + 1158, + 216 + ], + [ + 1144, + 228 + ], + [ + 1142, + 235 + ], + [ + 1134, + 249 + ], + [ + 1128, + 237 + ], + [ + 1136, + 224 + ], + [ + 1118, + 222 + ], + [ + 1106, + 215 + ], + [ + 1100, + 198 + ], + [ + 1090, + 180 + ], + [ + 1075, + 172 + ], + [ + 1075, + 183 + ], + [ + 1088, + 200 + ], + [ + 1089, + 214 + ], + [ + 1075, + 204 + ], + [ + 1070, + 214 + ], + [ + 1070, + 227 + ], + [ + 1074, + 232 + ], + [ + 1068, + 240 + ], + [ + 1073, + 250 + ], + [ + 1079, + 261 + ], + [ + 1060, + 246 + ], + [ + 1048, + 257 + ], + [ + 1036, + 270 + ], + [ + 1031, + 286 + ], + [ + 1016, + 287 + ], + [ + 1012, + 305 + ], + [ + 1023, + 318 + ], + [ + 1036, + 330 + ], + [ + 1054, + 337 + ], + [ + 1045, + 344 + ], + [ + 1037, + 346 + ], + [ + 1054, + 353 + ], + [ + 1069, + 354 + ], + [ + 1087, + 356 + ], + [ + 1100, + 356 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1202, + 352 + ], + [ + 1202, + 333 + ], + [ + 1210, + 319 + ], + [ + 1210, + 317 + ], + [ + 1159, + 316 + ], + [ + 1157, + 354 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1104, + 439 + ], + [ + 1092, + 440 + ], + [ + 1077, + 440 + ], + [ + 1067, + 454 + ], + [ + 1074, + 478 + ], + [ + 1091, + 489 + ], + [ + 1103, + 482 + ], + [ + 1114, + 481 + ], + [ + 1126, + 481 + ], + [ + 1137, + 480 + ], + [ + 1139, + 473 + ], + [ + 1138, + 460 + ], + [ + 1132, + 450 + ], + [ + 1125, + 448 + ], + [ + 1113, + 441 + ], + [ + 1111, + 440 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + 
[ + 1128, + 490 + ], + [ + 1110, + 489 + ], + [ + 1089, + 489 + ], + [ + 1079, + 478 + ], + [ + 1080, + 465 + ], + [ + 1081, + 452 + ], + [ + 1089, + 446 + ], + [ + 1103, + 446 + ], + [ + 1113, + 445 + ], + [ + 1125, + 445 + ], + [ + 1132, + 449 + ], + [ + 1133, + 459 + ], + [ + 1140, + 464 + ], + [ + 1146, + 468 + ], + [ + 1149, + 476 + ], + [ + 1149, + 487 + ], + [ + 1143, + 491 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1074, + 431 + ], + [ + 1067, + 428 + ], + [ + 1056, + 430 + ], + [ + 1039, + 429 + ], + [ + 1026, + 429 + ], + [ + 1016, + 432 + ], + [ + 1010, + 436 + ], + [ + 1004, + 443 + ], + [ + 1000, + 450 + ], + [ + 998, + 448 + ], + [ + 993, + 449 + ], + [ + 991, + 449 + ], + [ + 990, + 452 + ], + [ + 990, + 455 + ], + [ + 996, + 459 + ], + [ + 991, + 473 + ], + [ + 991, + 485 + ], + [ + 995, + 499 + ], + [ + 1026, + 485 + ], + [ + 1070, + 458 + ], + [ + 1086, + 458 + ], + [ + 1086, + 451 + ], + [ + 1085, + 442 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1128, + 490 + ], + [ + 1110, + 489 + ], + [ + 1089, + 489 + ], + [ + 1079, + 478 + ], + [ + 1080, + 465 + ], + [ + 1081, + 452 + ], + [ + 1089, + 446 + ], + [ + 1103, + 446 + ], + [ + 1113, + 445 + ], + [ + 1125, + 445 + ], + [ + 1132, + 449 + ], + [ + 1133, + 459 + ], + [ + 1140, + 464 + ], + [ + 1146, + 468 + ], + [ + 1149, + 476 + ], + [ + 1149, + 487 + ], + [ + 1143, + 491 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1014, + 505 + ], + [ + 1004, + 502 + ], + [ + 999, + 494 + ], + [ + 999, + 486 + ], + [ + 1001, + 471 + ], + [ + 1004, + 461 + ], + [ + 1000, + 461 + ], + [ + 996, + 456 + ], + [ + 998, + 452 + ], + [ + 1002, + 452 + ], + [ + 1008, + 452 + ], + [ + 1015, + 443 + ], + [ + 1025, + 436 + ], + [ + 1044, + 434 + ], + [ + 1064, + 434 + ], + [ + 1075, + 438 + ], + [ + 1085, + 453 + ], + [ + 1089, + 461 + ], + [ + 1091, + 471 + ], + [ + 1093, + 498 + ], + [ + 1092, + 508 + ], + [ + 1077, + 507 + ], + [ + 1075, + 502 + ], + [ + 1074, + 496 + ], + [ + 1035, + 496 + ], + [ + 1029, + 495 + ], + [ + 1024, + 500 + ], + [ + 1022, + 505 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1168, + 487 + ], + [ + 1147, + 487 + ], + [ + 1151, + 105 + ], + [ + 1160, + 105 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1155, + 509 + ], + [ + 1146, + 508 + ], + [ + 1145, + 355 + ], + [ + 1142, + 315 + ], + [ + 1141, + 270 + ], + [ + 1133, + 234 + ], + [ + 1115, + 206 + ], + [ + 1096, + 181 + ], + [ + 1065, + 164 + ], + [ + 1041, + 153 + ], + [ + 1044, + 148 + ], + [ + 1094, + 176 + ], + [ + 1115, + 190 + ], + [ + 1138, + 223 + ], + [ + 1143, + 249 + ], + [ + 1149, + 282 + ], + [ + 1153, + 316 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1010, + 145 + ], + [ + 1017, + 137 + ], + [ + 1026, + 138 + ], + [ + 1038, + 145 + ], + [ + 1045, + 149 + ], + [ + 1043, + 154 + ], + [ + 1039, + 158 + ], + [ + 1034, + 160 + ], + [ + 1023, + 159 + ], + [ + 1014, + 153 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1136, + 333 + ], + [ + 1135, + 339 + ], + [ + 1137, + 348 + ], + [ + 1139, + 352 + ], + [ + 1145, + 353 + ], + [ + 1154, + 348 + ], + [ + 1156, + 342 + ], + [ + 1157, + 338 + ], + [ + 1155, + 327 + ], + [ + 1145, + 324 + ], + [ + 1135, + 329 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1226, + 9 + ], + [ + 1228, + 54 + ], + [ + 1223, + 60 + ], + [ + 1226, + 65 + ], + [ + 1221, + 104 + ], + [ + 1226, + 112 + ], + [ + 1229, + 112 + ], + [ + 1231, + 236 + ], + [ + 1230, + 253 + ], + [ + 1223, + 255 + ], + [ + 1225, + 263 + ], + [ + 1226, + 
274 + ], + [ + 1226, + 293 + ], + [ + 1217, + 298 + ], + [ + 1220, + 310 + ], + [ + 1229, + 317 + ], + [ + 1224, + 450 + ], + [ + 1226, + 497 + ], + [ + 1282, + 506 + ], + [ + 1307, + 511 + ], + [ + 1334, + 507 + ], + [ + 1338, + 511 + ], + [ + 1355, + 511 + ], + [ + 1370, + 512 + ], + [ + 1406, + 508 + ], + [ + 1409, + 514 + ], + [ + 1436, + 519 + ], + [ + 1611, + 541 + ], + [ + 1873, + 575 + ], + [ + 2004, + 591 + ], + [ + 2048, + 599 + ], + [ + 2048, + 599 + ], + [ + 2048, + 0 + ], + [ + 1222, + 0 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1211, + 501 + ], + [ + 1206, + 495 + ], + [ + 1213, + 470 + ], + [ + 1220, + 458 + ], + [ + 1225, + 457 + ], + [ + 1230, + 466 + ], + [ + 1231, + 480 + ], + [ + 1230, + 491 + ], + [ + 1230, + 501 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 817, + 444 + ], + [ + 809, + 407 + ], + [ + 802, + 376 + ], + [ + 799, + 356 + ], + [ + 802, + 348 + ], + [ + 813, + 341 + ], + [ + 823, + 339 + ], + [ + 839, + 324 + ], + [ + 853, + 267 + ], + [ + 851, + 82 + ], + [ + 836, + 76 + ], + [ + 823, + 81 + ], + [ + 807, + 91 + ], + [ + 794, + 94 + ], + [ + 784, + 100 + ], + [ + 779, + 102 + ], + [ + 772, + 92 + ], + [ + 764, + 85 + ], + [ + 739, + 156 + ], + [ + 727, + 220 + ], + [ + 736, + 323 + ], + [ + 767, + 339 + ], + [ + 775, + 351 + ], + [ + 785, + 374 + ], + [ + 798, + 389 + ], + [ + 808, + 429 + ], + [ + 812, + 454 + ], + [ + 822, + 454 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 842, + 442 + ], + [ + 830, + 444 + ], + [ + 824, + 444 + ], + [ + 821, + 443 + ], + [ + 812, + 441 + ], + [ + 807, + 439 + ], + [ + 798, + 442 + ], + [ + 792, + 442 + ], + [ + 782, + 442 + ], + [ + 762, + 440 + ], + [ + 750, + 440 + ], + [ + 743, + 442 + ], + [ + 737, + 438 + ], + [ + 726, + 438 + ], + [ + 717, + 438 + ], + [ + 714, + 436 + ], + [ + 698, + 437 + ], + [ + 691, + 439 + ], + [ + 678, + 440 + ], + [ + 678, + 441 + ], + [ + 657, + 444 + ], + [ + 646, + 442 + ], + [ + 638, + 440 + ], + [ + 627, + 443 + ], + [ + 623, + 446 + ], + [ + 623, + 450 + ], + [ + 627, + 456 + ], + [ + 644, + 456 + ], + [ + 658, + 458 + ], + [ + 672, + 463 + ], + [ + 686, + 464 + ], + [ + 694, + 460 + ], + [ + 703, + 458 + ], + [ + 713, + 459 + ], + [ + 735, + 461 + ], + [ + 768, + 462 + ], + [ + 793, + 466 + ], + [ + 800, + 469 + ], + [ + 810, + 472 + ], + [ + 823, + 471 + ], + [ + 832, + 467 + ], + [ + 840, + 464 + ], + [ + 851, + 464 + ], + [ + 858, + 466 + ], + [ + 858, + 461 + ], + [ + 853, + 452 + ], + [ + 851, + 445 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 611, + 440 + ], + [ + 604, + 440 + ], + [ + 593, + 440 + ], + [ + 592, + 447 + ], + [ + 593, + 457 + ], + [ + 601, + 461 + ], + [ + 607, + 460 + ], + [ + 609, + 458 + ], + [ + 616, + 456 + ], + [ + 623, + 455 + ], + [ + 623, + 446 + ], + [ + 620, + 436 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 642, + 36 + ], + [ + 645, + 9 + ], + [ + 618, + 6 + ], + [ + 614, + 32 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 688, + 409 + ], + [ + 688, + 437 + ], + [ + 693, + 469 + ], + [ + 698, + 468 + ], + [ + 696, + 433 + ], + [ + 699, + 400 + ], + [ + 697, + 375 + ], + [ + 699, + 363 + ], + [ + 713, + 363 + ], + [ + 734, + 361 + ], + [ + 758, + 352 + ], + [ + 782, + 319 + ], + [ + 774, + 138 + ], + [ + 765, + 105 + ], + [ + 752, + 88 + ], + [ + 740, + 86 + ], + [ + 718, + 86 + ], + [ + 703, + 75 + ], + [ + 685, + 67 + ], + [ + 656, + 70 + ], + [ + 629, + 78 + ], + [ + 618, + 95 + ], + [ + 613, + 113 + ], + [ + 604, + 126 + ], + [ + 603, + 147 + ], + [ + 613, 
+ 179 + ], + [ + 638, + 189 + ], + [ + 642, + 201 + ], + [ + 642, + 212 + ], + [ + 625, + 216 + ], + [ + 604, + 235 + ], + [ + 612, + 262 + ], + [ + 616, + 276 + ], + [ + 608, + 295 + ], + [ + 605, + 311 + ], + [ + 607, + 339 + ], + [ + 613, + 369 + ], + [ + 615, + 380 + ], + [ + 638, + 377 + ], + [ + 632, + 390 + ], + [ + 627, + 427 + ], + [ + 627, + 428 + ], + [ + 629, + 443 + ], + [ + 633, + 441 + ], + [ + 635, + 420 + ], + [ + 637, + 403 + ], + [ + 643, + 381 + ], + [ + 652, + 374 + ], + [ + 643, + 394 + ], + [ + 643, + 425 + ], + [ + 639, + 439 + ], + [ + 639, + 443 + ], + [ + 638, + 463 + ], + [ + 638, + 471 + ], + [ + 645, + 471 + ], + [ + 649, + 442 + ], + [ + 652, + 421 + ], + [ + 660, + 392 + ], + [ + 663, + 378 + ], + [ + 674, + 377 + ], + [ + 685, + 367 + ], + [ + 690, + 362 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 741, + 377 + ], + [ + 747, + 405 + ], + [ + 754, + 433 + ], + [ + 753, + 441 + ], + [ + 756, + 441 + ], + [ + 755, + 416 + ], + [ + 755, + 397 + ], + [ + 752, + 384 + ], + [ + 750, + 373 + ], + [ + 760, + 368 + ], + [ + 761, + 371 + ], + [ + 761, + 398 + ], + [ + 760, + 428 + ], + [ + 759, + 441 + ], + [ + 767, + 441 + ], + [ + 768, + 424 + ], + [ + 768, + 403 + ], + [ + 768, + 380 + ], + [ + 769, + 369 + ], + [ + 777, + 369 + ], + [ + 796, + 357 + ], + [ + 800, + 343 + ], + [ + 802, + 296 + ], + [ + 754, + 269 + ], + [ + 703, + 276 + ], + [ + 682, + 309 + ], + [ + 688, + 331 + ], + [ + 711, + 357 + ], + [ + 733, + 376 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 723, + 383 + ], + [ + 716, + 379 + ], + [ + 713, + 367 + ], + [ + 718, + 364 + ], + [ + 732, + 362 + ], + [ + 741, + 368 + ], + [ + 740, + 380 + ], + [ + 731, + 381 + ], + [ + 727, + 381 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 721, + 399 + ], + [ + 722, + 365 + ], + [ + 725, + 365 + ], + [ + 726, + 477 + ], + [ + 723, + 477 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 833, + 425 + ], + [ + 836, + 405 + ], + [ + 851, + 408 + ], + [ + 850, + 423 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 849, + 435 + ], + [ + 846, + 388 + ], + [ + 844, + 364 + ], + [ + 835, + 345 + ], + [ + 826, + 321 + ], + [ + 820, + 173 + ], + [ + 828, + 134 + ], + [ + 849, + 98 + ], + [ + 865, + 80 + ], + [ + 875, + 83 + ], + [ + 892, + 113 + ], + [ + 910, + 153 + ], + [ + 906, + 204 + ], + [ + 910, + 226 + ], + [ + 910, + 254 + ], + [ + 901, + 273 + ], + [ + 876, + 266 + ], + [ + 872, + 283 + ], + [ + 884, + 295 + ], + [ + 896, + 314 + ], + [ + 886, + 337 + ], + [ + 871, + 346 + ], + [ + 851, + 356 + ], + [ + 850, + 373 + ], + [ + 857, + 469 + ], + [ + 847, + 470 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 834, + 476 + ], + [ + 830, + 474 + ], + [ + 829, + 448 + ], + [ + 836, + 449 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 703, + 477 + ], + [ + 691, + 475 + ], + [ + 685, + 461 + ], + [ + 692, + 451 + ], + [ + 700, + 453 + ], + [ + 714, + 448 + ], + [ + 727, + 447 + ], + [ + 737, + 448 + ], + [ + 748, + 450 + ], + [ + 756, + 450 + ], + [ + 776, + 450 + ], + [ + 787, + 446 + ], + [ + 797, + 447 + ], + [ + 805, + 454 + ], + [ + 808, + 464 + ], + [ + 810, + 469 + ], + [ + 799, + 474 + ], + [ + 797, + 481 + ], + [ + 777, + 477 + ], + [ + 761, + 475 + ], + [ + 747, + 477 + ], + [ + 733, + 480 + ], + [ + 722, + 480 + ], + [ + 708, + 476 + ], + [ + 667, + 476 + ], + [ + 647, + 470 + ], + [ + 633, + 459 + ], + [ + 645, + 442 + ], + [ + 659, + 446 + ], + [ + 672, + 455 + ], + [ + 685, + 457 + ], + [ + 695, + 455 + ], + [ + 
702, + 451 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 791, + 431 + ], + [ + 798, + 431 + ], + [ + 797, + 482 + ], + [ + 791, + 480 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 782, + 432 + ], + [ + 785, + 479 + ], + [ + 768, + 477 + ], + [ + 755, + 475 + ], + [ + 759, + 446 + ], + [ + 766, + 438 + ], + [ + 769, + 437 + ], + [ + 770, + 431 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 705, + 481 + ], + [ + 695, + 478 + ], + [ + 695, + 469 + ], + [ + 699, + 463 + ], + [ + 705, + 460 + ], + [ + 709, + 459 + ], + [ + 709, + 454 + ], + [ + 703, + 450 + ], + [ + 707, + 447 + ], + [ + 714, + 447 + ], + [ + 724, + 450 + ], + [ + 731, + 450 + ], + [ + 732, + 455 + ], + [ + 723, + 456 + ], + [ + 719, + 459 + ], + [ + 723, + 469 + ], + [ + 722, + 479 + ], + [ + 716, + 482 + ], + [ + 708, + 483 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 891, + 387 + ], + [ + 897, + 387 + ], + [ + 897, + 473 + ], + [ + 889, + 474 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 910, + 411 + ], + [ + 905, + 410 + ], + [ + 900, + 415 + ], + [ + 900, + 422 + ], + [ + 905, + 423 + ], + [ + 910, + 423 + ], + [ + 913, + 419 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 934, + 404 + ], + [ + 934, + 395 + ], + [ + 906, + 394 + ], + [ + 906, + 402 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 874, + 404 + ], + [ + 874, + 474 + ], + [ + 879, + 474 + ], + [ + 876, + 400 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 868, + 411 + ], + [ + 867, + 391 + ], + [ + 867, + 384 + ], + [ + 871, + 380 + ], + [ + 885, + 381 + ], + [ + 880, + 386 + ], + [ + 881, + 412 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 671, + 450 + ], + [ + 697, + 450 + ], + [ + 696, + 483 + ], + [ + 670, + 483 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 747, + 482 + ], + [ + 741, + 482 + ], + [ + 741, + 446 + ], + [ + 745, + 446 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 687, + 485 + ], + [ + 683, + 486 + ], + [ + 683, + 454 + ], + [ + 688, + 454 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 603, + 463 + ], + [ + 603, + 422 + ], + [ + 608, + 420 + ], + [ + 606, + 466 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 615, + 408 + ], + [ + 618, + 410 + ], + [ + 618, + 457 + ], + [ + 614, + 456 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 652, + 131 + ], + [ + 662, + 131 + ], + [ + 669, + 480 + ], + [ + 652, + 481 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 622, + 486 + ], + [ + 618, + 478 + ], + [ + 617, + 465 + ], + [ + 618, + 456 + ], + [ + 628, + 454 + ], + [ + 627, + 452 + ], + [ + 625, + 445 + ], + [ + 633, + 440 + ], + [ + 638, + 447 + ], + [ + 640, + 453 + ], + [ + 650, + 453 + ], + [ + 654, + 453 + ], + [ + 651, + 447 + ], + [ + 659, + 443 + ], + [ + 666, + 449 + ], + [ + 668, + 454 + ], + [ + 671, + 458 + ], + [ + 667, + 464 + ], + [ + 670, + 471 + ], + [ + 668, + 478 + ], + [ + 659, + 483 + ], + [ + 647, + 483 + ], + [ + 644, + 484 + ], + [ + 638, + 485 + ], + [ + 633, + 485 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 757, + 211 + ], + [ + 757, + 229 + ], + [ + 740, + 230 + ], + [ + 740, + 213 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 597, + 413 + ], + [ + 598, + 392 + ], + [ + 574, + 393 + ], + [ + 574, + 416 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 547, + 441 + ], + [ + 533, + 451 + ], + [ + 531, + 464 + ], + [ + 533, + 476 + ], + [ + 516, + 476 + ], + [ + 512, + 462 + ], + [ + 515, + 406 + ], + [ + 
512, + 371 + ], + [ + 512, + 343 + ], + [ + 521, + 338 + ], + [ + 533, + 349 + ], + [ + 542, + 356 + ], + [ + 549, + 367 + ], + [ + 555, + 381 + ], + [ + 556, + 394 + ], + [ + 561, + 408 + ], + [ + 561, + 423 + ], + [ + 551, + 438 + ], + [ + 539, + 443 + ], + [ + 533, + 450 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 544, + 312 + ], + [ + 588, + 314 + ], + [ + 586, + 381 + ], + [ + 542, + 381 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 571, + 497 + ], + [ + 559, + 496 + ], + [ + 561, + 337 + ], + [ + 564, + 311 + ], + [ + 572, + 285 + ], + [ + 602, + 249 + ], + [ + 637, + 234 + ], + [ + 645, + 231 + ], + [ + 645, + 234 + ], + [ + 619, + 243 + ], + [ + 603, + 254 + ], + [ + 583, + 276 + ], + [ + 575, + 290 + ], + [ + 570, + 308 + ], + [ + 568, + 325 + ], + [ + 568, + 351 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 671, + 222 + ], + [ + 672, + 229 + ], + [ + 669, + 236 + ], + [ + 666, + 242 + ], + [ + 652, + 241 + ], + [ + 646, + 238 + ], + [ + 646, + 232 + ], + [ + 644, + 229 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 403, + 525 + ], + [ + 394, + 524 + ], + [ + 387, + 305 + ], + [ + 395, + 304 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 337, + 268 + ], + [ + 340, + 356 + ], + [ + 397, + 355 + ], + [ + 394, + 269 + ], + [ + 394, + 267 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 404, + 378 + ], + [ + 408, + 433 + ], + [ + 384, + 434 + ], + [ + 382, + 381 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 420, + 447 + ], + [ + 420, + 490 + ], + [ + 412, + 495 + ], + [ + 396, + 490 + ], + [ + 396, + 460 + ], + [ + 398, + 445 + ], + [ + 403, + 441 + ], + [ + 414, + 443 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 321, + 413 + ], + [ + 311, + 411 + ], + [ + 305, + 418 + ], + [ + 305, + 425 + ], + [ + 303, + 428 + ], + [ + 296, + 431 + ], + [ + 289, + 448 + ], + [ + 282, + 464 + ], + [ + 278, + 485 + ], + [ + 282, + 495 + ], + [ + 282, + 505 + ], + [ + 295, + 506 + ], + [ + 301, + 506 + ], + [ + 307, + 519 + ], + [ + 306, + 527 + ], + [ + 311, + 531 + ], + [ + 322, + 528 + ], + [ + 318, + 514 + ], + [ + 316, + 494 + ], + [ + 318, + 510 + ], + [ + 321, + 519 + ], + [ + 320, + 529 + ], + [ + 330, + 527 + ], + [ + 334, + 511 + ], + [ + 332, + 499 + ], + [ + 327, + 491 + ], + [ + 331, + 480 + ], + [ + 334, + 471 + ], + [ + 335, + 460 + ], + [ + 340, + 456 + ], + [ + 340, + 450 + ], + [ + 330, + 436 + ], + [ + 324, + 430 + ], + [ + 325, + 422 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 262, + 259 + ], + [ + 261, + 159 + ], + [ + 234, + 151 + ], + [ + 211, + 147 + ], + [ + 185, + 152 + ], + [ + 168, + 158 + ], + [ + 171, + 260 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 66, + 192 + ], + [ + 53, + 173 + ], + [ + 35, + 164 + ], + [ + 19, + 159 + ], + [ + 4, + 162 + ], + [ + 0, + 166 + ], + [ + 0, + 231 + ], + [ + 2, + 238 + ], + [ + 15, + 243 + ], + [ + 34, + 242 + ], + [ + 50, + 236 + ], + [ + 64, + 220 + ], + [ + 67, + 197 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 19, + 355 + ], + [ + 44, + 361 + ], + [ + 58, + 374 + ], + [ + 71, + 379 + ], + [ + 84, + 388 + ], + [ + 101, + 436 + ], + [ + 126, + 513 + ], + [ + 131, + 556 + ], + [ + 135, + 596 + ], + [ + 134, + 618 + ], + [ + 132, + 660 + ], + [ + 118, + 681 + ], + [ + 102, + 691 + ], + [ + 72, + 691 + ], + [ + 57, + 686 + ], + [ + 55, + 668 + ], + [ + 46, + 663 + ], + [ + 22, + 661 + ], + [ + 0, + 666 + ], + [ + 0, + 350 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 934, + 404 + 
], + [ + 934, + 395 + ], + [ + 906, + 394 + ], + [ + 906, + 402 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000125_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000125_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..b21ce7ad7553fb229b169ea3c3f467fef3b80150 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000125_000019_gtFine_polygons.json @@ -0,0 +1,5838 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "terrain", + "polygon": [ + [ + 1093, + 502 + ], + [ + 1095, + 518 + ], + [ + 1087, + 528 + ], + [ + 1073, + 533 + ], + [ + 1059, + 533 + ], + [ + 1031, + 533 + ], + [ + 1015, + 533 + ], + [ + 996, + 535 + ], + [ + 981, + 535 + ], + [ + 968, + 523 + ], + [ + 971, + 505 + ], + [ + 988, + 503 + ], + [ + 1004, + 498 + ], + [ + 1018, + 491 + ], + [ + 1044, + 491 + ], + [ + 1064, + 491 + ], + [ + 1087, + 494 + ], + [ + 1095, + 505 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1645, + 615 + ], + [ + 1561, + 613 + ], + [ + 1471, + 605 + ], + [ + 1400, + 598 + ], + [ + 1377, + 593 + ], + [ + 1367, + 585 + ], + [ + 1363, + 578 + ], + [ + 1349, + 570 + ], + [ + 1351, + 553 + ], + [ + 1366, + 547 + ], + [ + 1387, + 540 + ], + [ + 1414, + 535 + ], + [ + 1436, + 530 + ], + [ + 1460, + 527 + ], + [ + 1511, + 528 + ], + [ + 1550, + 528 + ], + [ + 1579, + 524 + ], + [ + 1600, + 514 + ], + [ + 1623, + 508 + ], + [ + 1660, + 516 + ], + [ + 1680, + 523 + ], + [ + 1707, + 515 + ], + [ + 1738, + 506 + ], + [ + 1777, + 506 + ], + [ + 1810, + 506 + ], + [ + 1843, + 508 + ], + [ + 1868, + 508 + ], + [ + 1905, + 507 + ], + [ + 1926, + 487 + ], + [ + 1976, + 490 + ], + [ + 2010, + 490 + ], + [ + 2048, + 486 + ], + [ + 2048, + 506 + ], + [ + 2048, + 604 + ], + [ + 1957, + 607 + ], + [ + 1875, + 615 + ], + [ + 1761, + 616 + ], + [ + 1663, + 615 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 62, + 41 + ], + [ + 156, + 346 + ], + [ + 581, + 363 + ], + [ + 1059, + 307 + ], + [ + 1291, + 0 + ], + [ + 56, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 455, + 424 + ], + [ + 725, + 422 + ], + [ + 1039, + 463 + ], + [ + 1618, + 557 + ], + [ + 2048, + 592 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 385 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1981, + 713 + ], + [ + 1840, + 713 + ], + [ + 1693, + 711 + ], + [ + 1562, + 705 + ], + [ + 1463, + 698 + ], + [ + 1362, + 691 + ], + [ + 1260, + 680 + ], + [ + 1173, + 658 + ], + [ + 898, + 592 + ], + [ + 849, + 578 + ], + [ + 803, + 561 + ], + [ + 783, + 557 + ], + [ + 733, + 555 + ], + [ + 622, + 551 + ], + [ + 591, + 552 + ], + [ + 590, + 546 + ], + [ + 616, + 542 + ], + [ + 663, + 539 + ], + [ + 
725, + 535 + ], + [ + 754, + 531 + ], + [ + 750, + 515 + ], + [ + 763, + 507 + ], + [ + 803, + 509 + ], + [ + 881, + 516 + ], + [ + 1065, + 538 + ], + [ + 1183, + 533 + ], + [ + 1353, + 533 + ], + [ + 1476, + 560 + ], + [ + 2041, + 570 + ], + [ + 2048, + 568 + ], + [ + 2048, + 713 + ], + [ + 2021, + 713 + ], + [ + 1999, + 713 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 898, + 595 + ], + [ + 1265, + 574 + ], + [ + 1352, + 567 + ], + [ + 1378, + 567 + ], + [ + 1401, + 570 + ], + [ + 1402, + 585 + ], + [ + 1367, + 589 + ], + [ + 1354, + 596 + ], + [ + 1071, + 630 + ], + [ + 1051, + 630 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 255, + 554 + ], + [ + 174, + 557 + ], + [ + 145, + 559 + ], + [ + 102, + 559 + ], + [ + 82, + 548 + ], + [ + 131, + 539 + ], + [ + 213, + 531 + ], + [ + 296, + 509 + ], + [ + 391, + 488 + ], + [ + 436, + 479 + ], + [ + 460, + 473 + ], + [ + 473, + 473 + ], + [ + 469, + 484 + ], + [ + 463, + 493 + ], + [ + 464, + 501 + ], + [ + 449, + 509 + ], + [ + 381, + 528 + ], + [ + 352, + 541 + ], + [ + 305, + 548 + ], + [ + 275, + 550 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1404, + 35 + ], + [ + 1465, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 513 + ], + [ + 2027, + 502 + ], + [ + 1707, + 508 + ], + [ + 1416, + 506 + ], + [ + 1115, + 489 + ], + [ + 918, + 482 + ], + [ + 860, + 482 + ], + [ + 819, + 482 + ], + [ + 791, + 485 + ], + [ + 772, + 487 + ], + [ + 701, + 487 + ], + [ + 630, + 477 + ], + [ + 565, + 467 + ], + [ + 535, + 465 + ], + [ + 503, + 470 + ], + [ + 472, + 475 + ], + [ + 440, + 477 + ], + [ + 414, + 484 + ], + [ + 348, + 496 + ], + [ + 249, + 507 + ], + [ + 122, + 521 + ], + [ + 0, + 511 + ], + [ + 0, + 0 + ], + [ + 134, + 0 + ], + [ + 130, + 2 + ], + [ + 130, + 23 + ], + [ + 144, + 38 + ], + [ + 145, + 70 + ], + [ + 130, + 72 + ], + [ + 131, + 105 + ], + [ + 145, + 118 + ], + [ + 146, + 150 + ], + [ + 135, + 150 + ], + [ + 135, + 153 + ], + [ + 195, + 195 + ], + [ + 194, + 201 + ], + [ + 203, + 205 + ], + [ + 224, + 204 + ], + [ + 256, + 227 + ], + [ + 416, + 297 + ], + [ + 554, + 356 + ], + [ + 644, + 344 + ], + [ + 1243, + 118 + ], + [ + 1302, + 82 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 0, + 463 + ], + [ + 39, + 461 + ], + [ + 81, + 466 + ], + [ + 86, + 468 + ], + [ + 127, + 470 + ], + [ + 137, + 470 + ], + [ + 166, + 473 + ], + [ + 172, + 475 + ], + [ + 184, + 484 + ], + [ + 197, + 483 + ], + [ + 206, + 483 + ], + [ + 217, + 483 + ], + [ + 200, + 472 + ], + [ + 197, + 472 + ], + [ + 197, + 464 + ], + [ + 195, + 458 + ], + [ + 206, + 455 + ], + [ + 225, + 452 + ], + [ + 234, + 452 + ], + [ + 249, + 454 + ], + [ + 281, + 453 + ], + [ + 325, + 459 + ], + [ + 331, + 454 + ], + [ + 343, + 452 + ], + [ + 351, + 451 + ], + [ + 357, + 447 + ], + [ + 374, + 438 + ], + [ + 376, + 438 + ], + [ + 379, + 438 + ], + [ + 389, + 452 + ], + [ + 392, + 453 + ], + [ + 395, + 455 + ], + [ + 407, + 473 + ], + [ + 409, + 477 + ], + [ + 407, + 485 + ], + [ + 383, + 487 + ], + [ + 348, + 502 + ], + [ + 333, + 505 + ], + [ + 302, + 510 + ], + [ + 283, + 517 + ], + [ + 260, + 525 + ], + [ + 242, + 529 + ], + [ + 224, + 531 + ], + [ + 198, + 532 + ], + [ + 190, + 533 + ], + [ + 172, + 533 + ], + [ + 155, + 531 + ], + [ + 132, + 533 + ], + [ + 109, + 533 + ], + [ + 2, + 535 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 527, + 464 + ], + [ + 526, + 450 + ], + [ + 532, + 448 + ], + [ + 537, + 446 + ], + [ + 538, + 457 + ], + [ + 540, + 465 + ], + [ + 542, + 459 + ], + [ + 541, + 449 + ], + [ + 545, 
+ 446 + ], + [ + 551, + 450 + ], + [ + 552, + 458 + ], + [ + 554, + 468 + ], + [ + 561, + 464 + ], + [ + 562, + 452 + ], + [ + 564, + 446 + ], + [ + 576, + 441 + ], + [ + 589, + 441 + ], + [ + 598, + 441 + ], + [ + 602, + 447 + ], + [ + 604, + 451 + ], + [ + 602, + 454 + ], + [ + 588, + 454 + ], + [ + 577, + 462 + ], + [ + 574, + 467 + ], + [ + 596, + 468 + ], + [ + 613, + 464 + ], + [ + 629, + 464 + ], + [ + 630, + 469 + ], + [ + 635, + 477 + ], + [ + 653, + 477 + ], + [ + 674, + 474 + ], + [ + 688, + 450 + ], + [ + 681, + 332 + ], + [ + 668, + 280 + ], + [ + 650, + 263 + ], + [ + 636, + 263 + ], + [ + 629, + 270 + ], + [ + 627, + 278 + ], + [ + 619, + 284 + ], + [ + 613, + 281 + ], + [ + 597, + 281 + ], + [ + 582, + 283 + ], + [ + 578, + 289 + ], + [ + 585, + 298 + ], + [ + 578, + 296 + ], + [ + 566, + 296 + ], + [ + 554, + 301 + ], + [ + 552, + 311 + ], + [ + 554, + 321 + ], + [ + 548, + 330 + ], + [ + 547, + 319 + ], + [ + 547, + 311 + ], + [ + 536, + 307 + ], + [ + 526, + 304 + ], + [ + 511, + 307 + ], + [ + 497, + 319 + ], + [ + 490, + 340 + ], + [ + 477, + 391 + ], + [ + 473, + 417 + ], + [ + 475, + 439 + ], + [ + 470, + 446 + ], + [ + 468, + 462 + ], + [ + 475, + 473 + ], + [ + 484, + 475 + ], + [ + 492, + 464 + ], + [ + 494, + 456 + ], + [ + 493, + 446 + ], + [ + 498, + 445 + ], + [ + 501, + 454 + ], + [ + 500, + 467 + ], + [ + 507, + 464 + ], + [ + 507, + 449 + ], + [ + 512, + 445 + ], + [ + 518, + 449 + ], + [ + 518, + 460 + ], + [ + 517, + 466 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 477, + 488 + ], + [ + 466, + 485 + ], + [ + 465, + 477 + ], + [ + 469, + 470 + ], + [ + 477, + 469 + ], + [ + 483, + 468 + ], + [ + 490, + 467 + ], + [ + 498, + 464 + ], + [ + 499, + 464 + ], + [ + 505, + 462 + ], + [ + 516, + 462 + ], + [ + 519, + 466 + ], + [ + 527, + 466 + ], + [ + 529, + 463 + ], + [ + 535, + 462 + ], + [ + 539, + 462 + ], + [ + 553, + 462 + ], + [ + 566, + 458 + ], + [ + 573, + 461 + ], + [ + 580, + 461 + ], + [ + 587, + 461 + ], + [ + 584, + 464 + ], + [ + 577, + 470 + ], + [ + 573, + 473 + ], + [ + 563, + 473 + ], + [ + 555, + 472 + ], + [ + 546, + 472 + ], + [ + 533, + 472 + ], + [ + 523, + 472 + ], + [ + 516, + 473 + ], + [ + 517, + 475 + ], + [ + 520, + 482 + ], + [ + 520, + 484 + ], + [ + 513, + 487 + ], + [ + 503, + 489 + ], + [ + 491, + 490 + ], + [ + 487, + 488 + ], + [ + 486, + 484 + ], + [ + 483, + 482 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 490, + 450 + ], + [ + 491, + 464 + ], + [ + 473, + 464 + ], + [ + 473, + 451 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 616, + 438 + ], + [ + 616, + 466 + ], + [ + 608, + 466 + ], + [ + 608, + 439 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 393, + 436 + ], + [ + 398, + 462 + ], + [ + 427, + 465 + ], + [ + 428, + 439 + ], + [ + 428, + 436 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 414, + 470 + ], + [ + 406, + 470 + ], + [ + 402, + 467 + ], + [ + 400, + 455 + ], + [ + 393, + 460 + ], + [ + 391, + 466 + ], + [ + 383, + 471 + ], + [ + 385, + 482 + ], + [ + 398, + 482 + ], + [ + 399, + 493 + ], + [ + 407, + 488 + ], + [ + 408, + 481 + ], + [ + 419, + 482 + ], + [ + 422, + 479 + ], + [ + 420, + 472 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 470, + 496 + ], + [ + 466, + 473 + ], + [ + 464, + 448 + ], + [ + 464, + 431 + ], + [ + 469, + 422 + ], + [ + 487, + 408 + ], + [ + 505, + 373 + ], + [ + 524, + 310 + ], + [ + 528, + 292 + ], + [ + 516, + 282 + ], + [ + 506, + 272 + ], + [ + 496, + 260 + ], + [ + 485, + 255 + ], + [ + 474, + 
249 + ], + [ + 464, + 236 + ], + [ + 462, + 224 + ], + [ + 452, + 219 + ], + [ + 446, + 209 + ], + [ + 449, + 192 + ], + [ + 461, + 187 + ], + [ + 469, + 189 + ], + [ + 469, + 179 + ], + [ + 463, + 177 + ], + [ + 450, + 175 + ], + [ + 446, + 160 + ], + [ + 436, + 150 + ], + [ + 426, + 145 + ], + [ + 429, + 129 + ], + [ + 436, + 118 + ], + [ + 446, + 111 + ], + [ + 454, + 105 + ], + [ + 462, + 99 + ], + [ + 452, + 90 + ], + [ + 461, + 87 + ], + [ + 477, + 84 + ], + [ + 478, + 75 + ], + [ + 464, + 76 + ], + [ + 464, + 64 + ], + [ + 457, + 53 + ], + [ + 450, + 59 + ], + [ + 442, + 75 + ], + [ + 438, + 67 + ], + [ + 433, + 71 + ], + [ + 430, + 75 + ], + [ + 426, + 66 + ], + [ + 422, + 53 + ], + [ + 430, + 46 + ], + [ + 444, + 45 + ], + [ + 451, + 35 + ], + [ + 444, + 21 + ], + [ + 432, + 25 + ], + [ + 426, + 18 + ], + [ + 431, + 12 + ], + [ + 437, + 10 + ], + [ + 436, + 0 + ], + [ + 262, + 0 + ], + [ + 252, + 4 + ], + [ + 247, + 11 + ], + [ + 261, + 12 + ], + [ + 276, + 19 + ], + [ + 282, + 29 + ], + [ + 271, + 40 + ], + [ + 261, + 52 + ], + [ + 258, + 58 + ], + [ + 268, + 59 + ], + [ + 273, + 62 + ], + [ + 267, + 67 + ], + [ + 244, + 70 + ], + [ + 237, + 80 + ], + [ + 236, + 88 + ], + [ + 231, + 98 + ], + [ + 244, + 102 + ], + [ + 254, + 95 + ], + [ + 268, + 104 + ], + [ + 270, + 112 + ], + [ + 267, + 127 + ], + [ + 261, + 138 + ], + [ + 251, + 152 + ], + [ + 244, + 162 + ], + [ + 251, + 164 + ], + [ + 262, + 169 + ], + [ + 264, + 178 + ], + [ + 268, + 183 + ], + [ + 276, + 194 + ], + [ + 276, + 204 + ], + [ + 269, + 201 + ], + [ + 258, + 192 + ], + [ + 251, + 191 + ], + [ + 250, + 201 + ], + [ + 252, + 212 + ], + [ + 248, + 222 + ], + [ + 240, + 228 + ], + [ + 232, + 228 + ], + [ + 221, + 230 + ], + [ + 205, + 238 + ], + [ + 190, + 247 + ], + [ + 187, + 262 + ], + [ + 201, + 277 + ], + [ + 198, + 291 + ], + [ + 202, + 299 + ], + [ + 203, + 309 + ], + [ + 204, + 314 + ], + [ + 214, + 314 + ], + [ + 208, + 319 + ], + [ + 207, + 326 + ], + [ + 214, + 333 + ], + [ + 221, + 335 + ], + [ + 216, + 346 + ], + [ + 219, + 349 + ], + [ + 225, + 352 + ], + [ + 221, + 361 + ], + [ + 233, + 361 + ], + [ + 239, + 362 + ], + [ + 230, + 368 + ], + [ + 219, + 377 + ], + [ + 221, + 387 + ], + [ + 233, + 391 + ], + [ + 242, + 392 + ], + [ + 245, + 394 + ], + [ + 236, + 408 + ], + [ + 244, + 411 + ], + [ + 259, + 410 + ], + [ + 270, + 403 + ], + [ + 284, + 396 + ], + [ + 301, + 394 + ], + [ + 315, + 391 + ], + [ + 320, + 393 + ], + [ + 318, + 520 + ], + [ + 317, + 533 + ], + [ + 341, + 533 + ], + [ + 339, + 494 + ], + [ + 341, + 473 + ], + [ + 340, + 449 + ], + [ + 341, + 423 + ], + [ + 357, + 417 + ], + [ + 366, + 422 + ], + [ + 363, + 521 + ], + [ + 371, + 521 + ], + [ + 368, + 422 + ], + [ + 376, + 417 + ], + [ + 379, + 422 + ], + [ + 381, + 512 + ], + [ + 383, + 512 + ], + [ + 381, + 440 + ], + [ + 386, + 429 + ], + [ + 399, + 429 + ], + [ + 412, + 428 + ], + [ + 422, + 426 + ], + [ + 421, + 486 + ], + [ + 421, + 501 + ], + [ + 425, + 503 + ], + [ + 426, + 480 + ], + [ + 427, + 446 + ], + [ + 427, + 423 + ], + [ + 438, + 422 + ], + [ + 448, + 425 + ], + [ + 454, + 438 + ], + [ + 452, + 492 + ], + [ + 457, + 503 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 359, + 374 + ], + [ + 357, + 443 + ], + [ + 327, + 442 + ], + [ + 332, + 376 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 286, + 494 + ], + [ + 272, + 494 + ], + [ + 273, + 507 + ], + [ + 264, + 512 + ], + [ + 268, + 524 + ], + [ + 273, + 525 + ], + [ + 273, + 518 + ], + [ + 284, + 513 + ], + [ + 292, + 509 + ] + ] + }, + { + 
"label": "static", + "polygon": [ + [ + 276, + 449 + ], + [ + 277, + 419 + ], + [ + 305, + 413 + ], + [ + 304, + 449 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 273, + 414 + ], + [ + 288, + 392 + ], + [ + 303, + 409 + ], + [ + 305, + 417 + ], + [ + 270, + 418 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 295, + 546 + ], + [ + 286, + 545 + ], + [ + 286, + 304 + ], + [ + 291, + 304 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 316, + 303 + ], + [ + 294, + 301 + ], + [ + 288, + 307 + ], + [ + 289, + 312 + ], + [ + 299, + 311 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 249, + 500 + ], + [ + 251, + 526 + ], + [ + 236, + 530 + ], + [ + 236, + 502 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 140, + 487 + ], + [ + 143, + 545 + ], + [ + 163, + 545 + ], + [ + 177, + 535 + ], + [ + 173, + 485 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 120, + 433 + ], + [ + 118, + 430 + ], + [ + 107, + 427 + ], + [ + 98, + 433 + ], + [ + 97, + 444 + ], + [ + 102, + 450 + ], + [ + 111, + 452 + ], + [ + 118, + 449 + ], + [ + 124, + 439 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 98, + 527 + ], + [ + 109, + 428 + ], + [ + 117, + 427 + ], + [ + 104, + 537 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 69, + 505 + ], + [ + 67, + 215 + ], + [ + 65, + 213 + ], + [ + 28, + 209 + ], + [ + 29, + 206 + ], + [ + 69, + 210 + ], + [ + 104, + 207 + ], + [ + 105, + 210 + ], + [ + 73, + 216 + ], + [ + 78, + 507 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 135, + 203 + ], + [ + 137, + 212 + ], + [ + 111, + 217 + ], + [ + 103, + 214 + ], + [ + 103, + 207 + ], + [ + 108, + 202 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 33, + 214 + ], + [ + 22, + 213 + ], + [ + 0, + 208 + ], + [ + 0, + 198 + ], + [ + 27, + 204 + ], + [ + 31, + 205 + ], + [ + 32, + 208 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 550, + 483 + ], + [ + 545, + 483 + ], + [ + 544, + 486 + ], + [ + 540, + 486 + ], + [ + 541, + 479 + ], + [ + 541, + 472 + ], + [ + 545, + 468 + ], + [ + 551, + 468 + ], + [ + 557, + 468 + ], + [ + 559, + 472 + ], + [ + 560, + 478 + ], + [ + 560, + 485 + ], + [ + 557, + 486 + ], + [ + 556, + 484 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 452, + 520 + ], + [ + 452, + 536 + ], + [ + 450, + 543 + ], + [ + 446, + 545 + ], + [ + 436, + 546 + ], + [ + 433, + 542 + ], + [ + 390, + 544 + ], + [ + 389, + 545 + ], + [ + 386, + 547 + ], + [ + 374, + 547 + ], + [ + 373, + 540 + ], + [ + 373, + 532 + ], + [ + 375, + 519 + ], + [ + 378, + 512 + ], + [ + 385, + 499 + ], + [ + 392, + 491 + ], + [ + 406, + 490 + ], + [ + 419, + 490 + ], + [ + 434, + 491 + ], + [ + 441, + 493 + ], + [ + 444, + 500 + ], + [ + 446, + 503 + ], + [ + 449, + 503 + ], + [ + 453, + 503 + ], + [ + 454, + 508 + ], + [ + 448, + 510 + ], + [ + 450, + 518 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 14, + 448 + ], + [ + 14, + 492 + ], + [ + 7, + 492 + ], + [ + 8, + 447 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 139, + 667 + ], + [ + 132, + 676 + ], + [ + 120, + 681 + ], + [ + 106, + 680 + ], + [ + 99, + 676 + ], + [ + 99, + 671 + ], + [ + 88, + 670 + ], + [ + 70, + 670 + ], + [ + 51, + 678 + ], + [ + 51, + 684 + ], + [ + 48, + 698 + ], + [ + 41, + 704 + ], + [ + 27, + 709 + ], + [ + 14, + 709 + ], + [ + 0, + 705 + ], + [ + 0, + 489 + ], + [ + 6, + 489 + ], + [ + 29, + 490 + ], + [ + 57, + 492 + ], + [ + 82, + 503 + ], + [ + 107, + 523 + ], + [ + 119, + 540 + ], + [ + 122, + 548 + ], + [ + 135, + 560 + ], + [ + 
142, + 577 + ], + [ + 144, + 625 + ], + [ + 142, + 655 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1025, + 549 + ], + [ + 837, + 515 + ], + [ + 741, + 523 + ], + [ + 735, + 487 + ], + [ + 777, + 485 + ], + [ + 875, + 480 + ], + [ + 949, + 485 + ], + [ + 1052, + 481 + ], + [ + 1138, + 480 + ], + [ + 1192, + 484 + ], + [ + 1230, + 505 + ], + [ + 1406, + 539 + ], + [ + 1391, + 553 + ], + [ + 1362, + 563 + ], + [ + 1273, + 573 + ], + [ + 1238, + 574 + ], + [ + 1133, + 561 + ], + [ + 1103, + 553 + ], + [ + 1076, + 553 + ], + [ + 1039, + 553 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 715, + 257 + ], + [ + 704, + 251 + ], + [ + 699, + 249 + ], + [ + 686, + 250 + ], + [ + 675, + 256 + ], + [ + 666, + 263 + ], + [ + 663, + 270 + ], + [ + 667, + 277 + ], + [ + 660, + 285 + ], + [ + 664, + 383 + ], + [ + 663, + 453 + ], + [ + 669, + 475 + ], + [ + 693, + 486 + ], + [ + 722, + 486 + ], + [ + 751, + 490 + ], + [ + 758, + 477 + ], + [ + 782, + 472 + ], + [ + 794, + 467 + ], + [ + 807, + 463 + ], + [ + 819, + 456 + ], + [ + 832, + 453 + ], + [ + 852, + 453 + ], + [ + 860, + 455 + ], + [ + 876, + 461 + ], + [ + 881, + 466 + ], + [ + 879, + 477 + ], + [ + 883, + 487 + ], + [ + 910, + 494 + ], + [ + 938, + 494 + ], + [ + 963, + 491 + ], + [ + 992, + 491 + ], + [ + 992, + 491 + ], + [ + 1083, + 491 + ], + [ + 1139, + 492 + ], + [ + 1149, + 499 + ], + [ + 1158, + 519 + ], + [ + 1169, + 532 + ], + [ + 1189, + 537 + ], + [ + 1228, + 547 + ], + [ + 1240, + 547 + ], + [ + 1278, + 547 + ], + [ + 1351, + 546 + ], + [ + 1413, + 543 + ], + [ + 1462, + 539 + ], + [ + 1488, + 545 + ], + [ + 1846, + 515 + ], + [ + 1859, + 487 + ], + [ + 1875, + 448 + ], + [ + 1871, + 408 + ], + [ + 1860, + 354 + ], + [ + 1827, + 353 + ], + [ + 1772, + 356 + ], + [ + 1742, + 329 + ], + [ + 1676, + 340 + ], + [ + 1651, + 368 + ], + [ + 1650, + 402 + ], + [ + 1655, + 447 + ], + [ + 1654, + 475 + ], + [ + 1621, + 484 + ], + [ + 1593, + 466 + ], + [ + 1578, + 453 + ], + [ + 1546, + 443 + ], + [ + 1522, + 440 + ], + [ + 1507, + 436 + ], + [ + 1479, + 411 + ], + [ + 1472, + 397 + ], + [ + 1448, + 390 + ], + [ + 1454, + 415 + ], + [ + 1461, + 448 + ], + [ + 1461, + 471 + ], + [ + 1438, + 470 + ], + [ + 1400, + 437 + ], + [ + 1397, + 423 + ], + [ + 1396, + 402 + ], + [ + 1427, + 384 + ], + [ + 1487, + 387 + ], + [ + 1506, + 401 + ], + [ + 1539, + 395 + ], + [ + 1563, + 411 + ], + [ + 1592, + 430 + ], + [ + 1622, + 428 + ], + [ + 1628, + 396 + ], + [ + 1663, + 382 + ], + [ + 1723, + 345 + ], + [ + 1763, + 334 + ], + [ + 1791, + 314 + ], + [ + 1824, + 317 + ], + [ + 1838, + 338 + ], + [ + 1849, + 355 + ], + [ + 1865, + 484 + ], + [ + 1887, + 483 + ], + [ + 1894, + 452 + ], + [ + 1890, + 403 + ], + [ + 1887, + 363 + ], + [ + 1917, + 337 + ], + [ + 1946, + 325 + ], + [ + 1974, + 326 + ], + [ + 2001, + 321 + ], + [ + 2019, + 307 + ], + [ + 2048, + 280 + ], + [ + 2048, + 0 + ], + [ + 1584, + 0 + ], + [ + 1585, + 9 + ], + [ + 1593, + 30 + ], + [ + 1591, + 56 + ], + [ + 1581, + 74 + ], + [ + 1561, + 89 + ], + [ + 1557, + 76 + ], + [ + 1565, + 55 + ], + [ + 1552, + 54 + ], + [ + 1542, + 55 + ], + [ + 1525, + 39 + ], + [ + 1507, + 39 + ], + [ + 1497, + 22 + ], + [ + 1506, + 0 + ], + [ + 1093, + 0 + ], + [ + 1085, + 5 + ], + [ + 1092, + 16 + ], + [ + 1114, + 22 + ], + [ + 1117, + 29 + ], + [ + 1121, + 35 + ], + [ + 1132, + 29 + ], + [ + 1143, + 17 + ], + [ + 1156, + 15 + ], + [ + 1159, + 26 + ], + [ + 1153, + 38 + ], + [ + 1138, + 47 + ], + [ + 1131, + 54 + ], + [ + 1117, + 51 + ], + [ + 1103, + 67 + ], + [ + 1096, + 66 
+ ], + [ + 1092, + 48 + ], + [ + 1092, + 34 + ], + [ + 1080, + 27 + ], + [ + 1065, + 31 + ], + [ + 1054, + 36 + ], + [ + 1059, + 44 + ], + [ + 1052, + 55 + ], + [ + 1045, + 56 + ], + [ + 1028, + 48 + ], + [ + 1016, + 52 + ], + [ + 992, + 58 + ], + [ + 992, + 70 + ], + [ + 988, + 77 + ], + [ + 980, + 69 + ], + [ + 971, + 67 + ], + [ + 966, + 73 + ], + [ + 969, + 83 + ], + [ + 963, + 87 + ], + [ + 957, + 79 + ], + [ + 944, + 82 + ], + [ + 922, + 89 + ], + [ + 914, + 85 + ], + [ + 907, + 84 + ], + [ + 891, + 87 + ], + [ + 887, + 91 + ], + [ + 887, + 98 + ], + [ + 881, + 106 + ], + [ + 874, + 117 + ], + [ + 869, + 130 + ], + [ + 869, + 136 + ], + [ + 859, + 134 + ], + [ + 858, + 144 + ], + [ + 850, + 144 + ], + [ + 846, + 152 + ], + [ + 847, + 160 + ], + [ + 845, + 164 + ], + [ + 837, + 163 + ], + [ + 823, + 153 + ], + [ + 821, + 158 + ], + [ + 824, + 167 + ], + [ + 826, + 174 + ], + [ + 821, + 176 + ], + [ + 820, + 183 + ], + [ + 823, + 189 + ], + [ + 820, + 198 + ], + [ + 815, + 200 + ], + [ + 812, + 198 + ], + [ + 807, + 204 + ], + [ + 804, + 208 + ], + [ + 799, + 214 + ], + [ + 796, + 222 + ], + [ + 795, + 233 + ], + [ + 795, + 241 + ], + [ + 791, + 244 + ], + [ + 789, + 240 + ], + [ + 780, + 238 + ], + [ + 769, + 242 + ], + [ + 759, + 248 + ], + [ + 745, + 251 + ], + [ + 734, + 256 + ], + [ + 722, + 261 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 659, + 434 + ], + [ + 660, + 455 + ], + [ + 656, + 457 + ], + [ + 654, + 463 + ], + [ + 654, + 469 + ], + [ + 640, + 472 + ], + [ + 639, + 463 + ], + [ + 645, + 461 + ], + [ + 641, + 437 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 714, + 436 + ], + [ + 717, + 481 + ], + [ + 711, + 483 + ], + [ + 707, + 436 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 731, + 454 + ], + [ + 731, + 484 + ], + [ + 737, + 484 + ], + [ + 738, + 456 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 749, + 452 + ], + [ + 749, + 488 + ], + [ + 742, + 489 + ], + [ + 742, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 789, + 414 + ], + [ + 791, + 501 + ], + [ + 798, + 501 + ], + [ + 795, + 412 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 829, + 498 + ], + [ + 830, + 452 + ], + [ + 823, + 452 + ], + [ + 823, + 498 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 849, + 505 + ], + [ + 840, + 505 + ], + [ + 838, + 441 + ], + [ + 846, + 441 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 873, + 496 + ], + [ + 874, + 454 + ], + [ + 855, + 452 + ], + [ + 855, + 464 + ], + [ + 865, + 479 + ], + [ + 865, + 495 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 944, + 502 + ], + [ + 934, + 389 + ], + [ + 942, + 390 + ], + [ + 953, + 502 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 580, + 480 + ], + [ + 574, + 476 + ], + [ + 575, + 466 + ], + [ + 582, + 466 + ], + [ + 585, + 463 + ], + [ + 601, + 463 + ], + [ + 622, + 462 + ], + [ + 626, + 462 + ], + [ + 631, + 465 + ], + [ + 634, + 471 + ], + [ + 657, + 478 + ], + [ + 685, + 477 + ], + [ + 714, + 477 + ], + [ + 741, + 477 + ], + [ + 754, + 486 + ], + [ + 754, + 501 + ], + [ + 755, + 524 + ], + [ + 740, + 531 + ], + [ + 730, + 536 + ], + [ + 734, + 538 + ], + [ + 716, + 538 + ], + [ + 699, + 536 + ], + [ + 662, + 538 + ], + [ + 643, + 538 + ], + [ + 638, + 546 + ], + [ + 627, + 547 + ], + [ + 619, + 540 + ], + [ + 608, + 539 + ], + [ + 602, + 533 + ], + [ + 601, + 529 + ], + [ + 595, + 524 + ], + [ + 584, + 524 + ], + [ + 577, + 518 + ], + [ + 578, + 512 + ], + [ + 577, + 505 + ], + [ + 588, + 500 + ], + [ + 588, + 
496 + ], + [ + 588, + 491 + ], + [ + 582, + 490 + ], + [ + 581, + 486 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 693, + 302 + ], + [ + 701, + 547 + ], + [ + 706, + 547 + ], + [ + 697, + 300 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 692, + 295 + ], + [ + 673, + 294 + ], + [ + 670, + 294 + ], + [ + 669, + 295 + ], + [ + 671, + 299 + ], + [ + 690, + 301 + ], + [ + 698, + 304 + ], + [ + 698, + 298 + ], + [ + 696, + 297 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 713, + 407 + ], + [ + 714, + 438 + ], + [ + 681, + 438 + ], + [ + 682, + 407 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 693, + 457 + ], + [ + 688, + 450 + ], + [ + 688, + 445 + ], + [ + 694, + 439 + ], + [ + 700, + 439 + ], + [ + 703, + 440 + ], + [ + 704, + 446 + ], + [ + 705, + 452 + ], + [ + 703, + 455 + ], + [ + 698, + 458 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 826, + 544 + ], + [ + 817, + 548 + ], + [ + 809, + 546 + ], + [ + 803, + 538 + ], + [ + 798, + 541 + ], + [ + 788, + 541 + ], + [ + 782, + 540 + ], + [ + 778, + 546 + ], + [ + 772, + 549 + ], + [ + 760, + 551 + ], + [ + 753, + 547 + ], + [ + 748, + 537 + ], + [ + 738, + 539 + ], + [ + 730, + 542 + ], + [ + 724, + 547 + ], + [ + 713, + 547 + ], + [ + 709, + 539 + ], + [ + 718, + 524 + ], + [ + 727, + 518 + ], + [ + 726, + 512 + ], + [ + 718, + 508 + ], + [ + 729, + 502 + ], + [ + 736, + 500 + ], + [ + 737, + 509 + ], + [ + 750, + 519 + ], + [ + 763, + 513 + ], + [ + 762, + 509 + ], + [ + 767, + 503 + ], + [ + 781, + 493 + ], + [ + 787, + 502 + ], + [ + 787, + 510 + ], + [ + 794, + 505 + ], + [ + 806, + 505 + ], + [ + 809, + 512 + ], + [ + 825, + 513 + ], + [ + 828, + 519 + ], + [ + 831, + 527 + ], + [ + 832, + 538 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 960, + 425 + ], + [ + 962, + 519 + ], + [ + 1065, + 518 + ], + [ + 1060, + 420 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1101, + 507 + ], + [ + 1104, + 554 + ], + [ + 1096, + 556 + ], + [ + 1095, + 509 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1131, + 503 + ], + [ + 1137, + 546 + ], + [ + 1150, + 545 + ], + [ + 1150, + 488 + ], + [ + 1131, + 489 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1121, + 512 + ], + [ + 1123, + 559 + ], + [ + 1133, + 559 + ], + [ + 1131, + 511 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1139, + 507 + ], + [ + 1142, + 553 + ], + [ + 1134, + 555 + ], + [ + 1132, + 510 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1142, + 507 + ], + [ + 1143, + 556 + ], + [ + 1153, + 556 + ], + [ + 1152, + 506 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1994, + 472 + ], + [ + 1991, + 465 + ], + [ + 1982, + 465 + ], + [ + 1973, + 473 + ], + [ + 1965, + 490 + ], + [ + 1962, + 505 + ], + [ + 1962, + 510 + ], + [ + 1973, + 510 + ], + [ + 1973, + 500 + ], + [ + 1976, + 487 + ], + [ + 1983, + 478 + ], + [ + 1986, + 482 + ], + [ + 1986, + 495 + ], + [ + 1989, + 507 + ], + [ + 2002, + 508 + ], + [ + 2003, + 498 + ], + [ + 2000, + 484 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1520, + 486 + ], + [ + 1512, + 489 + ], + [ + 1497, + 495 + ], + [ + 1490, + 499 + ], + [ + 1488, + 511 + ], + [ + 1493, + 521 + ], + [ + 1501, + 535 + ], + [ + 1524, + 535 + ], + [ + 1621, + 540 + ], + [ + 1651, + 531 + ], + [ + 1683, + 528 + ], + [ + 1689, + 512 + ], + [ + 1687, + 494 + ], + [ + 1676, + 474 + ], + [ + 1664, + 465 + ], + [ + 1645, + 460 + ], + [ + 1621, + 458 + ], + [ + 1569, + 458 + ], + [ + 1557, + 459 + ], + [ + 1538, + 466 + 
], + [ + 1527, + 475 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1813, + 457 + ], + [ + 1784, + 458 + ], + [ + 1762, + 465 + ], + [ + 1739, + 479 + ], + [ + 1723, + 511 + ], + [ + 1756, + 530 + ], + [ + 1872, + 530 + ], + [ + 1905, + 522 + ], + [ + 1930, + 518 + ], + [ + 1932, + 501 + ], + [ + 1925, + 481 + ], + [ + 1913, + 467 + ], + [ + 1901, + 459 + ], + [ + 1875, + 457 + ], + [ + 1841, + 457 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1206, + 630 + ], + [ + 1172, + 630 + ], + [ + 1165, + 3 + ], + [ + 1165, + 0 + ], + [ + 1192, + 0 + ], + [ + 1193, + 6 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1272, + 345 + ], + [ + 1283, + 550 + ], + [ + 1293, + 548 + ], + [ + 1283, + 344 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1290, + 337 + ], + [ + 1263, + 332 + ], + [ + 1250, + 327 + ], + [ + 1250, + 311 + ], + [ + 1284, + 308 + ], + [ + 1285, + 300 + ], + [ + 1268, + 299 + ], + [ + 1249, + 293 + ], + [ + 1249, + 279 + ], + [ + 1285, + 273 + ], + [ + 1286, + 264 + ], + [ + 1275, + 267 + ], + [ + 1247, + 256 + ], + [ + 1246, + 242 + ], + [ + 1260, + 235 + ], + [ + 1280, + 233 + ], + [ + 1294, + 237 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1313, + 651 + ], + [ + 1290, + 654 + ], + [ + 1288, + 185 + ], + [ + 1298, + 186 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1269, + 212 + ], + [ + 1258, + 220 + ], + [ + 1259, + 229 + ], + [ + 1275, + 234 + ], + [ + 1284, + 228 + ], + [ + 1279, + 217 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1322, + 171 + ], + [ + 1322, + 211 + ], + [ + 1266, + 211 + ], + [ + 1267, + 172 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1258, + 162 + ], + [ + 1253, + 149 + ], + [ + 1252, + 132 + ], + [ + 1254, + 114 + ], + [ + 1265, + 104 + ], + [ + 1281, + 97 + ], + [ + 1303, + 97 + ], + [ + 1322, + 104 + ], + [ + 1331, + 121 + ], + [ + 1333, + 136 + ], + [ + 1327, + 156 + ], + [ + 1315, + 168 + ], + [ + 1297, + 173 + ], + [ + 1282, + 173 + ], + [ + 1266, + 167 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1337, + 341 + ], + [ + 1310, + 341 + ], + [ + 1313, + 224 + ], + [ + 1336, + 223 + ], + [ + 1336, + 230 + ], + [ + 1368, + 228 + ], + [ + 1372, + 230 + ], + [ + 1369, + 241 + ], + [ + 1359, + 251 + ], + [ + 1337, + 256 + ], + [ + 1337, + 267 + ], + [ + 1370, + 268 + ], + [ + 1369, + 278 + ], + [ + 1359, + 284 + ], + [ + 1349, + 293 + ], + [ + 1338, + 295 + ], + [ + 1337, + 308 + ], + [ + 1370, + 305 + ], + [ + 1368, + 317 + ], + [ + 1363, + 327 + ], + [ + 1355, + 330 + ], + [ + 1340, + 333 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1320, + 379 + ], + [ + 1313, + 367 + ], + [ + 1304, + 360 + ], + [ + 1295, + 358 + ], + [ + 1282, + 358 + ], + [ + 1272, + 367 + ], + [ + 1270, + 379 + ], + [ + 1269, + 393 + ], + [ + 1277, + 409 + ], + [ + 1292, + 413 + ], + [ + 1305, + 408 + ], + [ + 1315, + 396 + ], + [ + 1319, + 383 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1645, + 615 + ], + [ + 1561, + 613 + ], + [ + 1471, + 605 + ], + [ + 1400, + 598 + ], + [ + 1377, + 593 + ], + [ + 1367, + 585 + ], + [ + 1363, + 578 + ], + [ + 1349, + 570 + ], + [ + 1351, + 553 + ], + [ + 1366, + 547 + ], + [ + 1387, + 540 + ], + [ + 1414, + 535 + ], + [ + 1436, + 530 + ], + [ + 1460, + 527 + ], + [ + 1511, + 528 + ], + [ + 1550, + 528 + ], + [ + 1579, + 524 + ], + [ + 1600, + 514 + ], + [ + 1623, + 508 + ], + [ + 1660, + 516 + ], + [ + 1680, + 523 + ], + [ + 1707, + 515 + ], + [ + 1738, + 506 + ], + [ + 1777, + 
506 + ], + [ + 1810, + 506 + ], + [ + 1843, + 508 + ], + [ + 1868, + 508 + ], + [ + 1905, + 507 + ], + [ + 1926, + 487 + ], + [ + 1976, + 490 + ], + [ + 2010, + 490 + ], + [ + 2048, + 486 + ], + [ + 2048, + 506 + ], + [ + 2048, + 604 + ], + [ + 1957, + 607 + ], + [ + 1875, + 615 + ], + [ + 1761, + 616 + ], + [ + 1663, + 615 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1760, + 630 + ], + [ + 1735, + 630 + ], + [ + 1724, + 621 + ], + [ + 1721, + 605 + ], + [ + 1723, + 582 + ], + [ + 1726, + 563 + ], + [ + 1727, + 546 + ], + [ + 1724, + 533 + ], + [ + 1715, + 530 + ], + [ + 1704, + 533 + ], + [ + 1695, + 527 + ], + [ + 1686, + 505 + ], + [ + 1696, + 477 + ], + [ + 1700, + 457 + ], + [ + 1707, + 440 + ], + [ + 1718, + 431 + ], + [ + 1713, + 419 + ], + [ + 1723, + 404 + ], + [ + 1737, + 397 + ], + [ + 1750, + 400 + ], + [ + 1764, + 408 + ], + [ + 1764, + 423 + ], + [ + 1760, + 432 + ], + [ + 1765, + 439 + ], + [ + 1764, + 470 + ], + [ + 1762, + 494 + ], + [ + 1757, + 515 + ], + [ + 1759, + 531 + ], + [ + 1762, + 557 + ], + [ + 1762, + 584 + ], + [ + 1761, + 612 + ], + [ + 1759, + 625 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1093, + 502 + ], + [ + 1095, + 518 + ], + [ + 1087, + 528 + ], + [ + 1073, + 533 + ], + [ + 1059, + 533 + ], + [ + 1031, + 533 + ], + [ + 1015, + 533 + ], + [ + 996, + 535 + ], + [ + 981, + 535 + ], + [ + 968, + 523 + ], + [ + 971, + 505 + ], + [ + 988, + 503 + ], + [ + 1004, + 498 + ], + [ + 1018, + 491 + ], + [ + 1044, + 491 + ], + [ + 1064, + 491 + ], + [ + 1087, + 494 + ], + [ + 1095, + 505 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1001, + 505 + ], + [ + 1000, + 543 + ], + [ + 994, + 544 + ], + [ + 994, + 505 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1012, + 505 + ], + [ + 1011, + 545 + ], + [ + 1005, + 545 + ], + [ + 1004, + 505 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1017, + 505 + ], + [ + 1025, + 505 + ], + [ + 1025, + 542 + ], + [ + 1018, + 541 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1029, + 440 + ], + [ + 1029, + 494 + ], + [ + 1022, + 495 + ], + [ + 1024, + 546 + ], + [ + 1039, + 546 + ], + [ + 1038, + 499 + ], + [ + 1031, + 492 + ], + [ + 1032, + 443 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1038, + 454 + ], + [ + 1025, + 456 + ], + [ + 1023, + 421 + ], + [ + 1034, + 415 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1071, + 511 + ], + [ + 1061, + 512 + ], + [ + 1063, + 553 + ], + [ + 1070, + 553 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1080, + 508 + ], + [ + 1080, + 554 + ], + [ + 1088, + 554 + ], + [ + 1089, + 510 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000126_000019_gtFine_instanceIds.png 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000126_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..c1fdc0cf3e22c5460986039fc03ea3d7239c7887 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000126_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000127_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000127_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..2a24f52b162e4a0fb6ff844f4f510e992d679076 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000127_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000127_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000127_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..a78898b4802abf24211dfdec54b47288e8884f4b --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000127_000019_gtFine_polygons.json @@ -0,0 +1,3911 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 62, + 179 + ], + [ + 923, + 393 + ], + [ + 994, + 437 + ], + [ + 1040, + 440 + ], + [ + 1126, + 409 + ], + [ + 1336, + 288 + ], + [ + 1695, + 119 + ], + [ + 1810, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 159 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2048, + 437 + ], + [ + 1072, + 462 + ], + [ + 1035, + 460 + ], + [ + 974, + 463 + ], + [ + 755, + 475 + ], + [ + 533, + 517 + ], + [ + 0, + 508 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 713 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 895, + 485 + ], + [ + 810, + 503 + ], + [ + 724, + 527 + ], + [ + 568, + 574 + ], + [ + 0, + 747 + ], + [ + 0, + 570 + ], + [ + 613, + 498 + ], + [ + 775, + 470 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 895, + 485 + ], + [ + 810, + 503 + ], + [ + 724, + 527 + ], + [ + 568, + 574 + ], + [ + 0, + 747 + ], + [ + 0, + 570 + ], + [ + 613, + 498 + ], + [ + 775, + 470 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 108, + 474 + ], + [ + 560, + 460 + ], + [ + 747, + 467 + ], + [ + 767, + 470 + ], + [ + 767, + 478 + ], + [ + 81, + 563 + ], + [ + 39, + 484 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 146, + 581 + ], + [ + 514, + 530 + ], + [ + 659, + 507 + ], + [ + 737, + 494 + ], + [ + 780, + 481 + ], + [ + 851, + 465 + ], + [ + 730, + 476 + ], + [ + 639, + 490 + ], + [ + 264, + 520 + ], + [ + 118, + 539 + ], + [ + 99, + 598 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 391, + 607 + ], + [ + 262, + 607 + ], + [ + 187, + 607 + ], + [ + 131, + 602 + ], + [ + 149, + 567 + ], + [ + 177, + 559 + ], + [ + 321, + 547 + ], + [ + 369, + 541 + ], + [ + 393, + 538 + ], + [ + 472, + 552 + ], + [ + 561, + 557 + ], + [ + 575, + 560 + ], + [ + 562, + 574 + ], + [ + 478, + 596 + ], + [ + 428, + 608 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1073, + 445 + ], + [ + 1082, + 422 + ], + [ + 1094, + 408 + ], + [ + 1104, + 406 + ], + [ + 1112, + 400 + ], + [ + 1118, + 400 + ], + [ + 1118, + 394 + ], + [ + 1128, + 393 + ], + [ + 1130, + 382 + ], + [ + 1137, + 377 + ], + [ + 1221, + 329 + ], + [ + 1235, + 315 + ], + [ + 1252, + 315 + ], + [ + 1263, + 311 + ], + [ + 1266, + 304 + ], + [ + 1272, + 303 + ], + [ + 1280, + 296 + ], + [ + 1280, + 288 + ], + [ + 1287, + 288 + ], + [ + 1293, + 281 + ], + [ + 1296, + 268 + ], + [ + 1302, + 
268 + ], + [ + 1314, + 250 + ], + [ + 1331, + 238 + ], + [ + 1335, + 225 + ], + [ + 1369, + 186 + ], + [ + 1513, + 137 + ], + [ + 1642, + 56 + ], + [ + 1670, + 43 + ], + [ + 1686, + 32 + ], + [ + 1704, + 32 + ], + [ + 1729, + 23 + ], + [ + 1731, + 20 + ], + [ + 1729, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 431 + ], + [ + 1530, + 498 + ], + [ + 1132, + 462 + ], + [ + 1070, + 454 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 21, + 94 + ], + [ + 32, + 86 + ], + [ + 43, + 74 + ], + [ + 55, + 70 + ], + [ + 78, + 72 + ], + [ + 106, + 81 + ], + [ + 118, + 82 + ], + [ + 111, + 76 + ], + [ + 116, + 68 + ], + [ + 120, + 62 + ], + [ + 104, + 60 + ], + [ + 93, + 55 + ], + [ + 78, + 53 + ], + [ + 66, + 58 + ], + [ + 62, + 67 + ], + [ + 47, + 66 + ], + [ + 44, + 60 + ], + [ + 62, + 45 + ], + [ + 56, + 41 + ], + [ + 48, + 35 + ], + [ + 39, + 27 + ], + [ + 35, + 20 + ], + [ + 23, + 21 + ], + [ + 20, + 29 + ], + [ + 21, + 37 + ], + [ + 19, + 45 + ], + [ + 0, + 39 + ], + [ + 0, + 0 + ], + [ + 820, + 0 + ], + [ + 818, + 15 + ], + [ + 820, + 25 + ], + [ + 830, + 32 + ], + [ + 837, + 48 + ], + [ + 848, + 54 + ], + [ + 868, + 62 + ], + [ + 886, + 77 + ], + [ + 891, + 104 + ], + [ + 884, + 118 + ], + [ + 875, + 131 + ], + [ + 879, + 137 + ], + [ + 879, + 149 + ], + [ + 861, + 144 + ], + [ + 862, + 161 + ], + [ + 875, + 167 + ], + [ + 889, + 145 + ], + [ + 902, + 145 + ], + [ + 912, + 157 + ], + [ + 908, + 172 + ], + [ + 889, + 195 + ], + [ + 879, + 215 + ], + [ + 889, + 214 + ], + [ + 901, + 203 + ], + [ + 916, + 214 + ], + [ + 920, + 230 + ], + [ + 935, + 238 + ], + [ + 942, + 246 + ], + [ + 951, + 257 + ], + [ + 962, + 276 + ], + [ + 972, + 280 + ], + [ + 974, + 293 + ], + [ + 976, + 300 + ], + [ + 986, + 299 + ], + [ + 988, + 311 + ], + [ + 988, + 331 + ], + [ + 1000, + 331 + ], + [ + 996, + 343 + ], + [ + 994, + 355 + ], + [ + 994, + 361 + ], + [ + 1002, + 365 + ], + [ + 1006, + 371 + ], + [ + 996, + 385 + ], + [ + 1003, + 392 + ], + [ + 1008, + 400 + ], + [ + 1008, + 410 + ], + [ + 1006, + 421 + ], + [ + 1006, + 427 + ], + [ + 1019, + 414 + ], + [ + 1031, + 413 + ], + [ + 1038, + 391 + ], + [ + 1042, + 388 + ], + [ + 1036, + 382 + ], + [ + 1041, + 374 + ], + [ + 1037, + 372 + ], + [ + 1027, + 364 + ], + [ + 1021, + 346 + ], + [ + 1026, + 339 + ], + [ + 1042, + 350 + ], + [ + 1039, + 339 + ], + [ + 1039, + 331 + ], + [ + 1031, + 320 + ], + [ + 1033, + 309 + ], + [ + 1039, + 302 + ], + [ + 1043, + 297 + ], + [ + 1030, + 287 + ], + [ + 1042, + 270 + ], + [ + 1060, + 263 + ], + [ + 1070, + 271 + ], + [ + 1100, + 302 + ], + [ + 1100, + 360 + ], + [ + 1098, + 376 + ], + [ + 1097, + 394 + ], + [ + 1094, + 405 + ], + [ + 1092, + 414 + ], + [ + 1090, + 433 + ], + [ + 1080, + 447 + ], + [ + 1034, + 457 + ], + [ + 993, + 461 + ], + [ + 945, + 460 + ], + [ + 854, + 460 + ], + [ + 690, + 478 + ], + [ + 628, + 476 + ], + [ + 386, + 474 + ], + [ + 134, + 478 + ], + [ + 0, + 149 + ], + [ + 0, + 55 + ], + [ + 4, + 64 + ], + [ + 12, + 85 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 395, + 479 + ], + [ + 466, + 479 + ], + [ + 473, + 466 + ], + [ + 410, + 466 + ], + [ + 372, + 466 + ], + [ + 319, + 465 + ], + [ + 247, + 468 + ], + [ + 194, + 468 + ], + [ + 166, + 469 + ], + [ + 169, + 489 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 696, + 453 + ], + [ + 696, + 465 + ], + [ + 686, + 478 + ], + [ + 673, + 469 + ], + [ + 662, + 469 + ], + [ + 664, + 454 + ], + [ + 671, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 702, + 220 + ], + [ + 711, + 496 + ], + [ + 717, + 
496 + ], + [ + 706, + 220 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 734, + 217 + ], + [ + 709, + 216 + ], + [ + 705, + 221 + ], + [ + 706, + 232 + ], + [ + 736, + 223 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 629, + 426 + ], + [ + 610, + 424 + ], + [ + 581, + 422 + ], + [ + 548, + 421 + ], + [ + 518, + 424 + ], + [ + 505, + 427 + ], + [ + 493, + 437 + ], + [ + 476, + 459 + ], + [ + 469, + 456 + ], + [ + 464, + 458 + ], + [ + 460, + 464 + ], + [ + 468, + 466 + ], + [ + 467, + 469 + ], + [ + 456, + 478 + ], + [ + 449, + 489 + ], + [ + 442, + 512 + ], + [ + 449, + 533 + ], + [ + 455, + 554 + ], + [ + 472, + 555 + ], + [ + 482, + 545 + ], + [ + 492, + 542 + ], + [ + 513, + 544 + ], + [ + 560, + 543 + ], + [ + 574, + 544 + ], + [ + 585, + 547 + ], + [ + 590, + 553 + ], + [ + 606, + 553 + ], + [ + 615, + 547 + ], + [ + 616, + 533 + ], + [ + 632, + 533 + ], + [ + 636, + 543 + ], + [ + 643, + 544 + ], + [ + 652, + 539 + ], + [ + 656, + 527 + ], + [ + 656, + 506 + ], + [ + 656, + 491 + ], + [ + 652, + 472 + ], + [ + 639, + 443 + ], + [ + 633, + 429 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 330, + 140 + ], + [ + 347, + 550 + ], + [ + 357, + 550 + ], + [ + 339, + 129 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 269, + 509 + ], + [ + 267, + 546 + ], + [ + 268, + 565 + ], + [ + 276, + 564 + ], + [ + 275, + 530 + ], + [ + 275, + 512 + ], + [ + 284, + 508 + ], + [ + 422, + 507 + ], + [ + 419, + 555 + ], + [ + 432, + 556 + ], + [ + 435, + 524 + ], + [ + 435, + 507 + ], + [ + 441, + 506 + ], + [ + 446, + 546 + ], + [ + 454, + 550 + ], + [ + 464, + 547 + ], + [ + 459, + 522 + ], + [ + 459, + 505 + ], + [ + 469, + 505 + ], + [ + 478, + 503 + ], + [ + 478, + 499 + ], + [ + 420, + 499 + ], + [ + 284, + 505 + ], + [ + 276, + 505 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 312, + 563 + ], + [ + 293, + 560 + ], + [ + 292, + 424 + ], + [ + 289, + 274 + ], + [ + 287, + 258 + ], + [ + 277, + 204 + ], + [ + 270, + 183 + ], + [ + 234, + 164 + ], + [ + 218, + 142 + ], + [ + 225, + 106 + ], + [ + 196, + 75 + ], + [ + 220, + 39 + ], + [ + 184, + 0 + ], + [ + 411, + 0 + ], + [ + 429, + 30 + ], + [ + 430, + 85 + ], + [ + 421, + 129 + ], + [ + 381, + 153 + ], + [ + 341, + 158 + ], + [ + 325, + 165 + ], + [ + 310, + 197 + ], + [ + 304, + 233 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1095, + 476 + ], + [ + 1072, + 476 + ], + [ + 1120, + 497 + ], + [ + 2047, + 974 + ], + [ + 2047, + 537 + ], + [ + 1133, + 464 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1095, + 476 + ], + [ + 1072, + 476 + ], + [ + 1120, + 497 + ], + [ + 2047, + 974 + ], + [ + 2047, + 537 + ], + [ + 1133, + 464 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 21, + 701 + ], + [ + 42, + 712 + ], + [ + 73, + 710 + ], + [ + 95, + 699 + ], + [ + 108, + 685 + ], + [ + 120, + 644 + ], + [ + 124, + 620 + ], + [ + 158, + 607 + ], + [ + 164, + 576 + ], + [ + 179, + 476 + ], + [ + 179, + 450 + ], + [ + 176, + 417 + ], + [ + 174, + 384 + ], + [ + 167, + 302 + ], + [ + 150, + 212 + ], + [ + 132, + 178 + ], + [ + 135, + 160 + ], + [ + 141, + 159 + ], + [ + 151, + 158 + ], + [ + 155, + 138 + ], + [ + 0, + 90 + ], + [ + 0, + 699 + ], + [ + 21, + 690 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 946, + 427 + ], + [ + 935, + 429 + ], + [ + 935, + 450 + ], + [ + 936, + 458 + ], + [ + 957, + 455 + ], + [ + 962, + 449 + ], + [ + 964, + 443 + ], + [ + 972, + 442 + ], + [ + 972, + 438 + ], + [ + 962, + 433 + ], + [ + 952, + 433 + ] + ] + }, + { + 
"label": "cargroup", + "polygon": [ + [ + 1025, + 453 + ], + [ + 1020, + 451 + ], + [ + 1013, + 451 + ], + [ + 1008, + 457 + ], + [ + 1005, + 457 + ], + [ + 1001, + 455 + ], + [ + 997, + 450 + ], + [ + 990, + 450 + ], + [ + 981, + 453 + ], + [ + 976, + 455 + ], + [ + 968, + 450 + ], + [ + 959, + 447 + ], + [ + 948, + 446 + ], + [ + 939, + 447 + ], + [ + 935, + 447 + ], + [ + 926, + 449 + ], + [ + 923, + 451 + ], + [ + 923, + 452 + ], + [ + 924, + 462 + ], + [ + 933, + 469 + ], + [ + 938, + 465 + ], + [ + 945, + 465 + ], + [ + 950, + 467 + ], + [ + 956, + 467 + ], + [ + 960, + 464 + ], + [ + 967, + 466 + ], + [ + 975, + 465 + ], + [ + 981, + 462 + ], + [ + 991, + 465 + ], + [ + 998, + 464 + ], + [ + 1006, + 464 + ], + [ + 1009, + 464 + ], + [ + 1016, + 464 + ], + [ + 1025, + 463 + ], + [ + 1029, + 463 + ], + [ + 1029, + 460 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 912, + 447 + ], + [ + 906, + 447 + ], + [ + 896, + 446 + ], + [ + 900, + 465 + ], + [ + 904, + 474 + ], + [ + 912, + 476 + ], + [ + 920, + 475 + ], + [ + 920, + 466 + ], + [ + 919, + 456 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 879, + 444 + ], + [ + 884, + 445 + ], + [ + 901, + 450 + ], + [ + 908, + 455 + ], + [ + 909, + 470 + ], + [ + 908, + 478 + ], + [ + 905, + 479 + ], + [ + 901, + 479 + ], + [ + 900, + 475 + ], + [ + 886, + 474 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 818, + 442 + ], + [ + 827, + 408 + ], + [ + 831, + 405 + ], + [ + 879, + 407 + ], + [ + 885, + 411 + ], + [ + 888, + 422 + ], + [ + 892, + 444 + ], + [ + 892, + 461 + ], + [ + 892, + 475 + ], + [ + 892, + 482 + ], + [ + 888, + 486 + ], + [ + 882, + 486 + ], + [ + 876, + 487 + ], + [ + 874, + 481 + ], + [ + 859, + 480 + ], + [ + 820, + 459 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 842, + 445 + ], + [ + 854, + 446 + ], + [ + 861, + 448 + ], + [ + 865, + 453 + ], + [ + 867, + 453 + ], + [ + 869, + 460 + ], + [ + 870, + 466 + ], + [ + 870, + 479 + ], + [ + 869, + 487 + ], + [ + 865, + 490 + ], + [ + 856, + 490 + ], + [ + 854, + 487 + ], + [ + 834, + 487 + ], + [ + 823, + 472 + ], + [ + 821, + 452 + ], + [ + 823, + 448 + ], + [ + 828, + 446 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 755, + 449 + ], + [ + 765, + 436 + ], + [ + 791, + 435 + ], + [ + 823, + 436 + ], + [ + 832, + 450 + ], + [ + 837, + 467 + ], + [ + 837, + 484 + ], + [ + 837, + 493 + ], + [ + 834, + 495 + ], + [ + 829, + 495 + ], + [ + 826, + 494 + ], + [ + 824, + 497 + ], + [ + 819, + 498 + ], + [ + 813, + 497 + ], + [ + 810, + 491 + ], + [ + 803, + 491 + ], + [ + 798, + 493 + ], + [ + 771, + 492 + ], + [ + 767, + 498 + ], + [ + 761, + 498 + ], + [ + 757, + 492 + ], + [ + 754, + 498 + ], + [ + 747, + 498 + ], + [ + 740, + 496 + ], + [ + 740, + 485 + ], + [ + 740, + 474 + ], + [ + 741, + 465 + ], + [ + 748, + 458 + ], + [ + 745, + 454 + ], + [ + 747, + 450 + ], + [ + 752, + 449 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1018, + 433 + ], + [ + 1015, + 430 + ], + [ + 1012, + 433 + ], + [ + 1012, + 436 + ], + [ + 1015, + 436 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1334, + 326 + ], + [ + 1337, + 345 + ], + [ + 1346, + 364 + ], + [ + 1362, + 390 + ], + [ + 1360, + 403 + ], + [ + 1294, + 426 + ], + [ + 1251, + 435 + ], + [ + 1213, + 449 + ], + [ + 1182, + 450 + ], + [ + 1155, + 445 + ], + [ + 1149, + 426 + ], + [ + 1148, + 417 + ], + [ + 1140, + 408 + ], + [ + 1133, + 395 + ], + [ + 1129, + 385 + ], + [ + 1121, + 381 + ], + [ + 1121, + 391 + ], + [ + 1120, + 398 + ], + [ + 1120, + 408 + ], + [ + 1127, 
+ 410 + ], + [ + 1134, + 423 + ], + [ + 1128, + 436 + ], + [ + 1104, + 447 + ], + [ + 1083, + 449 + ], + [ + 1072, + 448 + ], + [ + 1069, + 422 + ], + [ + 1062, + 308 + ], + [ + 1062, + 270 + ], + [ + 1063, + 255 + ], + [ + 1070, + 239 + ], + [ + 1074, + 236 + ], + [ + 1080, + 233 + ], + [ + 1073, + 227 + ], + [ + 1083, + 216 + ], + [ + 1093, + 194 + ], + [ + 1098, + 190 + ], + [ + 1087, + 170 + ], + [ + 1086, + 153 + ], + [ + 1100, + 140 + ], + [ + 1102, + 130 + ], + [ + 1115, + 120 + ], + [ + 1111, + 113 + ], + [ + 1114, + 94 + ], + [ + 1127, + 79 + ], + [ + 1144, + 55 + ], + [ + 1144, + 43 + ], + [ + 1136, + 34 + ], + [ + 1136, + 23 + ], + [ + 1125, + 21 + ], + [ + 1114, + 23 + ], + [ + 1102, + 20 + ], + [ + 1093, + 18 + ], + [ + 1095, + 7 + ], + [ + 1101, + 0 + ], + [ + 1504, + 0 + ], + [ + 1492, + 148 + ], + [ + 1473, + 164 + ], + [ + 1448, + 181 + ], + [ + 1441, + 198 + ], + [ + 1422, + 236 + ], + [ + 1418, + 262 + ], + [ + 1399, + 286 + ], + [ + 1382, + 297 + ], + [ + 1359, + 308 + ], + [ + 1337, + 304 + ], + [ + 1327, + 292 + ], + [ + 1313, + 296 + ], + [ + 1312, + 315 + ], + [ + 1294, + 314 + ], + [ + 1271, + 315 + ], + [ + 1267, + 329 + ], + [ + 1284, + 339 + ], + [ + 1294, + 350 + ], + [ + 1299, + 357 + ], + [ + 1305, + 348 + ], + [ + 1308, + 325 + ], + [ + 1322, + 319 + ], + [ + 1335, + 323 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1045, + 447 + ], + [ + 1036, + 449 + ], + [ + 1030, + 452 + ], + [ + 1030, + 462 + ], + [ + 1032, + 464 + ], + [ + 1038, + 462 + ], + [ + 1043, + 464 + ], + [ + 1049, + 464 + ], + [ + 1053, + 463 + ], + [ + 1057, + 463 + ], + [ + 1063, + 464 + ], + [ + 1065, + 464 + ], + [ + 1067, + 467 + ], + [ + 1071, + 468 + ], + [ + 1072, + 473 + ], + [ + 1075, + 478 + ], + [ + 1085, + 478 + ], + [ + 1093, + 477 + ], + [ + 1098, + 479 + ], + [ + 1107, + 482 + ], + [ + 1114, + 480 + ], + [ + 1117, + 477 + ], + [ + 1124, + 474 + ], + [ + 1161, + 462 + ], + [ + 1186, + 445 + ], + [ + 1200, + 430 + ], + [ + 1200, + 424 + ], + [ + 1200, + 419 + ], + [ + 1192, + 419 + ], + [ + 1119, + 425 + ], + [ + 1114, + 426 + ], + [ + 1096, + 443 + ], + [ + 1095, + 436 + ], + [ + 1057, + 437 + ], + [ + 1057, + 443 + ], + [ + 1052, + 446 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1162, + 333 + ], + [ + 1168, + 437 + ], + [ + 1175, + 438 + ], + [ + 1169, + 337 + ], + [ + 1204, + 265 + ], + [ + 1173, + 153 + ], + [ + 1132, + 199 + ], + [ + 1124, + 284 + ], + [ + 1144, + 324 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1132, + 480 + ], + [ + 1125, + 478 + ], + [ + 1120, + 471 + ], + [ + 1117, + 462 + ], + [ + 1123, + 449 + ], + [ + 1126, + 440 + ], + [ + 1128, + 437 + ], + [ + 1144, + 437 + ], + [ + 1146, + 457 + ], + [ + 1142, + 475 + ], + [ + 1139, + 480 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1218, + 433 + ], + [ + 1206, + 428 + ], + [ + 1190, + 428 + ], + [ + 1155, + 431 + ], + [ + 1152, + 432 + ], + [ + 1145, + 433 + ], + [ + 1142, + 437 + ], + [ + 1134, + 445 + ], + [ + 1131, + 458 + ], + [ + 1132, + 476 + ], + [ + 1136, + 477 + ], + [ + 1153, + 477 + ], + [ + 1220, + 449 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1140, + 490 + ], + [ + 1134, + 488 + ], + [ + 1130, + 481 + ], + [ + 1130, + 475 + ], + [ + 1133, + 467 + ], + [ + 1140, + 463 + ], + [ + 1161, + 454 + ], + [ + 1178, + 450 + ], + [ + 1187, + 444 + ], + [ + 1206, + 433 + ], + [ + 1222, + 433 + ], + [ + 1237, + 434 + ], + [ + 1230, + 451 + ], + [ + 1211, + 474 + ], + [ + 1194, + 485 + ], + [ + 1186, + 485 + ], + [ + 1183, + 485 + ], + [ + 
1176, + 491 + ], + [ + 1167, + 492 + ], + [ + 1157, + 492 + ], + [ + 1155, + 489 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1175, + 505 + ], + [ + 1145, + 505 + ], + [ + 1133, + 502 + ], + [ + 1135, + 491 + ], + [ + 1161, + 488 + ], + [ + 1180, + 486 + ], + [ + 1201, + 485 + ], + [ + 1212, + 487 + ], + [ + 1216, + 496 + ], + [ + 1214, + 504 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1218, + 506 + ], + [ + 1211, + 505 + ], + [ + 1206, + 499 + ], + [ + 1205, + 495 + ], + [ + 1197, + 495 + ], + [ + 1193, + 493 + ], + [ + 1189, + 483 + ], + [ + 1189, + 471 + ], + [ + 1190, + 456 + ], + [ + 1191, + 447 + ], + [ + 1200, + 437 + ], + [ + 1216, + 425 + ], + [ + 1226, + 423 + ], + [ + 1249, + 419 + ], + [ + 1265, + 420 + ], + [ + 1254, + 461 + ], + [ + 1241, + 496 + ], + [ + 1238, + 504 + ], + [ + 1230, + 507 + ], + [ + 1222, + 508 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1328, + 394 + ], + [ + 1275, + 402 + ], + [ + 1260, + 406 + ], + [ + 1252, + 408 + ], + [ + 1247, + 412 + ], + [ + 1238, + 434 + ], + [ + 1234, + 450 + ], + [ + 1231, + 465 + ], + [ + 1232, + 480 + ], + [ + 1232, + 490 + ], + [ + 1228, + 500 + ], + [ + 1231, + 509 + ], + [ + 1250, + 512 + ], + [ + 1273, + 512 + ], + [ + 1286, + 514 + ], + [ + 1342, + 513 + ], + [ + 1389, + 505 + ], + [ + 1433, + 481 + ], + [ + 1450, + 448 + ], + [ + 1455, + 419 + ], + [ + 1448, + 402 + ], + [ + 1421, + 394 + ], + [ + 1377, + 393 + ], + [ + 1348, + 393 + ], + [ + 1333, + 394 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1318, + 548 + ], + [ + 1266, + 547 + ], + [ + 1230, + 550 + ], + [ + 1216, + 545 + ], + [ + 1211, + 536 + ], + [ + 1220, + 529 + ], + [ + 1240, + 522 + ], + [ + 1269, + 516 + ], + [ + 1284, + 511 + ], + [ + 1305, + 505 + ], + [ + 1352, + 499 + ], + [ + 1376, + 497 + ], + [ + 1394, + 498 + ], + [ + 1412, + 513 + ], + [ + 1412, + 530 + ], + [ + 1377, + 542 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1377, + 430 + ], + [ + 1377, + 489 + ], + [ + 1418, + 486 + ], + [ + 1405, + 441 + ], + [ + 1402, + 415 + ], + [ + 1400, + 334 + ], + [ + 1403, + 193 + ], + [ + 1413, + 172 + ], + [ + 1445, + 172 + ], + [ + 1472, + 171 + ], + [ + 1487, + 186 + ], + [ + 1508, + 184 + ], + [ + 1533, + 199 + ], + [ + 1532, + 215 + ], + [ + 1547, + 219 + ], + [ + 1557, + 230 + ], + [ + 1565, + 238 + ], + [ + 1585, + 235 + ], + [ + 1617, + 210 + ], + [ + 1650, + 185 + ], + [ + 1692, + 172 + ], + [ + 1720, + 172 + ], + [ + 1726, + 147 + ], + [ + 1728, + 124 + ], + [ + 1751, + 107 + ], + [ + 1755, + 90 + ], + [ + 1751, + 73 + ], + [ + 1768, + 40 + ], + [ + 1772, + 31 + ], + [ + 1759, + 26 + ], + [ + 1727, + 43 + ], + [ + 1711, + 54 + ], + [ + 1701, + 45 + ], + [ + 1709, + 25 + ], + [ + 1709, + 0 + ], + [ + 1699, + 0 + ], + [ + 1294, + 0 + ], + [ + 1286, + 118 + ], + [ + 1296, + 132 + ], + [ + 1343, + 150 + ], + [ + 1368, + 165 + ], + [ + 1385, + 178 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1564, + 408 + ], + [ + 1562, + 396 + ], + [ + 1563, + 380 + ], + [ + 1573, + 360 + ], + [ + 1587, + 347 + ], + [ + 1613, + 339 + ], + [ + 1630, + 338 + ], + [ + 1646, + 337 + ], + [ + 1664, + 339 + ], + [ + 1677, + 354 + ], + [ + 1704, + 379 + ], + [ + 1708, + 394 + ], + [ + 1707, + 414 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1529, + 400 + ], + [ + 1496, + 398 + ], + [ + 1459, + 400 + ], + [ + 1430, + 406 + ], + [ + 1408, + 411 + ], + [ + 1402, + 411 + ], + [ + 1399, + 419 + ], + [ + 1386, + 443 + ], + [ + 1376, + 468 + ], + [ + 1373, + 486 + ], + [ + 1366, + 494 
+ ], + [ + 1368, + 511 + ], + [ + 1371, + 522 + ], + [ + 1380, + 528 + ], + [ + 1415, + 523 + ], + [ + 1585, + 444 + ], + [ + 1581, + 406 + ], + [ + 1571, + 400 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1451, + 575 + ], + [ + 1419, + 574 + ], + [ + 1411, + 568 + ], + [ + 1402, + 566 + ], + [ + 1383, + 560 + ], + [ + 1378, + 539 + ], + [ + 1380, + 515 + ], + [ + 1386, + 502 + ], + [ + 1386, + 492 + ], + [ + 1382, + 488 + ], + [ + 1382, + 475 + ], + [ + 1387, + 466 + ], + [ + 1396, + 465 + ], + [ + 1412, + 464 + ], + [ + 1424, + 461 + ], + [ + 1445, + 457 + ], + [ + 1455, + 450 + ], + [ + 1476, + 440 + ], + [ + 1495, + 426 + ], + [ + 1514, + 418 + ], + [ + 1523, + 414 + ], + [ + 1549, + 405 + ], + [ + 1583, + 398 + ], + [ + 1627, + 394 + ], + [ + 1683, + 393 + ], + [ + 1719, + 393 + ], + [ + 1745, + 400 + ], + [ + 1701, + 505 + ], + [ + 1584, + 555 + ], + [ + 1510, + 577 + ], + [ + 1488, + 577 + ], + [ + 1486, + 574 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1544, + 617 + ], + [ + 1513, + 617 + ], + [ + 1497, + 613 + ], + [ + 1486, + 601 + ], + [ + 1480, + 583 + ], + [ + 1474, + 568 + ], + [ + 1474, + 550 + ], + [ + 1477, + 535 + ], + [ + 1491, + 509 + ], + [ + 1514, + 489 + ], + [ + 1547, + 472 + ], + [ + 1586, + 463 + ], + [ + 1616, + 457 + ], + [ + 1660, + 424 + ], + [ + 1723, + 385 + ], + [ + 1753, + 372 + ], + [ + 1780, + 361 + ], + [ + 1820, + 349 + ], + [ + 1876, + 338 + ], + [ + 1935, + 330 + ], + [ + 1994, + 323 + ], + [ + 2046, + 319 + ], + [ + 2048, + 319 + ], + [ + 2048, + 586 + ], + [ + 2045, + 595 + ], + [ + 1769, + 616 + ], + [ + 1757, + 617 + ], + [ + 1750, + 629 + ], + [ + 1728, + 639 + ], + [ + 1694, + 641 + ], + [ + 1666, + 639 + ], + [ + 1642, + 637 + ], + [ + 1629, + 631 + ], + [ + 1612, + 633 + ], + [ + 1592, + 634 + ], + [ + 1575, + 626 + ], + [ + 1569, + 619 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000128_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000128_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ff96298d5cb76cd235a045bfaa9549023695ec46 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000128_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000128_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000128_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..348714861d8b45365d7e6391f7f048d5f3dbc671 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000128_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000129_000019_gtFine_labelIds.png 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000129_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..45c105a7d8339a8d43961a7e31e82eddbdf0a08b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000129_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000129_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000129_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..bbd8e0b43262c003c35a61f6aef313083ff40e81 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000129_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000129_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000129_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..71f1b4277d94014009b2e42debd430781c5d9a2d --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000129_000019_gtFine_polygons.json @@ -0,0 +1,8697 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 3, + 220 + ], + [ + 297, + 328 + ], + [ + 519, + 328 + ], + [ + 856, + 341 + ], + [ + 1037, + 340 + ], + [ + 1254, + 340 + ], + [ + 1385, + 273 + ], + [ + 1574, + 187 + ], + [ + 1698, + 252 + ], + [ + 1841, + 263 + ], + [ + 1958, + 232 + ], + [ + 2048, + 201 + ], + [ + 2048, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 223 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 998, + 399 + ], + [ + 1094, + 408 + ], + [ + 1252, + 423 + ], + [ + 1660, + 439 + ], + [ + 2014, + 441 + ], + [ + 2048, + 442 + ], + [ + 2047, + 864 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 436 + ], + [ + 450, + 450 + ], + [ + 599, + 422 + ], + [ + 742, + 422 + ], + [ + 814, + 424 + ], + [ + 914, + 424 + ], + [ + 958, + 405 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 865, + 480 + ], + [ + 807, + 482 + ], + [ + 749, + 483 + ], + [ + 731, + 483 + ], + [ + 701, + 485 + ], + [ + 533, + 497 + ], + [ + 515, + 497 + ], + [ + 392, + 501 + ], + [ + 309, + 501 + ], + [ + 290, + 499 + ], + [ + 369, + 488 + ], + [ + 483, + 471 + ], + [ + 827, + 451 + ], + [ + 848, + 444 + ], + [ + 894, + 443 + ], + [ + 880, + 456 + ], + [ + 883, + 468 + ], + [ + 875, + 477 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 201, + 670 + ], + [ + 382, + 607 + ], + [ + 525, + 610 + ], + [ + 582, + 583 + ], + [ + 613, + 567 + ], + [ + 624, + 556 + ], + [ + 622, + 547 + ], + [ + 529, + 540 + ], + [ + 495, + 539 + ], + [ + 384, + 537 + ], + [ + 312, + 536 + ], + [ + 268, + 538 + ], + [ + 72, + 549 + ], + [ + 0, + 555 + ], + [ + 0, + 682 + ], + [ + 0, + 734 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1043, + 465 + ], + [ + 1055, + 477 + ], + [ + 1094, + 478 + ], + [ + 1337, + 472 + ], + [ + 1405, + 473 + ], + [ + 1590, + 468 + ], + [ + 1657, + 468 + ], + [ + 1672, + 463 + ], + [ + 1646, + 449 + ], + [ + 1285, + 453 + ], + [ + 1256, + 447 + ], + [ + 1189, + 445 + ], + [ + 1068, + 455 + ], + [ + 1050, + 459 + ], + [ + 1042, + 462 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1865, + 855 + ], + [ + 1601, + 721 + ], + [ + 1427, + 641 + ], + [ + 1281, + 577 + ], + [ + 1234, + 560 + ], + [ + 1235, + 546 + ], + [ + 1250, + 540 + ], + [ + 1306, + 532 + ], + [ + 1699, + 514 + ], + [ + 1793, + 516 + ], + [ + 1871, + 511 + ], + [ + 1896, + 507 + ], + [ + 1980, + 502 + ], + [ + 2031, 
+ 493 + ], + [ + 2048, + 493 + ], + [ + 2048, + 864 + ], + [ + 1947, + 867 + ], + [ + 1893, + 860 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1449, + 625 + ], + [ + 1569, + 615 + ], + [ + 1627, + 591 + ], + [ + 1602, + 565 + ], + [ + 1595, + 535 + ], + [ + 1580, + 529 + ], + [ + 1554, + 531 + ], + [ + 1539, + 537 + ], + [ + 1538, + 549 + ], + [ + 1515, + 541 + ], + [ + 1500, + 531 + ], + [ + 1495, + 525 + ], + [ + 1595, + 522 + ], + [ + 1596, + 534 + ], + [ + 1623, + 537 + ], + [ + 1676, + 533 + ], + [ + 1709, + 522 + ], + [ + 1734, + 517 + ], + [ + 1737, + 515 + ], + [ + 1849, + 511 + ], + [ + 1861, + 512 + ], + [ + 1849, + 519 + ], + [ + 1868, + 528 + ], + [ + 1911, + 533 + ], + [ + 1959, + 535 + ], + [ + 2007, + 538 + ], + [ + 2048, + 547 + ], + [ + 2048, + 643 + ], + [ + 2037, + 643 + ], + [ + 2020, + 635 + ], + [ + 2007, + 644 + ], + [ + 1999, + 650 + ], + [ + 1989, + 641 + ], + [ + 1961, + 641 + ], + [ + 1930, + 651 + ], + [ + 1881, + 656 + ], + [ + 1860, + 650 + ], + [ + 1853, + 659 + ], + [ + 1867, + 671 + ], + [ + 1835, + 663 + ], + [ + 1804, + 662 + ], + [ + 1776, + 663 + ], + [ + 1733, + 673 + ], + [ + 1703, + 678 + ], + [ + 1685, + 679 + ], + [ + 1662, + 687 + ], + [ + 1656, + 693 + ], + [ + 1631, + 698 + ], + [ + 1577, + 710 + ], + [ + 1424, + 641 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 405, + 599 + ], + [ + 457, + 575 + ], + [ + 475, + 560 + ], + [ + 473, + 553 + ], + [ + 457, + 546 + ], + [ + 438, + 541 + ], + [ + 409, + 541 + ], + [ + 373, + 544 + ], + [ + 356, + 544 + ], + [ + 338, + 545 + ], + [ + 321, + 551 + ], + [ + 295, + 557 + ], + [ + 273, + 560 + ], + [ + 226, + 568 + ], + [ + 185, + 569 + ], + [ + 155, + 574 + ], + [ + 68, + 588 + ], + [ + 44, + 596 + ], + [ + 20, + 600 + ], + [ + 0, + 608 + ], + [ + 0, + 679 + ], + [ + 168, + 676 + ], + [ + 192, + 673 + ], + [ + 384, + 609 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1537, + 474 + ], + [ + 1724, + 468 + ], + [ + 1767, + 466 + ], + [ + 1904, + 477 + ], + [ + 1888, + 484 + ], + [ + 1557, + 488 + ], + [ + 1537, + 488 + ], + [ + 1520, + 475 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 731, + 490 + ], + [ + 617, + 493 + ], + [ + 518, + 498 + ], + [ + 509, + 494 + ], + [ + 519, + 484 + ], + [ + 477, + 484 + ], + [ + 459, + 486 + ], + [ + 444, + 488 + ], + [ + 428, + 489 + ], + [ + 399, + 490 + ], + [ + 371, + 492 + ], + [ + 331, + 498 + ], + [ + 281, + 503 + ], + [ + 199, + 502 + ], + [ + 156, + 502 + ], + [ + 158, + 478 + ], + [ + 176, + 478 + ], + [ + 206, + 479 + ], + [ + 247, + 478 + ], + [ + 285, + 476 + ], + [ + 328, + 477 + ], + [ + 367, + 482 + ], + [ + 391, + 484 + ], + [ + 423, + 482 + ], + [ + 436, + 482 + ], + [ + 473, + 477 + ], + [ + 544, + 478 + ], + [ + 563, + 474 + ], + [ + 617, + 476 + ], + [ + 656, + 472 + ], + [ + 684, + 472 + ], + [ + 706, + 474 + ], + [ + 724, + 479 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 422, + 274 + ], + [ + 418, + 269 + ], + [ + 399, + 267 + ], + [ + 394, + 272 + ], + [ + 392, + 287 + ], + [ + 389, + 287 + ], + [ + 384, + 281 + ], + [ + 381, + 278 + ], + [ + 376, + 278 + ], + [ + 374, + 268 + ], + [ + 360, + 255 + ], + [ + 344, + 252 + ], + [ + 338, + 257 + ], + [ + 335, + 265 + ], + [ + 333, + 275 + ], + [ + 325, + 276 + ], + [ + 330, + 266 + ], + [ + 330, + 260 + ], + [ + 323, + 255 + ], + [ + 326, + 250 + ], + [ + 345, + 242 + ], + [ + 343, + 225 + ], + [ + 339, + 219 + ], + [ + 322, + 218 + ], + [ + 329, + 209 + ], + [ + 328, + 203 + ], + [ + 342, + 192 + ], + [ + 341, + 182 + ], + [ 
+ 334, + 173 + ], + [ + 328, + 164 + ], + [ + 337, + 160 + ], + [ + 340, + 153 + ], + [ + 335, + 150 + ], + [ + 330, + 135 + ], + [ + 328, + 126 + ], + [ + 333, + 116 + ], + [ + 329, + 91 + ], + [ + 311, + 62 + ], + [ + 308, + 55 + ], + [ + 296, + 55 + ], + [ + 284, + 51 + ], + [ + 270, + 52 + ], + [ + 264, + 44 + ], + [ + 264, + 37 + ], + [ + 273, + 27 + ], + [ + 257, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 470 + ], + [ + 174, + 483 + ], + [ + 186, + 479 + ], + [ + 215, + 480 + ], + [ + 249, + 478 + ], + [ + 273, + 477 + ], + [ + 293, + 477 + ], + [ + 312, + 479 + ], + [ + 345, + 482 + ], + [ + 365, + 483 + ], + [ + 368, + 484 + ], + [ + 381, + 411 + ], + [ + 370, + 350 + ], + [ + 381, + 315 + ], + [ + 424, + 291 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 851, + 296 + ], + [ + 812, + 298 + ], + [ + 727, + 299 + ], + [ + 726, + 290 + ], + [ + 724, + 290 + ], + [ + 724, + 284 + ], + [ + 720, + 284 + ], + [ + 720, + 288 + ], + [ + 718, + 288 + ], + [ + 718, + 296 + ], + [ + 712, + 296 + ], + [ + 710, + 292 + ], + [ + 697, + 289 + ], + [ + 695, + 284 + ], + [ + 692, + 284 + ], + [ + 692, + 278 + ], + [ + 687, + 278 + ], + [ + 685, + 277 + ], + [ + 684, + 275 + ], + [ + 683, + 270 + ], + [ + 682, + 270 + ], + [ + 682, + 276 + ], + [ + 678, + 276 + ], + [ + 678, + 280 + ], + [ + 674, + 277 + ], + [ + 673, + 273 + ], + [ + 671, + 270 + ], + [ + 668, + 270 + ], + [ + 663, + 269 + ], + [ + 659, + 272 + ], + [ + 659, + 267 + ], + [ + 659, + 264 + ], + [ + 656, + 264 + ], + [ + 652, + 265 + ], + [ + 652, + 267 + ], + [ + 652, + 269 + ], + [ + 645, + 267 + ], + [ + 643, + 263 + ], + [ + 640, + 263 + ], + [ + 640, + 258 + ], + [ + 637, + 256 + ], + [ + 635, + 256 + ], + [ + 635, + 260 + ], + [ + 635, + 265 + ], + [ + 615, + 265 + ], + [ + 615, + 261 + ], + [ + 612, + 261 + ], + [ + 611, + 240 + ], + [ + 597, + 241 + ], + [ + 597, + 253 + ], + [ + 589, + 254 + ], + [ + 590, + 257 + ], + [ + 569, + 250 + ], + [ + 566, + 250 + ], + [ + 535, + 220 + ], + [ + 527, + 220 + ], + [ + 519, + 208 + ], + [ + 506, + 208 + ], + [ + 496, + 204 + ], + [ + 500, + 201 + ], + [ + 500, + 198 + ], + [ + 500, + 191 + ], + [ + 496, + 186 + ], + [ + 493, + 185 + ], + [ + 488, + 185 + ], + [ + 484, + 187 + ], + [ + 481, + 190 + ], + [ + 481, + 197 + ], + [ + 482, + 201 + ], + [ + 461, + 221 + ], + [ + 413, + 269 + ], + [ + 402, + 284 + ], + [ + 399, + 289 + ], + [ + 359, + 293 + ], + [ + 358, + 367 + ], + [ + 364, + 447 + ], + [ + 370, + 454 + ], + [ + 539, + 455 + ], + [ + 603, + 455 + ], + [ + 651, + 454 + ], + [ + 715, + 444 + ], + [ + 779, + 448 + ], + [ + 825, + 440 + ], + [ + 888, + 434 + ], + [ + 932, + 436 + ], + [ + 947, + 428 + ], + [ + 944, + 381 + ], + [ + 907, + 325 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1792, + 381 + ], + [ + 1783, + 311 + ], + [ + 1793, + 265 + ], + [ + 1806, + 258 + ], + [ + 1821, + 257 + ], + [ + 1820, + 242 + ], + [ + 1840, + 233 + ], + [ + 1851, + 228 + ], + [ + 1871, + 226 + ], + [ + 1895, + 192 + ], + [ + 1971, + 143 + ], + [ + 2048, + 139 + ], + [ + 2048, + 440 + ], + [ + 2025, + 455 + ], + [ + 1793, + 459 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1813, + 352 + ], + [ + 1813, + 265 + ], + [ + 1807, + 257 + ], + [ + 1814, + 243 + ], + [ + 1805, + 242 + ], + [ + 1806, + 231 + ], + [ + 1810, + 217 + ], + [ + 1805, + 216 + ], + [ + 1799, + 215 + ], + [ + 1786, + 218 + ], + [ + 1780, + 216 + ], + [ + 1774, + 217 + ], + [ + 1770, + 214 + ], + [ + 1764, + 214 + ], + [ + 1759, + 215 + ], + [ + 1758, + 219 + ], + [ + 1752, + 220 + ], + [ 
+ 1740, + 209 + ], + [ + 1767, + 199 + ], + [ + 1730, + 191 + ], + [ + 1734, + 188 + ], + [ + 1723, + 182 + ], + [ + 1722, + 163 + ], + [ + 1718, + 162 + ], + [ + 1706, + 163 + ], + [ + 1704, + 165 + ], + [ + 1703, + 173 + ], + [ + 1622, + 94 + ], + [ + 1621, + 80 + ], + [ + 1612, + 81 + ], + [ + 1611, + 57 + ], + [ + 1607, + 56 + ], + [ + 1609, + 82 + ], + [ + 1595, + 84 + ], + [ + 1595, + 102 + ], + [ + 1568, + 115 + ], + [ + 1538, + 117 + ], + [ + 1531, + 118 + ], + [ + 1532, + 135 + ], + [ + 1523, + 136 + ], + [ + 1519, + 140 + ], + [ + 1498, + 160 + ], + [ + 1486, + 172 + ], + [ + 1485, + 170 + ], + [ + 1476, + 170 + ], + [ + 1474, + 164 + ], + [ + 1472, + 164 + ], + [ + 1472, + 171 + ], + [ + 1466, + 172 + ], + [ + 1466, + 183 + ], + [ + 1458, + 189 + ], + [ + 1425, + 189 + ], + [ + 1422, + 191 + ], + [ + 1422, + 200 + ], + [ + 1419, + 201 + ], + [ + 1415, + 206 + ], + [ + 1414, + 209 + ], + [ + 1394, + 219 + ], + [ + 1392, + 212 + ], + [ + 1391, + 211 + ], + [ + 1387, + 205 + ], + [ + 1384, + 209 + ], + [ + 1384, + 214 + ], + [ + 1382, + 214 + ], + [ + 1382, + 220 + ], + [ + 1381, + 222 + ], + [ + 1380, + 231 + ], + [ + 1371, + 232 + ], + [ + 1368, + 221 + ], + [ + 1361, + 223 + ], + [ + 1361, + 228 + ], + [ + 1358, + 228 + ], + [ + 1358, + 236 + ], + [ + 1357, + 237 + ], + [ + 1352, + 235 + ], + [ + 1351, + 192 + ], + [ + 1347, + 192 + ], + [ + 1349, + 237 + ], + [ + 1346, + 241 + ], + [ + 1341, + 241 + ], + [ + 1341, + 233 + ], + [ + 1335, + 233 + ], + [ + 1325, + 235 + ], + [ + 1327, + 249 + ], + [ + 1323, + 248 + ], + [ + 1322, + 236 + ], + [ + 1307, + 239 + ], + [ + 1306, + 260 + ], + [ + 1299, + 261 + ], + [ + 1301, + 268 + ], + [ + 1294, + 268 + ], + [ + 1294, + 271 + ], + [ + 1290, + 275 + ], + [ + 1282, + 276 + ], + [ + 1278, + 277 + ], + [ + 1279, + 283 + ], + [ + 1271, + 283 + ], + [ + 1261, + 290 + ], + [ + 1252, + 291 + ], + [ + 1252, + 284 + ], + [ + 1245, + 284 + ], + [ + 1244, + 298 + ], + [ + 1243, + 294 + ], + [ + 1239, + 294 + ], + [ + 1233, + 296 + ], + [ + 1235, + 299 + ], + [ + 1236, + 304 + ], + [ + 1224, + 310 + ], + [ + 1224, + 305 + ], + [ + 1224, + 286 + ], + [ + 1218, + 284 + ], + [ + 1220, + 312 + ], + [ + 1175, + 326 + ], + [ + 1099, + 361 + ], + [ + 1072, + 371 + ], + [ + 1063, + 416 + ], + [ + 1066, + 437 + ], + [ + 1155, + 439 + ], + [ + 1234, + 455 + ], + [ + 1253, + 454 + ], + [ + 1279, + 455 + ], + [ + 1396, + 445 + ], + [ + 1779, + 436 + ], + [ + 2048, + 434 + ], + [ + 2048, + 370 + ], + [ + 2012, + 380 + ], + [ + 1987, + 392 + ], + [ + 1954, + 396 + ], + [ + 1941, + 396 + ], + [ + 1935, + 349 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1213, + 297 + ], + [ + 1206, + 293 + ], + [ + 1200, + 298 + ], + [ + 1200, + 304 + ], + [ + 1194, + 304 + ], + [ + 1192, + 296 + ], + [ + 1181, + 292 + ], + [ + 1183, + 286 + ], + [ + 1175, + 284 + ], + [ + 1167, + 284 + ], + [ + 1155, + 284 + ], + [ + 1151, + 278 + ], + [ + 1145, + 281 + ], + [ + 1146, + 272 + ], + [ + 1151, + 265 + ], + [ + 1144, + 261 + ], + [ + 1136, + 262 + ], + [ + 1132, + 253 + ], + [ + 1125, + 241 + ], + [ + 1118, + 243 + ], + [ + 1116, + 234 + ], + [ + 1110, + 228 + ], + [ + 1100, + 224 + ], + [ + 1093, + 228 + ], + [ + 1093, + 239 + ], + [ + 1085, + 237 + ], + [ + 1075, + 232 + ], + [ + 1075, + 224 + ], + [ + 1067, + 219 + ], + [ + 1055, + 219 + ], + [ + 1043, + 220 + ], + [ + 1043, + 228 + ], + [ + 1036, + 236 + ], + [ + 1035, + 243 + ], + [ + 1030, + 248 + ], + [ + 1021, + 256 + ], + [ + 1013, + 260 + ], + [ + 1009, + 260 + ], + [ + 1003, + 265 + ], + [ + 997, + 276 
+ ], + [ + 993, + 283 + ], + [ + 990, + 297 + ], + [ + 985, + 303 + ], + [ + 985, + 312 + ], + [ + 979, + 309 + ], + [ + 977, + 314 + ], + [ + 977, + 320 + ], + [ + 970, + 311 + ], + [ + 958, + 303 + ], + [ + 951, + 293 + ], + [ + 952, + 286 + ], + [ + 961, + 289 + ], + [ + 960, + 282 + ], + [ + 962, + 267 + ], + [ + 957, + 265 + ], + [ + 951, + 269 + ], + [ + 946, + 267 + ], + [ + 935, + 264 + ], + [ + 929, + 258 + ], + [ + 929, + 254 + ], + [ + 922, + 253 + ], + [ + 920, + 246 + ], + [ + 914, + 246 + ], + [ + 911, + 256 + ], + [ + 909, + 251 + ], + [ + 905, + 241 + ], + [ + 901, + 238 + ], + [ + 897, + 240 + ], + [ + 891, + 241 + ], + [ + 890, + 246 + ], + [ + 890, + 258 + ], + [ + 883, + 253 + ], + [ + 880, + 244 + ], + [ + 877, + 242 + ], + [ + 866, + 245 + ], + [ + 860, + 245 + ], + [ + 860, + 252 + ], + [ + 863, + 260 + ], + [ + 863, + 269 + ], + [ + 851, + 271 + ], + [ + 840, + 271 + ], + [ + 831, + 279 + ], + [ + 829, + 297 + ], + [ + 824, + 297 + ], + [ + 816, + 301 + ], + [ + 804, + 305 + ], + [ + 799, + 314 + ], + [ + 799, + 333 + ], + [ + 806, + 348 + ], + [ + 814, + 361 + ], + [ + 835, + 377 + ], + [ + 861, + 383 + ], + [ + 872, + 389 + ], + [ + 874, + 408 + ], + [ + 871, + 417 + ], + [ + 865, + 422 + ], + [ + 852, + 422 + ], + [ + 852, + 412 + ], + [ + 854, + 396 + ], + [ + 846, + 383 + ], + [ + 842, + 381 + ], + [ + 836, + 386 + ], + [ + 830, + 387 + ], + [ + 825, + 378 + ], + [ + 820, + 372 + ], + [ + 810, + 365 + ], + [ + 806, + 363 + ], + [ + 781, + 366 + ], + [ + 771, + 367 + ], + [ + 766, + 379 + ], + [ + 752, + 377 + ], + [ + 740, + 371 + ], + [ + 728, + 371 + ], + [ + 717, + 381 + ], + [ + 709, + 394 + ], + [ + 710, + 408 + ], + [ + 709, + 413 + ], + [ + 695, + 413 + ], + [ + 685, + 420 + ], + [ + 678, + 429 + ], + [ + 677, + 438 + ], + [ + 659, + 435 + ], + [ + 648, + 437 + ], + [ + 641, + 443 + ], + [ + 636, + 451 + ], + [ + 621, + 448 + ], + [ + 616, + 434 + ], + [ + 611, + 418 + ], + [ + 602, + 411 + ], + [ + 583, + 421 + ], + [ + 560, + 425 + ], + [ + 546, + 425 + ], + [ + 539, + 414 + ], + [ + 541, + 397 + ], + [ + 546, + 390 + ], + [ + 560, + 381 + ], + [ + 567, + 360 + ], + [ + 560, + 344 + ], + [ + 560, + 331 + ], + [ + 566, + 329 + ], + [ + 563, + 319 + ], + [ + 553, + 306 + ], + [ + 541, + 295 + ], + [ + 533, + 289 + ], + [ + 524, + 283 + ], + [ + 519, + 294 + ], + [ + 517, + 311 + ], + [ + 513, + 318 + ], + [ + 499, + 317 + ], + [ + 478, + 317 + ], + [ + 477, + 303 + ], + [ + 479, + 290 + ], + [ + 485, + 274 + ], + [ + 473, + 264 + ], + [ + 448, + 270 + ], + [ + 430, + 275 + ], + [ + 421, + 287 + ], + [ + 420, + 290 + ], + [ + 410, + 302 + ], + [ + 397, + 311 + ], + [ + 376, + 330 + ], + [ + 410, + 332 + ], + [ + 407, + 345 + ], + [ + 391, + 353 + ], + [ + 388, + 374 + ], + [ + 378, + 370 + ], + [ + 367, + 356 + ], + [ + 354, + 349 + ], + [ + 344, + 355 + ], + [ + 339, + 407 + ], + [ + 353, + 459 + ], + [ + 367, + 488 + ], + [ + 401, + 483 + ], + [ + 457, + 482 + ], + [ + 495, + 479 + ], + [ + 530, + 478 + ], + [ + 561, + 477 + ], + [ + 602, + 472 + ], + [ + 663, + 469 + ], + [ + 719, + 472 + ], + [ + 749, + 468 + ], + [ + 787, + 463 + ], + [ + 816, + 458 + ], + [ + 838, + 451 + ], + [ + 905, + 445 + ], + [ + 943, + 437 + ], + [ + 969, + 436 + ], + [ + 989, + 431 + ], + [ + 1006, + 429 + ], + [ + 1027, + 429 + ], + [ + 1037, + 430 + ], + [ + 1057, + 437 + ], + [ + 1121, + 437 + ], + [ + 1200, + 446 + ], + [ + 1226, + 453 + ], + [ + 1241, + 454 + ], + [ + 1252, + 450 + ], + [ + 1263, + 436 + ], + [ + 1265, + 420 + ], + [ + 1264, + 407 + ], + [ + 1257, + 397 
+ ], + [ + 1253, + 381 + ], + [ + 1247, + 374 + ], + [ + 1238, + 379 + ], + [ + 1234, + 389 + ], + [ + 1225, + 383 + ], + [ + 1231, + 364 + ], + [ + 1242, + 361 + ], + [ + 1249, + 341 + ], + [ + 1242, + 318 + ], + [ + 1232, + 312 + ], + [ + 1223, + 309 + ], + [ + 1216, + 302 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1005, + 425 + ], + [ + 999, + 425 + ], + [ + 997, + 425 + ], + [ + 995, + 421 + ], + [ + 990, + 419 + ], + [ + 987, + 423 + ], + [ + 982, + 420 + ], + [ + 975, + 420 + ], + [ + 969, + 423 + ], + [ + 964, + 423 + ], + [ + 955, + 423 + ], + [ + 949, + 423 + ], + [ + 943, + 423 + ], + [ + 941, + 430 + ], + [ + 939, + 425 + ], + [ + 934, + 425 + ], + [ + 927, + 428 + ], + [ + 926, + 431 + ], + [ + 922, + 429 + ], + [ + 916, + 429 + ], + [ + 909, + 429 + ], + [ + 909, + 429 + ], + [ + 902, + 431 + ], + [ + 901, + 436 + ], + [ + 898, + 440 + ], + [ + 897, + 443 + ], + [ + 897, + 451 + ], + [ + 898, + 462 + ], + [ + 899, + 463 + ], + [ + 902, + 457 + ], + [ + 912, + 454 + ], + [ + 932, + 453 + ], + [ + 949, + 449 + ], + [ + 960, + 449 + ], + [ + 964, + 446 + ], + [ + 971, + 439 + ], + [ + 979, + 436 + ], + [ + 985, + 435 + ], + [ + 989, + 432 + ], + [ + 995, + 433 + ], + [ + 1003, + 433 + ], + [ + 1004, + 430 + ], + [ + 1010, + 430 + ], + [ + 1012, + 430 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1023, + 417 + ], + [ + 1014, + 417 + ], + [ + 1012, + 423 + ], + [ + 1012, + 431 + ], + [ + 1019, + 432 + ], + [ + 1026, + 426 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1024, + 439 + ], + [ + 1021, + 439 + ], + [ + 1020, + 438 + ], + [ + 1012, + 438 + ], + [ + 1010, + 439 + ], + [ + 1007, + 439 + ], + [ + 1007, + 434 + ], + [ + 1008, + 430 + ], + [ + 1010, + 427 + ], + [ + 1013, + 426 + ], + [ + 1018, + 426 + ], + [ + 1023, + 427 + ], + [ + 1024, + 430 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 985, + 449 + ], + [ + 985, + 441 + ], + [ + 983, + 433 + ], + [ + 981, + 430 + ], + [ + 967, + 429 + ], + [ + 965, + 433 + ], + [ + 963, + 437 + ], + [ + 962, + 446 + ], + [ + 962, + 450 + ], + [ + 968, + 449 + ], + [ + 967, + 447 + ], + [ + 981, + 447 + ], + [ + 981, + 450 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 913, + 461 + ], + [ + 909, + 444 + ], + [ + 909, + 426 + ], + [ + 906, + 415 + ], + [ + 883, + 381 + ], + [ + 907, + 347 + ], + [ + 923, + 359 + ], + [ + 928, + 380 + ], + [ + 913, + 404 + ], + [ + 912, + 425 + ], + [ + 912, + 450 + ], + [ + 917, + 454 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 887, + 394 + ], + [ + 887, + 416 + ], + [ + 903, + 415 + ], + [ + 903, + 394 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 859, + 394 + ], + [ + 860, + 417 + ], + [ + 886, + 417 + ], + [ + 884, + 392 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 879, + 450 + ], + [ + 882, + 439 + ], + [ + 885, + 437 + ], + [ + 896, + 437 + ], + [ + 898, + 439 + ], + [ + 902, + 446 + ], + [ + 903, + 454 + ], + [ + 903, + 461 + ], + [ + 898, + 464 + ], + [ + 896, + 464 + ], + [ + 893, + 469 + ], + [ + 885, + 469 + ], + [ + 877, + 460 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 860, + 471 + ], + [ + 851, + 472 + ], + [ + 844, + 472 + ], + [ + 844, + 464 + ], + [ + 844, + 459 + ], + [ + 844, + 454 + ], + [ + 845, + 447 + ], + [ + 851, + 438 + ], + [ + 855, + 435 + ], + [ + 865, + 434 + ], + [ + 882, + 434 + ], + [ + 885, + 439 + ], + [ + 891, + 447 + ], + [ + 893, + 455 + ], + [ + 893, + 464 + ], + [ + 891, + 470 + ], + [ + 887, + 473 + ], + [ + 882, + 473 + ], + [ + 879, + 470 
+ ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 968, + 468 + ], + [ + 962, + 470 + ], + [ + 954, + 466 + ], + [ + 936, + 450 + ], + [ + 924, + 440 + ], + [ + 926, + 435 + ], + [ + 931, + 431 + ], + [ + 943, + 431 + ], + [ + 955, + 430 + ], + [ + 959, + 435 + ], + [ + 962, + 443 + ], + [ + 965, + 448 + ], + [ + 969, + 453 + ], + [ + 970, + 462 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 955, + 473 + ], + [ + 949, + 472 + ], + [ + 949, + 468 + ], + [ + 921, + 470 + ], + [ + 919, + 474 + ], + [ + 912, + 474 + ], + [ + 910, + 470 + ], + [ + 910, + 462 + ], + [ + 911, + 457 + ], + [ + 914, + 443 + ], + [ + 916, + 437 + ], + [ + 920, + 436 + ], + [ + 938, + 435 + ], + [ + 951, + 436 + ], + [ + 955, + 448 + ], + [ + 956, + 460 + ], + [ + 956, + 466 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 930, + 290 + ], + [ + 930, + 319 + ], + [ + 918, + 320 + ], + [ + 919, + 290 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 834, + 477 + ], + [ + 829, + 477 + ], + [ + 824, + 345 + ], + [ + 824, + 333 + ], + [ + 829, + 325 + ], + [ + 839, + 318 + ], + [ + 906, + 298 + ], + [ + 917, + 297 + ], + [ + 920, + 297 + ], + [ + 920, + 300 + ], + [ + 841, + 321 + ], + [ + 832, + 327 + ], + [ + 828, + 337 + ], + [ + 828, + 339 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 802, + 389 + ], + [ + 815, + 411 + ], + [ + 827, + 390 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 812, + 371 + ], + [ + 812, + 363 + ], + [ + 825, + 363 + ], + [ + 824, + 372 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 799, + 437 + ], + [ + 813, + 437 + ], + [ + 813, + 462 + ], + [ + 777, + 463 + ], + [ + 774, + 459 + ], + [ + 772, + 446 + ], + [ + 761, + 444 + ], + [ + 761, + 437 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 755, + 445 + ], + [ + 755, + 469 + ], + [ + 759, + 468 + ], + [ + 759, + 446 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 740, + 464 + ], + [ + 740, + 471 + ], + [ + 742, + 471 + ], + [ + 744, + 446 + ], + [ + 742, + 446 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 721, + 449 + ], + [ + 725, + 449 + ], + [ + 726, + 472 + ], + [ + 723, + 472 + ], + [ + 722, + 472 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 700, + 450 + ], + [ + 706, + 473 + ], + [ + 700, + 473 + ], + [ + 697, + 452 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 691, + 450 + ], + [ + 695, + 471 + ], + [ + 698, + 471 + ], + [ + 693, + 447 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 686, + 453 + ], + [ + 686, + 471 + ], + [ + 691, + 471 + ], + [ + 690, + 450 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 584, + 457 + ], + [ + 576, + 460 + ], + [ + 573, + 464 + ], + [ + 572, + 473 + ], + [ + 574, + 475 + ], + [ + 585, + 475 + ], + [ + 592, + 475 + ], + [ + 602, + 475 + ], + [ + 619, + 476 + ], + [ + 632, + 477 + ], + [ + 644, + 474 + ], + [ + 658, + 473 + ], + [ + 666, + 470 + ], + [ + 671, + 466 + ], + [ + 671, + 457 + ], + [ + 669, + 452 + ], + [ + 659, + 446 + ], + [ + 652, + 441 + ], + [ + 643, + 440 + ], + [ + 617, + 441 + ], + [ + 609, + 445 + ], + [ + 599, + 453 + ], + [ + 595, + 454 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 659, + 459 + ], + [ + 658, + 473 + ], + [ + 656, + 474 + ], + [ + 656, + 450 + ], + [ + 658, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 614, + 450 + ], + [ + 616, + 474 + ], + [ + 612, + 474 + ], + [ + 610, + 449 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 561, + 451 + ], + [ + 561, + 477 + ], + [ + 557, + 477 + ], 
+ [ + 558, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 751, + 407 + ], + [ + 752, + 478 + ], + [ + 755, + 478 + ], + [ + 753, + 399 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 751, + 391 + ], + [ + 747, + 392 + ], + [ + 745, + 395 + ], + [ + 745, + 399 + ], + [ + 747, + 404 + ], + [ + 751, + 404 + ], + [ + 757, + 403 + ], + [ + 757, + 397 + ], + [ + 757, + 394 + ], + [ + 755, + 392 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 745, + 412 + ], + [ + 745, + 403 + ], + [ + 758, + 403 + ], + [ + 757, + 412 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 764, + 469 + ], + [ + 764, + 482 + ], + [ + 759, + 482 + ], + [ + 757, + 384 + ], + [ + 763, + 385 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 772, + 387 + ], + [ + 773, + 393 + ], + [ + 771, + 395 + ], + [ + 773, + 396 + ], + [ + 773, + 399 + ], + [ + 770, + 401 + ], + [ + 773, + 402 + ], + [ + 774, + 406 + ], + [ + 771, + 408 + ], + [ + 762, + 409 + ], + [ + 762, + 389 + ], + [ + 762, + 389 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 520, + 242 + ], + [ + 536, + 467 + ], + [ + 539, + 468 + ], + [ + 524, + 216 + ], + [ + 520, + 215 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 506, + 217 + ], + [ + 505, + 213 + ], + [ + 520, + 214 + ], + [ + 523, + 218 + ], + [ + 519, + 221 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 520, + 417 + ], + [ + 519, + 393 + ], + [ + 528, + 393 + ], + [ + 530, + 420 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 548, + 483 + ], + [ + 534, + 485 + ], + [ + 534, + 485 + ], + [ + 529, + 197 + ], + [ + 530, + 193 + ], + [ + 528, + 101 + ], + [ + 529, + 95 + ], + [ + 530, + 26 + ], + [ + 534, + 27 + ], + [ + 534, + 97 + ], + [ + 538, + 100 + ], + [ + 538, + 196 + ], + [ + 540, + 201 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 447, + 375 + ], + [ + 451, + 376 + ], + [ + 458, + 490 + ], + [ + 450, + 490 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 461, + 381 + ], + [ + 461, + 417 + ], + [ + 453, + 416 + ], + [ + 452, + 383 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 367, + 370 + ], + [ + 372, + 482 + ], + [ + 410, + 481 + ], + [ + 409, + 367 + ], + [ + 409, + 366 + ], + [ + 380, + 367 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 367, + 237 + ], + [ + 371, + 279 + ], + [ + 358, + 276 + ], + [ + 355, + 277 + ], + [ + 353, + 271 + ], + [ + 352, + 271 + ], + [ + 349, + 265 + ], + [ + 354, + 264 + ], + [ + 354, + 260 + ], + [ + 349, + 260 + ], + [ + 349, + 255 + ], + [ + 354, + 255 + ], + [ + 355, + 249 + ], + [ + 349, + 248 + ], + [ + 349, + 243 + ], + [ + 353, + 242 + ], + [ + 353, + 233 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 352, + 231 + ], + [ + 339, + 230 + ], + [ + 339, + 241 + ], + [ + 334, + 241 + ], + [ + 335, + 242 + ], + [ + 338, + 243 + ], + [ + 339, + 251 + ], + [ + 334, + 251 + ], + [ + 336, + 254 + ], + [ + 340, + 256 + ], + [ + 340, + 264 + ], + [ + 336, + 262 + ], + [ + 336, + 265 + ], + [ + 339, + 267 + ], + [ + 342, + 278 + ], + [ + 353, + 278 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 416, + 492 + ], + [ + 417, + 497 + ], + [ + 408, + 497 + ], + [ + 408, + 464 + ], + [ + 405, + 368 + ], + [ + 404, + 322 + ], + [ + 401, + 281 + ], + [ + 392, + 268 + ], + [ + 385, + 256 + ], + [ + 373, + 249 + ], + [ + 358, + 247 + ], + [ + 353, + 247 + ], + [ + 353, + 239 + ], + [ + 365, + 242 + ], + [ + 381, + 248 + ], + [ + 396, + 263 + ], + [ + 406, + 275 + ], + [ + 
408, + 297 + ], + [ + 409, + 373 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 404, + 374 + ], + [ + 403, + 406 + ], + [ + 394, + 408 + ], + [ + 394, + 405 + ], + [ + 387, + 403 + ], + [ + 390, + 399 + ], + [ + 394, + 399 + ], + [ + 394, + 393 + ], + [ + 390, + 393 + ], + [ + 390, + 389 + ], + [ + 395, + 387 + ], + [ + 395, + 383 + ], + [ + 390, + 381 + ], + [ + 390, + 377 + ], + [ + 394, + 377 + ], + [ + 396, + 374 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 423, + 373 + ], + [ + 422, + 406 + ], + [ + 410, + 405 + ], + [ + 411, + 373 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 281, + 266 + ], + [ + 277, + 214 + ], + [ + 260, + 216 + ], + [ + 261, + 264 + ], + [ + 270, + 266 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 232, + 285 + ], + [ + 233, + 298 + ], + [ + 230, + 321 + ], + [ + 222, + 324 + ], + [ + 180, + 322 + ], + [ + 174, + 316 + ], + [ + 174, + 286 + ], + [ + 174, + 283 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 228, + 525 + ], + [ + 219, + 525 + ], + [ + 213, + 408 + ], + [ + 211, + 286 + ], + [ + 213, + 241 + ], + [ + 219, + 231 + ], + [ + 256, + 217 + ], + [ + 285, + 222 + ], + [ + 284, + 228 + ], + [ + 258, + 222 + ], + [ + 229, + 234 + ], + [ + 224, + 238 + ], + [ + 219, + 244 + ], + [ + 218, + 281 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 192, + 342 + ], + [ + 191, + 337 + ], + [ + 183, + 334 + ], + [ + 186, + 321 + ], + [ + 213, + 323 + ], + [ + 217, + 323 + ], + [ + 215, + 336 + ], + [ + 207, + 336 + ], + [ + 208, + 342 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 242, + 375 + ], + [ + 230, + 372 + ], + [ + 224, + 375 + ], + [ + 217, + 384 + ], + [ + 217, + 392 + ], + [ + 223, + 400 + ], + [ + 233, + 407 + ], + [ + 245, + 407 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 200, + 408 + ], + [ + 189, + 408 + ], + [ + 180, + 397 + ], + [ + 182, + 385 + ], + [ + 187, + 374 + ], + [ + 193, + 373 + ], + [ + 202, + 377 + ], + [ + 210, + 381 + ], + [ + 212, + 388 + ], + [ + 213, + 399 + ], + [ + 208, + 406 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 185, + 460 + ], + [ + 139, + 457 + ], + [ + 138, + 441 + ], + [ + 102, + 441 + ], + [ + 71, + 443 + ], + [ + 73, + 513 + ], + [ + 126, + 512 + ], + [ + 131, + 514 + ], + [ + 131, + 517 + ], + [ + 131, + 518 + ], + [ + 155, + 518 + ], + [ + 176, + 517 + ], + [ + 186, + 519 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 19, + 445 + ], + [ + 63, + 443 + ], + [ + 65, + 521 + ], + [ + 63, + 514 + ], + [ + 25, + 514 + ], + [ + 21, + 517 + ], + [ + 18, + 517 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 70, + 325 + ], + [ + 73, + 531 + ], + [ + 62, + 531 + ], + [ + 63, + 312 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 51, + 321 + ], + [ + 55, + 418 + ], + [ + 68, + 418 + ], + [ + 62, + 309 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 47, + 519 + ], + [ + 35, + 520 + ], + [ + 24, + 84 + ], + [ + 37, + 81 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 140, + 100 + ], + [ + 133, + 66 + ], + [ + 138, + 29 + ], + [ + 140, + 0 + ], + [ + 32, + 0 + ], + [ + 40, + 327 + ], + [ + 62, + 332 + ], + [ + 77, + 338 + ], + [ + 117, + 343 + ], + [ + 157, + 351 + ], + [ + 161, + 312 + ], + [ + 149, + 285 + ], + [ + 115, + 200 + ], + [ + 95, + 159 + ], + [ + 82, + 132 + ], + [ + 63, + 119 + ], + [ + 49, + 100 + ], + [ + 48, + 93 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 24, + 610 + ], + [ + 0, + 608 + ], + [ + 0, + 0 
+ ], + [ + 136, + 0 + ], + [ + 144, + 0 + ], + [ + 144, + 5 + ], + [ + 143, + 11 + ], + [ + 140, + 26 + ], + [ + 114, + 33 + ], + [ + 100, + 48 + ], + [ + 80, + 58 + ], + [ + 68, + 71 + ], + [ + 68, + 86 + ], + [ + 52, + 96 + ], + [ + 52, + 98 + ], + [ + 52, + 109 + ], + [ + 34, + 113 + ], + [ + 15, + 120 + ], + [ + 15, + 137 + ], + [ + 16, + 151 + ], + [ + 22, + 178 + ], + [ + 21, + 207 + ], + [ + 26, + 229 + ], + [ + 26, + 244 + ], + [ + 17, + 262 + ], + [ + 30, + 283 + ], + [ + 30, + 337 + ], + [ + 30, + 408 + ], + [ + 28, + 437 + ], + [ + 22, + 485 + ], + [ + 23, + 519 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 85, + 639 + ], + [ + 81, + 628 + ], + [ + 73, + 622 + ], + [ + 59, + 619 + ], + [ + 3, + 621 + ], + [ + 0, + 622 + ], + [ + 0, + 633 + ], + [ + 3, + 631 + ], + [ + 10, + 628 + ], + [ + 44, + 626 + ], + [ + 56, + 626 + ], + [ + 65, + 627 + ], + [ + 73, + 634 + ], + [ + 75, + 638 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 118, + 639 + ], + [ + 117, + 626 + ], + [ + 120, + 613 + ], + [ + 124, + 607 + ], + [ + 188, + 592 + ], + [ + 196, + 596 + ], + [ + 200, + 602 + ], + [ + 201, + 614 + ], + [ + 201, + 621 + ], + [ + 201, + 624 + ], + [ + 195, + 623 + ], + [ + 194, + 610 + ], + [ + 189, + 602 + ], + [ + 187, + 600 + ], + [ + 134, + 614 + ], + [ + 131, + 614 + ], + [ + 127, + 624 + ], + [ + 128, + 630 + ], + [ + 128, + 634 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 245, + 609 + ], + [ + 246, + 593 + ], + [ + 249, + 581 + ], + [ + 252, + 574 + ], + [ + 298, + 566 + ], + [ + 305, + 570 + ], + [ + 309, + 583 + ], + [ + 307, + 594 + ], + [ + 303, + 600 + ], + [ + 301, + 588 + ], + [ + 300, + 575 + ], + [ + 300, + 572 + ], + [ + 257, + 581 + ], + [ + 256, + 588 + ], + [ + 254, + 602 + ], + [ + 254, + 610 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 702, + 47 + ], + [ + 702, + 40 + ], + [ + 713, + 38 + ], + [ + 726, + 44 + ], + [ + 726, + 50 + ], + [ + 718, + 53 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 769, + 111 + ], + [ + 768, + 104 + ], + [ + 776, + 104 + ], + [ + 790, + 109 + ], + [ + 789, + 113 + ], + [ + 782, + 117 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1017, + 143 + ], + [ + 1017, + 138 + ], + [ + 1027, + 137 + ], + [ + 1034, + 139 + ], + [ + 1034, + 143 + ], + [ + 1030, + 145 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1040, + 450 + ], + [ + 1035, + 447 + ], + [ + 1035, + 442 + ], + [ + 1035, + 440 + ], + [ + 1032, + 437 + ], + [ + 1030, + 435 + ], + [ + 1030, + 431 + ], + [ + 1032, + 427 + ], + [ + 1037, + 425 + ], + [ + 1047, + 425 + ], + [ + 1054, + 425 + ], + [ + 1058, + 425 + ], + [ + 1067, + 427 + ], + [ + 1072, + 427 + ], + [ + 1082, + 425 + ], + [ + 1086, + 426 + ], + [ + 1092, + 426 + ], + [ + 1105, + 426 + ], + [ + 1119, + 426 + ], + [ + 1135, + 427 + ], + [ + 1148, + 428 + ], + [ + 1149, + 433 + ], + [ + 1149, + 442 + ], + [ + 1143, + 449 + ], + [ + 1118, + 450 + ], + [ + 1085, + 453 + ], + [ + 1072, + 457 + ], + [ + 1059, + 459 + ], + [ + 1055, + 460 + ], + [ + 1051, + 456 + ], + [ + 1047, + 456 + ], + [ + 1045, + 450 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1048, + 415 + ], + [ + 1052, + 425 + ], + [ + 1062, + 425 + ], + [ + 1062, + 421 + ], + [ + 1063, + 413 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1096, + 468 + ], + [ + 1075, + 467 + ], + [ + 1067, + 466 + ], + [ + 1065, + 458 + ], + [ + 1068, + 446 + ], + [ + 1075, + 442 + ], + [ + 1096, + 437 + ], + [ + 1115, + 435 + ], + [ + 1129, + 436 + ], + [ + 1139, + 437 
+ ], + [ + 1141, + 452 + ], + [ + 1137, + 465 + ], + [ + 1128, + 467 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1069, + 469 + ], + [ + 1065, + 469 + ], + [ + 1064, + 408 + ], + [ + 1067, + 408 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1076, + 417 + ], + [ + 1077, + 392 + ], + [ + 1055, + 395 + ], + [ + 1054, + 416 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1149, + 349 + ], + [ + 1151, + 433 + ], + [ + 1155, + 433 + ], + [ + 1153, + 348 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1169, + 359 + ], + [ + 1169, + 349 + ], + [ + 1153, + 349 + ], + [ + 1153, + 359 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1238, + 427 + ], + [ + 1231, + 428 + ], + [ + 1235, + 447 + ], + [ + 1238, + 454 + ], + [ + 1255, + 454 + ], + [ + 1260, + 453 + ], + [ + 1271, + 452 + ], + [ + 1275, + 443 + ], + [ + 1271, + 426 + ], + [ + 1263, + 425 + ], + [ + 1250, + 424 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1264, + 123 + ], + [ + 1271, + 124 + ], + [ + 1278, + 463 + ], + [ + 1272, + 462 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1171, + 505 + ], + [ + 1167, + 509 + ], + [ + 1158, + 509 + ], + [ + 1153, + 506 + ], + [ + 1150, + 499 + ], + [ + 1150, + 497 + ], + [ + 1134, + 494 + ], + [ + 1133, + 501 + ], + [ + 1133, + 504 + ], + [ + 1127, + 504 + ], + [ + 1119, + 501 + ], + [ + 1116, + 491 + ], + [ + 1114, + 477 + ], + [ + 1114, + 466 + ], + [ + 1118, + 456 + ], + [ + 1123, + 444 + ], + [ + 1126, + 438 + ], + [ + 1133, + 434 + ], + [ + 1152, + 430 + ], + [ + 1183, + 426 + ], + [ + 1195, + 427 + ], + [ + 1207, + 430 + ], + [ + 1220, + 439 + ], + [ + 1228, + 450 + ], + [ + 1234, + 447 + ], + [ + 1240, + 447 + ], + [ + 1240, + 452 + ], + [ + 1239, + 454 + ], + [ + 1248, + 466 + ], + [ + 1250, + 475 + ], + [ + 1256, + 482 + ], + [ + 1256, + 493 + ], + [ + 1250, + 504 + ], + [ + 1242, + 507 + ], + [ + 1234, + 506 + ], + [ + 1232, + 503 + ], + [ + 1229, + 498 + ], + [ + 1192, + 499 + ], + [ + 1183, + 501 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1287, + 389 + ], + [ + 1374, + 386 + ], + [ + 1389, + 384 + ], + [ + 1384, + 367 + ], + [ + 1392, + 339 + ], + [ + 1412, + 325 + ], + [ + 1421, + 335 + ], + [ + 1421, + 311 + ], + [ + 1423, + 276 + ], + [ + 1427, + 253 + ], + [ + 1430, + 216 + ], + [ + 1434, + 189 + ], + [ + 1449, + 159 + ], + [ + 1460, + 161 + ], + [ + 1467, + 193 + ], + [ + 1472, + 210 + ], + [ + 1473, + 240 + ], + [ + 1487, + 293 + ], + [ + 1492, + 332 + ], + [ + 1501, + 355 + ], + [ + 1501, + 381 + ], + [ + 1534, + 381 + ], + [ + 1577, + 382 + ], + [ + 1628, + 376 + ], + [ + 1646, + 377 + ], + [ + 1645, + 356 + ], + [ + 1639, + 312 + ], + [ + 1628, + 269 + ], + [ + 1614, + 245 + ], + [ + 1563, + 213 + ], + [ + 1536, + 201 + ], + [ + 1526, + 173 + ], + [ + 1517, + 131 + ], + [ + 1529, + 107 + ], + [ + 1529, + 81 + ], + [ + 1540, + 77 + ], + [ + 1551, + 82 + ], + [ + 1571, + 71 + ], + [ + 1557, + 61 + ], + [ + 1557, + 41 + ], + [ + 1565, + 28 + ], + [ + 1593, + 34 + ], + [ + 1614, + 41 + ], + [ + 1629, + 41 + ], + [ + 1628, + 35 + ], + [ + 1621, + 29 + ], + [ + 1628, + 18 + ], + [ + 1627, + 12 + ], + [ + 1616, + 7 + ], + [ + 1617, + 0 + ], + [ + 1792, + 0 + ], + [ + 1782, + 0 + ], + [ + 1768, + 20 + ], + [ + 1751, + 44 + ], + [ + 1752, + 60 + ], + [ + 1750, + 72 + ], + [ + 1757, + 86 + ], + [ + 1762, + 100 + ], + [ + 1737, + 119 + ], + [ + 1717, + 140 + ], + [ + 1712, + 152 + ], + [ + 1727, + 144 + ], + [ + 1741, + 147 + ], + [ + 1741, + 162 + ], + [ + 1726, + 187 + ], + [ + 1714, 
+ 199 + ], + [ + 1718, + 214 + ], + [ + 1707, + 232 + ], + [ + 1690, + 242 + ], + [ + 1675, + 266 + ], + [ + 1669, + 286 + ], + [ + 1660, + 367 + ], + [ + 1667, + 376 + ], + [ + 1728, + 374 + ], + [ + 1754, + 376 + ], + [ + 1784, + 377 + ], + [ + 1820, + 370 + ], + [ + 1847, + 370 + ], + [ + 1872, + 370 + ], + [ + 1880, + 376 + ], + [ + 1889, + 425 + ], + [ + 1889, + 437 + ], + [ + 1871, + 442 + ], + [ + 1820, + 448 + ], + [ + 1766, + 446 + ], + [ + 1701, + 445 + ], + [ + 1631, + 448 + ], + [ + 1562, + 448 + ], + [ + 1473, + 452 + ], + [ + 1373, + 457 + ], + [ + 1354, + 459 + ], + [ + 1311, + 461 + ], + [ + 1283, + 461 + ], + [ + 1278, + 459 + ], + [ + 1277, + 398 + ], + [ + 1280, + 393 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1455, + 249 + ], + [ + 1448, + 248 + ], + [ + 1435, + 250 + ], + [ + 1427, + 255 + ], + [ + 1414, + 269 + ], + [ + 1410, + 282 + ], + [ + 1411, + 303 + ], + [ + 1411, + 317 + ], + [ + 1411, + 442 + ], + [ + 1406, + 442 + ], + [ + 1404, + 351 + ], + [ + 1405, + 307 + ], + [ + 1405, + 279 + ], + [ + 1414, + 260 + ], + [ + 1427, + 249 + ], + [ + 1444, + 246 + ], + [ + 1459, + 245 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1452, + 270 + ], + [ + 1452, + 237 + ], + [ + 1469, + 236 + ], + [ + 1470, + 268 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1394, + 388 + ], + [ + 1395, + 359 + ], + [ + 1405, + 359 + ], + [ + 1405, + 388 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1421, + 388 + ], + [ + 1410, + 387 + ], + [ + 1408, + 358 + ], + [ + 1418, + 358 + ], + [ + 1418, + 360 + ], + [ + 1421, + 361 + ], + [ + 1423, + 363 + ], + [ + 1422, + 368 + ], + [ + 1418, + 370 + ], + [ + 1419, + 372 + ], + [ + 1423, + 372 + ], + [ + 1422, + 376 + ], + [ + 1420, + 379 + ], + [ + 1419, + 381 + ], + [ + 1423, + 382 + ], + [ + 1423, + 385 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1550, + 281 + ], + [ + 1556, + 461 + ], + [ + 1548, + 460 + ], + [ + 1541, + 283 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1559, + 355 + ], + [ + 1557, + 309 + ], + [ + 1537, + 311 + ], + [ + 1540, + 356 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1585, + 418 + ], + [ + 1575, + 423 + ], + [ + 1562, + 426 + ], + [ + 1563, + 438 + ], + [ + 1573, + 444 + ], + [ + 1588, + 445 + ], + [ + 1609, + 444 + ], + [ + 1628, + 443 + ], + [ + 1654, + 443 + ], + [ + 1662, + 444 + ], + [ + 1673, + 442 + ], + [ + 1671, + 426 + ], + [ + 1665, + 415 + ], + [ + 1656, + 407 + ], + [ + 1648, + 402 + ], + [ + 1630, + 401 + ], + [ + 1610, + 405 + ], + [ + 1598, + 410 + ], + [ + 1589, + 417 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1529, + 410 + ], + [ + 1478, + 410 + ], + [ + 1453, + 414 + ], + [ + 1443, + 417 + ], + [ + 1427, + 425 + ], + [ + 1415, + 433 + ], + [ + 1411, + 435 + ], + [ + 1389, + 438 + ], + [ + 1375, + 441 + ], + [ + 1372, + 452 + ], + [ + 1372, + 461 + ], + [ + 1381, + 466 + ], + [ + 1411, + 467 + ], + [ + 1475, + 468 + ], + [ + 1512, + 468 + ], + [ + 1526, + 473 + ], + [ + 1535, + 470 + ], + [ + 1542, + 464 + ], + [ + 1551, + 456 + ], + [ + 1551, + 441 + ], + [ + 1545, + 430 + ], + [ + 1538, + 417 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1783, + 415 + ], + [ + 1784, + 447 + ], + [ + 1799, + 448 + ], + [ + 1797, + 416 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1807, + 409 + ], + [ + 1808, + 448 + ], + [ + 1856, + 447 + ], + [ + 1855, + 409 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1906, + 423 + ], + [ + 1902, + 404 + ], + [ + 1917, + 
395 + ], + [ + 1939, + 395 + ], + [ + 1968, + 398 + ], + [ + 1970, + 378 + ], + [ + 1971, + 347 + ], + [ + 1971, + 333 + ], + [ + 1913, + 250 + ], + [ + 1872, + 218 + ], + [ + 1856, + 220 + ], + [ + 1852, + 228 + ], + [ + 1832, + 229 + ], + [ + 1830, + 238 + ], + [ + 1824, + 247 + ], + [ + 1808, + 242 + ], + [ + 1804, + 217 + ], + [ + 1812, + 212 + ], + [ + 1827, + 206 + ], + [ + 1838, + 197 + ], + [ + 1829, + 184 + ], + [ + 1808, + 182 + ], + [ + 1802, + 189 + ], + [ + 1785, + 192 + ], + [ + 1764, + 185 + ], + [ + 1754, + 165 + ], + [ + 1753, + 146 + ], + [ + 1754, + 134 + ], + [ + 1752, + 120 + ], + [ + 1769, + 113 + ], + [ + 1790, + 113 + ], + [ + 1808, + 100 + ], + [ + 1806, + 99 + ], + [ + 1779, + 102 + ], + [ + 1764, + 88 + ], + [ + 1765, + 63 + ], + [ + 1732, + 62 + ], + [ + 1739, + 17 + ], + [ + 1773, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 269 + ], + [ + 2045, + 368 + ], + [ + 2022, + 373 + ], + [ + 2008, + 384 + ], + [ + 2009, + 434 + ], + [ + 1905, + 435 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1896, + 421 + ], + [ + 1883, + 429 + ], + [ + 1863, + 431 + ], + [ + 1847, + 434 + ], + [ + 1840, + 441 + ], + [ + 1838, + 447 + ], + [ + 1846, + 456 + ], + [ + 1892, + 456 + ], + [ + 1981, + 458 + ], + [ + 2006, + 456 + ], + [ + 2011, + 450 + ], + [ + 2016, + 443 + ], + [ + 2018, + 434 + ], + [ + 2013, + 426 + ], + [ + 2004, + 423 + ], + [ + 1992, + 425 + ], + [ + 1981, + 424 + ], + [ + 1978, + 424 + ], + [ + 1967, + 418 + ], + [ + 1955, + 412 + ], + [ + 1944, + 411 + ], + [ + 1920, + 413 + ], + [ + 1904, + 417 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1958, + 336 + ], + [ + 1960, + 454 + ], + [ + 1935, + 454 + ], + [ + 1939, + 338 + ], + [ + 1939, + 327 + ], + [ + 1957, + 325 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 2013, + 187 + ], + [ + 2013, + 330 + ], + [ + 1993, + 329 + ], + [ + 1898, + 339 + ], + [ + 1896, + 205 + ], + [ + 1982, + 187 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1584, + 460 + ], + [ + 1575, + 464 + ], + [ + 1583, + 469 + ], + [ + 1614, + 470 + ], + [ + 1663, + 468 + ], + [ + 1697, + 463 + ], + [ + 1729, + 463 + ], + [ + 1725, + 465 + ], + [ + 1733, + 474 + ], + [ + 1760, + 477 + ], + [ + 1852, + 479 + ], + [ + 1886, + 482 + ], + [ + 1984, + 482 + ], + [ + 2032, + 481 + ], + [ + 2048, + 478 + ], + [ + 2048, + 447 + ], + [ + 2047, + 442 + ], + [ + 2009, + 447 + ], + [ + 1989, + 447 + ], + [ + 1968, + 448 + ], + [ + 1955, + 450 + ], + [ + 1939, + 447 + ], + [ + 1920, + 446 + ], + [ + 1902, + 451 + ], + [ + 1876, + 453 + ], + [ + 1852, + 453 + ], + [ + 1842, + 449 + ], + [ + 1819, + 447 + ], + [ + 1799, + 447 + ], + [ + 1776, + 446 + ], + [ + 1747, + 446 + ], + [ + 1717, + 447 + ], + [ + 1687, + 447 + ], + [ + 1645, + 447 + ], + [ + 1607, + 451 + ], + [ + 1586, + 456 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1591, + 294 + ], + [ + 1595, + 469 + ], + [ + 1602, + 470 + ], + [ + 1594, + 269 + ], + [ + 1589, + 268 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1582, + 335 + ], + [ + 1581, + 372 + ], + [ + 1588, + 375 + ], + [ + 1591, + 374 + ], + [ + 1592, + 333 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1575, + 341 + ], + [ + 1578, + 351 + ], + [ + 1578, + 358 + ], + [ + 1574, + 362 + ], + [ + 1572, + 362 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1429, + 341 + ], + [ + 1432, + 462 + ], + [ + 1435, + 462 + ], + [ + 1432, + 341 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1428, + 487 + ], + [ + 1382, + 487 + ], + [ + 1363, + 480 + ], + 
[ + 1365, + 469 + ], + [ + 1375, + 467 + ], + [ + 1393, + 461 + ], + [ + 1414, + 459 + ], + [ + 1430, + 459 + ], + [ + 1461, + 459 + ], + [ + 1488, + 464 + ], + [ + 1519, + 470 + ], + [ + 1536, + 477 + ], + [ + 1543, + 487 + ], + [ + 1534, + 491 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1432, + 339 + ], + [ + 1423, + 340 + ], + [ + 1421, + 349 + ], + [ + 1421, + 361 + ], + [ + 1431, + 363 + ], + [ + 1432, + 356 + ], + [ + 1433, + 344 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1485, + 528 + ], + [ + 1472, + 528 + ], + [ + 1466, + 259 + ], + [ + 1474, + 260 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1460, + 311 + ], + [ + 1446, + 314 + ], + [ + 1446, + 321 + ], + [ + 1447, + 322 + ], + [ + 1457, + 325 + ], + [ + 1457, + 332 + ], + [ + 1446, + 335 + ], + [ + 1446, + 339 + ], + [ + 1453, + 343 + ], + [ + 1459, + 344 + ], + [ + 1460, + 350 + ], + [ + 1449, + 353 + ], + [ + 1446, + 355 + ], + [ + 1447, + 360 + ], + [ + 1460, + 363 + ], + [ + 1464, + 364 + ], + [ + 1471, + 366 + ], + [ + 1468, + 310 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1478, + 310 + ], + [ + 1477, + 357 + ], + [ + 1474, + 369 + ], + [ + 1462, + 367 + ], + [ + 1462, + 320 + ], + [ + 1462, + 310 + ], + [ + 1467, + 306 + ], + [ + 1469, + 306 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1061, + 68 + ], + [ + 1101, + 67 + ], + [ + 1111, + 67 + ], + [ + 1276, + 100 + ], + [ + 1293, + 106 + ], + [ + 1305, + 116 + ], + [ + 1316, + 129 + ], + [ + 1323, + 147 + ], + [ + 1324, + 167 + ], + [ + 1327, + 270 + ], + [ + 1322, + 278 + ], + [ + 1329, + 532 + ], + [ + 1341, + 535 + ], + [ + 1337, + 274 + ], + [ + 1333, + 269 + ], + [ + 1331, + 157 + ], + [ + 1329, + 142 + ], + [ + 1315, + 118 + ], + [ + 1302, + 104 + ], + [ + 1282, + 96 + ], + [ + 1104, + 62 + ], + [ + 1055, + 62 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1022, + 48 + ], + [ + 1024, + 117 + ], + [ + 1026, + 123 + ], + [ + 1029, + 125 + ], + [ + 1032, + 127 + ], + [ + 1036, + 127 + ], + [ + 1060, + 126 + ], + [ + 1064, + 124 + ], + [ + 1067, + 121 + ], + [ + 1069, + 113 + ], + [ + 1065, + 48 + ], + [ + 1065, + 45 + ], + [ + 1061, + 42 + ], + [ + 1057, + 40 + ], + [ + 1030, + 40 + ], + [ + 1027, + 42 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1396, + 298 + ], + [ + 1369, + 359 + ], + [ + 1335, + 299 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1317, + 293 + ], + [ + 1339, + 295 + ], + [ + 1340, + 356 + ], + [ + 1318, + 357 + ], + [ + 1318, + 353 + ], + [ + 1312, + 348 + ], + [ + 1318, + 343 + ], + [ + 1320, + 340 + ], + [ + 1319, + 331 + ], + [ + 1314, + 328 + ], + [ + 1317, + 322 + ], + [ + 1319, + 318 + ], + [ + 1319, + 311 + ], + [ + 1314, + 309 + ], + [ + 1314, + 300 + ], + [ + 1317, + 298 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1383, + 548 + ], + [ + 1370, + 548 + ], + [ + 1361, + 1 + ], + [ + 1371, + 1 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1628, + 527 + ], + [ + 1619, + 527 + ], + [ + 1612, + 201 + ], + [ + 1621, + 201 + ], + [ + 1622, + 201 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1461, + 211 + ], + [ + 1464, + 287 + ], + [ + 1465, + 297 + ], + [ + 1470, + 298 + ], + [ + 1476, + 298 + ], + [ + 1602, + 292 + ], + [ + 1610, + 292 + ], + [ + 1612, + 289 + ], + [ + 1612, + 282 + ], + [ + 1612, + 204 + ], + [ + 1610, + 200 + ], + [ + 1605, + 199 + ], + [ + 1474, + 206 + ], + [ + 1466, + 207 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1729, + 151 + ], 
+ [ + 1737, + 455 + ], + [ + 1731, + 455 + ], + [ + 1725, + 148 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1734, + 153 + ], + [ + 1734, + 145 + ], + [ + 1720, + 145 + ], + [ + 1722, + 155 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1681, + 518 + ], + [ + 1706, + 519 + ], + [ + 1698, + 199 + ], + [ + 1695, + 188 + ], + [ + 1690, + 0 + ], + [ + 1672, + 0 + ], + [ + 1676, + 190 + ], + [ + 1671, + 196 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1956, + 688 + ], + [ + 1917, + 688 + ], + [ + 1919, + 365 + ], + [ + 1912, + 240 + ], + [ + 1899, + 77 + ], + [ + 1884, + 38 + ], + [ + 1862, + 44 + ], + [ + 1838, + 48 + ], + [ + 1811, + 29 + ], + [ + 1789, + 37 + ], + [ + 1769, + 43 + ], + [ + 1737, + 46 + ], + [ + 1726, + 44 + ], + [ + 1726, + 56 + ], + [ + 1737, + 80 + ], + [ + 1714, + 105 + ], + [ + 1693, + 109 + ], + [ + 1667, + 94 + ], + [ + 1657, + 71 + ], + [ + 1670, + 50 + ], + [ + 1667, + 40 + ], + [ + 1643, + 52 + ], + [ + 1626, + 25 + ], + [ + 1625, + 0 + ], + [ + 1947, + 0 + ], + [ + 1943, + 36 + ], + [ + 1921, + 71 + ], + [ + 1917, + 126 + ], + [ + 1929, + 325 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1500, + 596 + ], + [ + 1515, + 582 + ], + [ + 1528, + 568 + ], + [ + 1533, + 562 + ], + [ + 1542, + 561 + ], + [ + 1551, + 559 + ], + [ + 1551, + 555 + ], + [ + 1543, + 556 + ], + [ + 1531, + 556 + ], + [ + 1526, + 555 + ], + [ + 1525, + 562 + ], + [ + 1525, + 567 + ], + [ + 1518, + 567 + ], + [ + 1520, + 556 + ], + [ + 1521, + 548 + ], + [ + 1533, + 547 + ], + [ + 1547, + 549 + ], + [ + 1557, + 553 + ], + [ + 1558, + 561 + ], + [ + 1562, + 569 + ], + [ + 1562, + 578 + ], + [ + 1559, + 581 + ], + [ + 1555, + 572 + ], + [ + 1552, + 563 + ], + [ + 1543, + 563 + ], + [ + 1536, + 571 + ], + [ + 1520, + 590 + ], + [ + 1508, + 597 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2034, + 502 + ], + [ + 2020, + 500 + ], + [ + 2015, + 6 + ], + [ + 2015, + 0 + ], + [ + 2021, + 0 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000130_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000130_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..e6ee4192839ea74751518607bbd1e15cd8c5c8b3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000130_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000131_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000131_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ed354ad96ca4c021037507587f24dc842c778456 Binary files /dev/null and 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000131_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000131_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000131_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..b903b27e11d4701ef3f9ca92b217ce0a842973da --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000131_000019_gtFine_polygons.json @@ -0,0 +1,6526 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 625, + 13 + ], + [ + 853, + 362 + ], + [ + 1171, + 338 + ], + [ + 1268, + 373 + ], + [ + 1320, + 390 + ], + [ + 1382, + 343 + ], + [ + 1476, + 313 + ], + [ + 1573, + 298 + ], + [ + 1757, + 290 + ], + [ + 1910, + 270 + ], + [ + 2048, + 193 + ], + [ + 2048, + 0 + ], + [ + 611, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1891, + 421 + ], + [ + 1422, + 419 + ], + [ + 1198, + 420 + ], + [ + 1059, + 424 + ], + [ + 901, + 441 + ], + [ + 500, + 453 + ], + [ + 0, + 479 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 436 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 989, + 460 + ], + [ + 974, + 458 + ], + [ + 966, + 457 + ], + [ + 965, + 448 + ], + [ + 971, + 442 + ], + [ + 986, + 440 + ], + [ + 1000, + 440 + ], + [ + 1011, + 440 + ], + [ + 1025, + 441 + ], + [ + 1039, + 441 + ], + [ + 1047, + 444 + ], + [ + 1033, + 459 + ], + [ + 1017, + 460 + ], + [ + 999, + 460 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 406, + 443 + ], + [ + 46, + 465 + ], + [ + 0, + 465 + ], + [ + 0, + 191 + ], + [ + 630, + 230 + ], + [ + 668, + 363 + ], + [ + 646, + 424 + ], + [ + 603, + 434 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1846, + 225 + ], + [ + 1804, + 231 + ], + [ + 1803, + 246 + ], + [ + 1671, + 257 + ], + [ + 1651, + 260 + ], + [ + 1610, + 260 + ], + [ + 1588, + 260 + ], + [ + 1574, + 262 + ], + [ + 1571, + 267 + ], + [ + 1550, + 270 + ], + [ + 1548, + 267 + ], + [ + 1512, + 270 + ], + [ + 1510, + 270 + ], + [ + 1509, + 262 + ], + [ + 1351, + 263 + ], + [ + 1342, + 319 + ], + [ + 1342, + 321 + ], + [ + 1321, + 320 + ], + [ + 1311, + 325 + ], + [ + 1312, + 354 + ], + [ + 1310, + 396 + ], + [ + 1293, + 391 + ], + [ + 1215, + 394 + ], + [ + 1147, + 395 + ], + [ + 1075, + 395 + ], + [ + 1031, + 398 + ], + [ + 1012, + 386 + ], + [ + 974, + 362 + ], + [ + 803, + 368 + ], + [ + 677, + 376 + ], + [ + 692, + 457 + ], + [ + 797, + 465 + ], + [ + 939, + 453 + ], + [ + 966, + 447 + ], + [ + 1001, + 442 + ], + [ + 1026, + 440 + ], + [ + 1076, + 439 + ], + [ + 1118, + 433 + ], + [ + 1146, + 432 + ], + [ + 1190, + 431 + ], + [ + 1226, + 434 + ], + [ + 1286, + 436 + ], + [ + 1365, + 419 + ], + [ + 1653, + 419 + ], + [ + 1875, + 445 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1189, + 436 + ], + [ + 1138, + 434 + ], + [ + 1125, + 433 + ], + [ + 1128, + 426 + ], + [ + 1151, + 425 + ], + [ + 1176, + 425 + ], + [ + 1192, + 425 + ], + [ + 1193, + 430 + ], + [ + 1194, + 437 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 981, + 134 + ], + [ + 995, + 138 + ], + [ + 1017, + 141 + ], + [ + 1037, + 150 + ], + [ + 1048, + 151 + ], + [ + 1055, + 158 + ], + [ + 1059, + 166 + ], + [ + 1067, + 181 + ], + [ + 1076, + 196 + ], + [ + 1078, + 206 + ], + [ + 1087, + 209 + ], + [ + 1084, + 216 + ], + [ + 1086, + 223 + ], + [ + 1097, + 216 + ], + [ + 1100, + 206 + ], + [ + 1107, + 201 + ], + [ + 1114, + 204 + ], + [ + 1119, + 212 + ], + [ + 1124, 
+ 222 + ], + [ + 1125, + 229 + ], + [ + 1122, + 241 + ], + [ + 1131, + 247 + ], + [ + 1131, + 257 + ], + [ + 1131, + 263 + ], + [ + 1144, + 264 + ], + [ + 1154, + 268 + ], + [ + 1155, + 261 + ], + [ + 1151, + 258 + ], + [ + 1142, + 254 + ], + [ + 1144, + 247 + ], + [ + 1153, + 232 + ], + [ + 1155, + 215 + ], + [ + 1156, + 204 + ], + [ + 1169, + 202 + ], + [ + 1187, + 197 + ], + [ + 1192, + 195 + ], + [ + 1194, + 190 + ], + [ + 1197, + 183 + ], + [ + 1209, + 187 + ], + [ + 1220, + 193 + ], + [ + 1231, + 198 + ], + [ + 1238, + 197 + ], + [ + 1247, + 201 + ], + [ + 1257, + 204 + ], + [ + 1257, + 210 + ], + [ + 1250, + 220 + ], + [ + 1246, + 232 + ], + [ + 1251, + 238 + ], + [ + 1259, + 238 + ], + [ + 1269, + 228 + ], + [ + 1280, + 224 + ], + [ + 1290, + 223 + ], + [ + 1293, + 223 + ], + [ + 1291, + 232 + ], + [ + 1281, + 247 + ], + [ + 1280, + 260 + ], + [ + 1280, + 265 + ], + [ + 1285, + 268 + ], + [ + 1286, + 275 + ], + [ + 1287, + 280 + ], + [ + 1292, + 286 + ], + [ + 1288, + 294 + ], + [ + 1280, + 300 + ], + [ + 1284, + 313 + ], + [ + 1289, + 312 + ], + [ + 1292, + 315 + ], + [ + 1296, + 322 + ], + [ + 1293, + 336 + ], + [ + 1292, + 347 + ], + [ + 1295, + 360 + ], + [ + 1302, + 366 + ], + [ + 1309, + 368 + ], + [ + 1310, + 377 + ], + [ + 1310, + 394 + ], + [ + 1310, + 397 + ], + [ + 1308, + 422 + ], + [ + 1282, + 438 + ], + [ + 1252, + 442 + ], + [ + 1231, + 446 + ], + [ + 1217, + 446 + ], + [ + 1202, + 444 + ], + [ + 1195, + 439 + ], + [ + 1182, + 433 + ], + [ + 1169, + 428 + ], + [ + 1169, + 416 + ], + [ + 1164, + 409 + ], + [ + 1158, + 405 + ], + [ + 1146, + 402 + ], + [ + 1142, + 408 + ], + [ + 1141, + 417 + ], + [ + 1132, + 425 + ], + [ + 1121, + 434 + ], + [ + 1109, + 426 + ], + [ + 1101, + 415 + ], + [ + 1098, + 410 + ], + [ + 1088, + 409 + ], + [ + 1069, + 409 + ], + [ + 1047, + 411 + ], + [ + 1044, + 417 + ], + [ + 1044, + 431 + ], + [ + 1031, + 445 + ], + [ + 1023, + 448 + ], + [ + 1022, + 433 + ], + [ + 1019, + 424 + ], + [ + 1009, + 422 + ], + [ + 1004, + 422 + ], + [ + 1005, + 436 + ], + [ + 1004, + 448 + ], + [ + 994, + 448 + ], + [ + 994, + 432 + ], + [ + 991, + 420 + ], + [ + 979, + 415 + ], + [ + 971, + 406 + ], + [ + 965, + 400 + ], + [ + 955, + 399 + ], + [ + 947, + 405 + ], + [ + 938, + 411 + ], + [ + 920, + 409 + ], + [ + 907, + 401 + ], + [ + 905, + 408 + ], + [ + 903, + 419 + ], + [ + 881, + 436 + ], + [ + 769, + 463 + ], + [ + 723, + 409 + ], + [ + 714, + 309 + ], + [ + 822, + 96 + ], + [ + 912, + 98 + ], + [ + 969, + 112 + ], + [ + 972, + 117 + ], + [ + 983, + 112 + ], + [ + 993, + 122 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1171, + 440 + ], + [ + 1167, + 439 + ], + [ + 1162, + 439 + ], + [ + 1159, + 440 + ], + [ + 1158, + 439 + ], + [ + 1158, + 434 + ], + [ + 1159, + 430 + ], + [ + 1161, + 428 + ], + [ + 1170, + 428 + ], + [ + 1171, + 430 + ], + [ + 1172, + 435 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1157, + 413 + ], + [ + 1157, + 433 + ], + [ + 1160, + 433 + ], + [ + 1158, + 408 + ], + [ + 1162, + 405 + ], + [ + 1172, + 402 + ], + [ + 1171, + 400 + ], + [ + 1161, + 403 + ], + [ + 1156, + 407 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1170, + 381 + ], + [ + 1172, + 404 + ], + [ + 1193, + 404 + ], + [ + 1196, + 377 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1175, + 390 + ], + [ + 1174, + 402 + ], + [ + 1169, + 403 + ], + [ + 1170, + 391 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1130, + 392 + ], + [ + 1129, + 400 + ], + [ + 1136, + 401 + ], + [ + 1135, + 392 + ] + ] + }, 
+ { + "label": "static", + "polygon": [ + [ + 1110, + 416 + ], + [ + 1110, + 426 + ], + [ + 1102, + 426 + ], + [ + 1102, + 416 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1033, + 369 + ], + [ + 1035, + 436 + ], + [ + 1031, + 437 + ], + [ + 1031, + 334 + ], + [ + 1034, + 334 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1208, + 364 + ], + [ + 1209, + 313 + ], + [ + 1031, + 317 + ], + [ + 1032, + 368 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1053, + 389 + ], + [ + 1053, + 417 + ], + [ + 1068, + 417 + ], + [ + 1069, + 390 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1040, + 394 + ], + [ + 1041, + 414 + ], + [ + 1024, + 416 + ], + [ + 1024, + 394 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1018, + 434 + ], + [ + 1020, + 439 + ], + [ + 1016, + 442 + ], + [ + 1013, + 439 + ], + [ + 1012, + 432 + ], + [ + 1015, + 432 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1008, + 413 + ], + [ + 1008, + 442 + ], + [ + 1012, + 442 + ], + [ + 1011, + 407 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1012, + 448 + ], + [ + 1011, + 430 + ], + [ + 1001, + 434 + ], + [ + 1003, + 451 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 990, + 399 + ], + [ + 992, + 418 + ], + [ + 1020, + 414 + ], + [ + 1020, + 394 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 954, + 430 + ], + [ + 961, + 421 + ], + [ + 966, + 409 + ], + [ + 971, + 409 + ], + [ + 975, + 420 + ], + [ + 975, + 430 + ], + [ + 975, + 439 + ], + [ + 965, + 445 + ], + [ + 957, + 445 + ], + [ + 949, + 441 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 865, + 394 + ], + [ + 865, + 413 + ], + [ + 879, + 416 + ], + [ + 878, + 393 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 632, + 391 + ], + [ + 619, + 398 + ], + [ + 610, + 398 + ], + [ + 600, + 389 + ], + [ + 589, + 381 + ], + [ + 600, + 381 + ], + [ + 614, + 373 + ], + [ + 607, + 347 + ], + [ + 587, + 319 + ], + [ + 584, + 325 + ], + [ + 579, + 339 + ], + [ + 574, + 329 + ], + [ + 564, + 311 + ], + [ + 522, + 312 + ], + [ + 490, + 329 + ], + [ + 483, + 348 + ], + [ + 482, + 363 + ], + [ + 487, + 386 + ], + [ + 480, + 367 + ], + [ + 468, + 336 + ], + [ + 425, + 349 + ], + [ + 392, + 374 + ], + [ + 378, + 392 + ], + [ + 376, + 412 + ], + [ + 373, + 419 + ], + [ + 364, + 411 + ], + [ + 369, + 395 + ], + [ + 366, + 379 + ], + [ + 362, + 352 + ], + [ + 353, + 337 + ], + [ + 333, + 364 + ], + [ + 331, + 392 + ], + [ + 332, + 408 + ], + [ + 331, + 422 + ], + [ + 318, + 419 + ], + [ + 311, + 412 + ], + [ + 304, + 407 + ], + [ + 295, + 406 + ], + [ + 284, + 401 + ], + [ + 275, + 387 + ], + [ + 267, + 355 + ], + [ + 262, + 317 + ], + [ + 248, + 325 + ], + [ + 236, + 342 + ], + [ + 233, + 356 + ], + [ + 238, + 376 + ], + [ + 238, + 393 + ], + [ + 228, + 397 + ], + [ + 214, + 407 + ], + [ + 201, + 417 + ], + [ + 187, + 420 + ], + [ + 173, + 411 + ], + [ + 158, + 399 + ], + [ + 149, + 402 + ], + [ + 139, + 416 + ], + [ + 122, + 422 + ], + [ + 100, + 412 + ], + [ + 90, + 378 + ], + [ + 68, + 317 + ], + [ + 0, + 291 + ], + [ + 0, + 289 + ], + [ + 0, + 480 + ], + [ + 6, + 486 + ], + [ + 120, + 479 + ], + [ + 242, + 475 + ], + [ + 418, + 462 + ], + [ + 574, + 462 + ], + [ + 649, + 455 + ], + [ + 674, + 453 + ], + [ + 714, + 447 + ], + [ + 684, + 384 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 23, + 498 + ], + [ + 0, + 497 + ], + [ + 0, + 484 + ], + [ + 348, + 470 + ], + [ + 413, + 466 + ], + [ + 487, + 464 + ], + [ + 543, + 463 
+ ], + [ + 618, + 458 + ], + [ + 648, + 456 + ], + [ + 654, + 456 + ], + [ + 651, + 462 + ], + [ + 492, + 469 + ], + [ + 207, + 490 + ], + [ + 34, + 498 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 58, + 510 + ], + [ + 0, + 515 + ], + [ + 0, + 498 + ], + [ + 39, + 494 + ], + [ + 76, + 495 + ], + [ + 125, + 491 + ], + [ + 196, + 487 + ], + [ + 265, + 483 + ], + [ + 308, + 479 + ], + [ + 342, + 476 + ], + [ + 380, + 475 + ], + [ + 417, + 473 + ], + [ + 448, + 472 + ], + [ + 488, + 467 + ], + [ + 517, + 466 + ], + [ + 552, + 464 + ], + [ + 586, + 464 + ], + [ + 616, + 462 + ], + [ + 637, + 458 + ], + [ + 655, + 464 + ], + [ + 634, + 470 + ], + [ + 620, + 469 + ], + [ + 560, + 471 + ], + [ + 521, + 475 + ], + [ + 371, + 484 + ], + [ + 208, + 502 + ], + [ + 100, + 509 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 231, + 421 + ], + [ + 99, + 422 + ], + [ + 0, + 431 + ], + [ + 0, + 493 + ], + [ + 196, + 476 + ], + [ + 279, + 472 + ], + [ + 320, + 472 + ], + [ + 378, + 466 + ], + [ + 429, + 464 + ], + [ + 610, + 464 + ], + [ + 657, + 458 + ], + [ + 657, + 420 + ], + [ + 614, + 423 + ], + [ + 463, + 425 + ], + [ + 341, + 423 + ], + [ + 262, + 422 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 574, + 398 + ], + [ + 577, + 462 + ], + [ + 560, + 461 + ], + [ + 562, + 389 + ], + [ + 574, + 391 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 505, + 422 + ], + [ + 500, + 422 + ], + [ + 490, + 459 + ], + [ + 514, + 460 + ], + [ + 508, + 423 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 476, + 393 + ], + [ + 480, + 465 + ], + [ + 473, + 465 + ], + [ + 471, + 347 + ], + [ + 475, + 347 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 637, + 555 + ], + [ + 0, + 718 + ], + [ + 0, + 534 + ], + [ + 7, + 535 + ], + [ + 20, + 538 + ], + [ + 35, + 539 + ], + [ + 38, + 534 + ], + [ + 35, + 525 + ], + [ + 70, + 519 + ], + [ + 131, + 507 + ], + [ + 358, + 487 + ], + [ + 443, + 474 + ], + [ + 609, + 474 + ], + [ + 687, + 469 + ], + [ + 790, + 467 + ], + [ + 777, + 511 + ], + [ + 709, + 523 + ], + [ + 645, + 533 + ], + [ + 627, + 534 + ], + [ + 634, + 541 + ], + [ + 639, + 550 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 131, + 543 + ], + [ + 133, + 403 + ], + [ + 133, + 359 + ], + [ + 131, + 357 + ], + [ + 125, + 384 + ], + [ + 121, + 412 + ], + [ + 105, + 417 + ], + [ + 0, + 328 + ], + [ + 0, + 0 + ], + [ + 954, + 0 + ], + [ + 962, + 2 + ], + [ + 958, + 8 + ], + [ + 928, + 11 + ], + [ + 929, + 19 + ], + [ + 932, + 20 + ], + [ + 950, + 17 + ], + [ + 959, + 17 + ], + [ + 966, + 34 + ], + [ + 967, + 57 + ], + [ + 961, + 62 + ], + [ + 965, + 66 + ], + [ + 969, + 66 + ], + [ + 970, + 77 + ], + [ + 970, + 91 + ], + [ + 957, + 91 + ], + [ + 950, + 86 + ], + [ + 949, + 95 + ], + [ + 958, + 109 + ], + [ + 958, + 122 + ], + [ + 950, + 196 + ], + [ + 943, + 284 + ], + [ + 888, + 341 + ], + [ + 835, + 386 + ], + [ + 815, + 402 + ], + [ + 780, + 406 + ], + [ + 723, + 406 + ], + [ + 713, + 410 + ], + [ + 704, + 433 + ], + [ + 692, + 460 + ], + [ + 669, + 475 + ], + [ + 644, + 477 + ], + [ + 637, + 479 + ], + [ + 633, + 464 + ], + [ + 635, + 420 + ], + [ + 620, + 397 + ], + [ + 603, + 383 + ], + [ + 589, + 390 + ], + [ + 566, + 400 + ], + [ + 561, + 409 + ], + [ + 560, + 462 + ], + [ + 565, + 486 + ], + [ + 565, + 501 + ], + [ + 548, + 500 + ], + [ + 534, + 496 + ], + [ + 533, + 479 + ], + [ + 541, + 447 + ], + [ + 543, + 416 + ], + [ + 533, + 396 + ], + [ + 525, + 386 + ], + [ + 514, + 339 + ], + [ + 500, + 334 + ], + [ + 480, + 347 + ], + [ + 
468, + 360 + ], + [ + 462, + 371 + ], + [ + 460, + 422 + ], + [ + 462, + 466 + ], + [ + 477, + 500 + ], + [ + 477, + 509 + ], + [ + 468, + 515 + ], + [ + 450, + 506 + ], + [ + 449, + 449 + ], + [ + 436, + 400 + ], + [ + 420, + 377 + ], + [ + 406, + 366 + ], + [ + 395, + 371 + ], + [ + 374, + 375 + ], + [ + 339, + 380 + ], + [ + 318, + 380 + ], + [ + 320, + 483 + ], + [ + 308, + 483 + ], + [ + 307, + 433 + ], + [ + 310, + 386 + ], + [ + 303, + 375 + ], + [ + 277, + 375 + ], + [ + 253, + 373 + ], + [ + 242, + 384 + ], + [ + 228, + 396 + ], + [ + 212, + 387 + ], + [ + 188, + 382 + ], + [ + 182, + 369 + ], + [ + 161, + 372 + ], + [ + 152, + 382 + ], + [ + 165, + 547 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 607, + 534 + ], + [ + 607, + 256 + ], + [ + 612, + 256 + ], + [ + 617, + 256 + ], + [ + 619, + 534 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 620, + 275 + ], + [ + 637, + 277 + ], + [ + 637, + 283 + ], + [ + 636, + 286 + ], + [ + 622, + 288 + ], + [ + 623, + 298 + ], + [ + 638, + 300 + ], + [ + 638, + 306 + ], + [ + 633, + 310 + ], + [ + 622, + 308 + ], + [ + 624, + 323 + ], + [ + 637, + 324 + ], + [ + 637, + 328 + ], + [ + 635, + 331 + ], + [ + 626, + 332 + ], + [ + 612, + 331 + ], + [ + 615, + 272 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 603, + 269 + ], + [ + 619, + 269 + ], + [ + 620, + 334 + ], + [ + 601, + 334 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 974, + 482 + ], + [ + 974, + 507 + ], + [ + 968, + 507 + ], + [ + 968, + 512 + ], + [ + 956, + 515 + ], + [ + 950, + 513 + ], + [ + 948, + 508 + ], + [ + 948, + 500 + ], + [ + 923, + 500 + ], + [ + 920, + 499 + ], + [ + 917, + 499 + ], + [ + 917, + 505 + ], + [ + 905, + 503 + ], + [ + 902, + 500 + ], + [ + 899, + 499 + ], + [ + 895, + 502 + ], + [ + 891, + 505 + ], + [ + 885, + 514 + ], + [ + 844, + 450 + ], + [ + 856, + 432 + ], + [ + 859, + 422 + ], + [ + 868, + 413 + ], + [ + 879, + 408 + ], + [ + 893, + 408 + ], + [ + 929, + 408 + ], + [ + 954, + 411 + ], + [ + 962, + 434 + ], + [ + 970, + 432 + ], + [ + 977, + 437 + ], + [ + 978, + 441 + ], + [ + 969, + 446 + ], + [ + 968, + 446 + ], + [ + 972, + 459 + ], + [ + 972, + 471 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 862, + 570 + ], + [ + 837, + 571 + ], + [ + 835, + 560 + ], + [ + 770, + 563 + ], + [ + 726, + 557 + ], + [ + 718, + 554 + ], + [ + 715, + 551 + ], + [ + 704, + 551 + ], + [ + 697, + 556 + ], + [ + 694, + 569 + ], + [ + 688, + 574 + ], + [ + 674, + 574 + ], + [ + 662, + 562 + ], + [ + 661, + 538 + ], + [ + 660, + 511 + ], + [ + 660, + 492 + ], + [ + 667, + 473 + ], + [ + 685, + 439 + ], + [ + 699, + 419 + ], + [ + 705, + 413 + ], + [ + 713, + 410 + ], + [ + 726, + 409 + ], + [ + 731, + 411 + ], + [ + 754, + 411 + ], + [ + 758, + 411 + ], + [ + 759, + 408 + ], + [ + 762, + 407 + ], + [ + 763, + 410 + ], + [ + 776, + 411 + ], + [ + 810, + 411 + ], + [ + 818, + 408 + ], + [ + 824, + 408 + ], + [ + 833, + 410 + ], + [ + 842, + 418 + ], + [ + 866, + 449 + ], + [ + 876, + 462 + ], + [ + 878, + 453 + ], + [ + 884, + 448 + ], + [ + 892, + 450 + ], + [ + 893, + 456 + ], + [ + 891, + 460 + ], + [ + 885, + 464 + ], + [ + 884, + 468 + ], + [ + 888, + 472 + ], + [ + 891, + 478 + ], + [ + 892, + 491 + ], + [ + 893, + 523 + ], + [ + 892, + 537 + ], + [ + 889, + 550 + ], + [ + 886, + 555 + ], + [ + 873, + 552 + ], + [ + 865, + 552 + ], + [ + 865, + 542 + ], + [ + 862, + 542 + ], + [ + 863, + 558 + ], + [ + 863, + 564 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 718, + 497 + ], + [ 
+ 720, + 484 + ], + [ + 776, + 483 + ], + [ + 777, + 496 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1011, + 541 + ], + [ + 1009, + 529 + ], + [ + 1009, + 519 + ], + [ + 1008, + 505 + ], + [ + 1006, + 496 + ], + [ + 1008, + 477 + ], + [ + 1014, + 465 + ], + [ + 1016, + 462 + ], + [ + 1008, + 461 + ], + [ + 1005, + 458 + ], + [ + 1008, + 452 + ], + [ + 1013, + 452 + ], + [ + 1021, + 450 + ], + [ + 1037, + 424 + ], + [ + 1047, + 419 + ], + [ + 1113, + 419 + ], + [ + 1125, + 423 + ], + [ + 1129, + 427 + ], + [ + 1141, + 450 + ], + [ + 1145, + 454 + ], + [ + 1148, + 451 + ], + [ + 1156, + 451 + ], + [ + 1161, + 454 + ], + [ + 1161, + 461 + ], + [ + 1157, + 463 + ], + [ + 1149, + 462 + ], + [ + 1150, + 470 + ], + [ + 1151, + 480 + ], + [ + 1153, + 498 + ], + [ + 1153, + 513 + ], + [ + 1151, + 521 + ], + [ + 1152, + 536 + ], + [ + 1151, + 540 + ], + [ + 1147, + 543 + ], + [ + 1138, + 543 + ], + [ + 1135, + 536 + ], + [ + 1135, + 522 + ], + [ + 1120, + 522 + ], + [ + 1117, + 525 + ], + [ + 1108, + 525 + ], + [ + 1084, + 526 + ], + [ + 1066, + 525 + ], + [ + 1062, + 522 + ], + [ + 1039, + 523 + ], + [ + 1039, + 529 + ], + [ + 1031, + 533 + ], + [ + 1026, + 539 + ], + [ + 1017, + 541 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1057, + 480 + ], + [ + 1057, + 469 + ], + [ + 1106, + 469 + ], + [ + 1105, + 479 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1737, + 211 + ], + [ + 1736, + 233 + ], + [ + 1749, + 234 + ], + [ + 1747, + 211 + ], + [ + 1804, + 209 + ], + [ + 1803, + 139 + ], + [ + 1681, + 143 + ], + [ + 1681, + 214 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1336, + 355 + ], + [ + 1322, + 345 + ], + [ + 1331, + 335 + ], + [ + 1352, + 326 + ], + [ + 1374, + 308 + ], + [ + 1377, + 294 + ], + [ + 1391, + 294 + ], + [ + 1400, + 298 + ], + [ + 1406, + 291 + ], + [ + 1415, + 288 + ], + [ + 1426, + 294 + ], + [ + 1444, + 297 + ], + [ + 1452, + 295 + ], + [ + 1457, + 287 + ], + [ + 1467, + 285 + ], + [ + 1478, + 298 + ], + [ + 1494, + 306 + ], + [ + 1513, + 309 + ], + [ + 1546, + 299 + ], + [ + 1568, + 284 + ], + [ + 1591, + 280 + ], + [ + 1606, + 292 + ], + [ + 1610, + 306 + ], + [ + 1618, + 311 + ], + [ + 1633, + 303 + ], + [ + 1644, + 297 + ], + [ + 1656, + 270 + ], + [ + 1656, + 242 + ], + [ + 1673, + 221 + ], + [ + 1695, + 212 + ], + [ + 1725, + 212 + ], + [ + 1739, + 220 + ], + [ + 1755, + 219 + ], + [ + 1774, + 228 + ], + [ + 1781, + 238 + ], + [ + 1799, + 241 + ], + [ + 1804, + 223 + ], + [ + 1798, + 195 + ], + [ + 1820, + 188 + ], + [ + 1833, + 183 + ], + [ + 1821, + 180 + ], + [ + 1818, + 161 + ], + [ + 1825, + 148 + ], + [ + 1812, + 148 + ], + [ + 1846, + 133 + ], + [ + 1848, + 135 + ], + [ + 1862, + 146 + ], + [ + 1888, + 142 + ], + [ + 1900, + 135 + ], + [ + 1908, + 118 + ], + [ + 1913, + 106 + ], + [ + 1906, + 105 + ], + [ + 1912, + 98 + ], + [ + 1929, + 97 + ], + [ + 1941, + 107 + ], + [ + 1955, + 111 + ], + [ + 1949, + 108 + ], + [ + 1947, + 92 + ], + [ + 1952, + 83 + ], + [ + 1965, + 89 + ], + [ + 1979, + 90 + ], + [ + 1999, + 88 + ], + [ + 2013, + 82 + ], + [ + 2029, + 83 + ], + [ + 2027, + 99 + ], + [ + 2038, + 90 + ], + [ + 2048, + 76 + ], + [ + 2048, + 364 + ], + [ + 1983, + 415 + ], + [ + 1680, + 415 + ], + [ + 1619, + 415 + ], + [ + 1343, + 363 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1332, + 249 + ], + [ + 1376, + 249 + ], + [ + 1378, + 313 + ], + [ + 1374, + 317 + ], + [ + 1375, + 326 + ], + [ + 1374, + 330 + ], + [ + 1367, + 323 + ], + [ + 1360, + 318 + ], + [ + 1359, + 345 + ], + [ + 1363, 
+ 350 + ], + [ + 1375, + 347 + ], + [ + 1385, + 354 + ], + [ + 1374, + 358 + ], + [ + 1385, + 367 + ], + [ + 1408, + 362 + ], + [ + 1411, + 357 + ], + [ + 1427, + 354 + ], + [ + 1435, + 351 + ], + [ + 1445, + 345 + ], + [ + 1598, + 343 + ], + [ + 1612, + 337 + ], + [ + 1673, + 332 + ], + [ + 1666, + 338 + ], + [ + 1645, + 347 + ], + [ + 1624, + 348 + ], + [ + 1614, + 350 + ], + [ + 1612, + 357 + ], + [ + 1611, + 367 + ], + [ + 1612, + 376 + ], + [ + 1623, + 381 + ], + [ + 1636, + 377 + ], + [ + 1647, + 376 + ], + [ + 1655, + 385 + ], + [ + 1658, + 400 + ], + [ + 1676, + 398 + ], + [ + 1678, + 374 + ], + [ + 1680, + 367 + ], + [ + 1746, + 369 + ], + [ + 1770, + 371 + ], + [ + 1770, + 386 + ], + [ + 1770, + 411 + ], + [ + 1727, + 431 + ], + [ + 1637, + 429 + ], + [ + 1567, + 432 + ], + [ + 1504, + 433 + ], + [ + 1377, + 438 + ], + [ + 1357, + 443 + ], + [ + 1333, + 431 + ], + [ + 1329, + 401 + ], + [ + 1330, + 371 + ], + [ + 1301, + 374 + ], + [ + 1298, + 354 + ], + [ + 1309, + 349 + ], + [ + 1332, + 349 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1385, + 360 + ], + [ + 1388, + 429 + ], + [ + 1391, + 430 + ], + [ + 1388, + 354 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1403, + 347 + ], + [ + 1401, + 358 + ], + [ + 1389, + 362 + ], + [ + 1382, + 359 + ], + [ + 1384, + 352 + ], + [ + 1387, + 349 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1322, + 256 + ], + [ + 1318, + 419 + ], + [ + 1321, + 420 + ], + [ + 1323, + 253 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1305, + 256 + ], + [ + 1308, + 251 + ], + [ + 1319, + 251 + ], + [ + 1322, + 257 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1286, + 385 + ], + [ + 1283, + 431 + ], + [ + 1264, + 447 + ], + [ + 1225, + 443 + ], + [ + 1202, + 444 + ], + [ + 1193, + 435 + ], + [ + 1205, + 402 + ], + [ + 1211, + 391 + ], + [ + 1217, + 389 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1234, + 385 + ], + [ + 1235, + 447 + ], + [ + 1238, + 447 + ], + [ + 1237, + 381 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1248, + 376 + ], + [ + 1249, + 394 + ], + [ + 1223, + 394 + ], + [ + 1223, + 376 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1225, + 448 + ], + [ + 1224, + 418 + ], + [ + 1222, + 402 + ], + [ + 1221, + 377 + ], + [ + 1293, + 325 + ], + [ + 1281, + 313 + ], + [ + 1272, + 296 + ], + [ + 1279, + 283 + ], + [ + 1282, + 267 + ], + [ + 1281, + 249 + ], + [ + 1292, + 237 + ], + [ + 1293, + 223 + ], + [ + 1278, + 227 + ], + [ + 1257, + 243 + ], + [ + 1243, + 234 + ], + [ + 1252, + 212 + ], + [ + 1256, + 202 + ], + [ + 1233, + 186 + ], + [ + 1214, + 182 + ], + [ + 1198, + 179 + ], + [ + 1185, + 192 + ], + [ + 1160, + 194 + ], + [ + 1148, + 218 + ], + [ + 1156, + 234 + ], + [ + 1144, + 243 + ], + [ + 1145, + 257 + ], + [ + 1123, + 265 + ], + [ + 1110, + 280 + ], + [ + 1122, + 243 + ], + [ + 1108, + 245 + ], + [ + 1116, + 232 + ], + [ + 1127, + 221 + ], + [ + 1118, + 207 + ], + [ + 1099, + 211 + ], + [ + 1083, + 215 + ], + [ + 1076, + 203 + ], + [ + 1072, + 186 + ], + [ + 1060, + 172 + ], + [ + 1050, + 153 + ], + [ + 1015, + 145 + ], + [ + 983, + 139 + ], + [ + 1012, + 290 + ], + [ + 1002, + 306 + ], + [ + 1012, + 336 + ], + [ + 1023, + 355 + ], + [ + 1031, + 355 + ], + [ + 1043, + 337 + ], + [ + 1044, + 320 + ], + [ + 1047, + 311 + ], + [ + 1180, + 307 + ], + [ + 1186, + 312 + ], + [ + 1189, + 322 + ], + [ + 1192, + 326 + ], + [ + 1196, + 322 + ], + [ + 1196, + 329 + ], + [ + 1198, + 339 + ], + [ + 1205, + 352 + ], + [ + 1206, + 369 + ], + [ + 1203, 
+ 383 + ], + [ + 1194, + 372 + ], + [ + 1178, + 373 + ], + [ + 1173, + 380 + ], + [ + 1176, + 393 + ], + [ + 1183, + 402 + ], + [ + 1195, + 408 + ], + [ + 1198, + 415 + ], + [ + 1198, + 424 + ], + [ + 1199, + 437 + ], + [ + 1203, + 446 + ], + [ + 1213, + 448 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1405, + 473 + ], + [ + 1322, + 458 + ], + [ + 1342, + 448 + ], + [ + 1390, + 449 + ], + [ + 1647, + 461 + ], + [ + 1725, + 461 + ], + [ + 1727, + 477 + ], + [ + 1713, + 493 + ], + [ + 1666, + 503 + ], + [ + 1543, + 492 + ], + [ + 1461, + 478 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1383, + 491 + ], + [ + 1352, + 491 + ], + [ + 1338, + 477 + ], + [ + 1340, + 462 + ], + [ + 1352, + 457 + ], + [ + 1366, + 461 + ], + [ + 1400, + 466 + ], + [ + 1432, + 468 + ], + [ + 1469, + 466 + ], + [ + 1516, + 474 + ], + [ + 1517, + 484 + ], + [ + 1499, + 489 + ], + [ + 1468, + 492 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 1381, + 411 + ], + [ + 1357, + 415 + ], + [ + 1327, + 417 + ], + [ + 1334, + 457 + ], + [ + 1373, + 454 + ], + [ + 1407, + 454 + ], + [ + 1515, + 461 + ], + [ + 1615, + 468 + ], + [ + 1685, + 466 + ], + [ + 1725, + 466 + ], + [ + 1725, + 416 + ], + [ + 1714, + 409 + ], + [ + 1662, + 406 + ], + [ + 1609, + 404 + ], + [ + 1532, + 403 + ], + [ + 1493, + 401 + ], + [ + 1418, + 409 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1571, + 572 + ], + [ + 1524, + 566 + ], + [ + 1478, + 550 + ], + [ + 1442, + 533 + ], + [ + 1437, + 519 + ], + [ + 1453, + 503 + ], + [ + 1498, + 491 + ], + [ + 1565, + 489 + ], + [ + 1621, + 491 + ], + [ + 1673, + 498 + ], + [ + 1716, + 501 + ], + [ + 1731, + 536 + ], + [ + 1656, + 575 + ], + [ + 1601, + 578 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1571, + 511 + ], + [ + 1473, + 526 + ], + [ + 1436, + 527 + ], + [ + 1436, + 512 + ], + [ + 1461, + 498 + ], + [ + 1509, + 487 + ], + [ + 1563, + 490 + ], + [ + 1612, + 490 + ], + [ + 1628, + 491 + ], + [ + 1627, + 504 + ], + [ + 1588, + 510 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1541, + 573 + ], + [ + 1509, + 566 + ], + [ + 1512, + 554 + ], + [ + 1525, + 545 + ], + [ + 1548, + 533 + ], + [ + 1578, + 529 + ], + [ + 1608, + 522 + ], + [ + 1660, + 513 + ], + [ + 1674, + 512 + ], + [ + 1673, + 534 + ], + [ + 1632, + 563 + ], + [ + 1597, + 574 + ], + [ + 1580, + 576 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1676, + 275 + ], + [ + 1676, + 351 + ], + [ + 1679, + 351 + ], + [ + 1679, + 272 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1673, + 270 + ], + [ + 1669, + 274 + ], + [ + 1671, + 275 + ], + [ + 1678, + 275 + ], + [ + 1679, + 273 + ], + [ + 1679, + 271 + ], + [ + 1678, + 270 + ], + [ + 1678, + 270 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1670, + 354 + ], + [ + 1671, + 408 + ], + [ + 1675, + 406 + ], + [ + 1674, + 351 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1631, + 353 + ], + [ + 1632, + 410 + ], + [ + 1626, + 408 + ], + [ + 1624, + 351 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1608, + 348 + ], + [ + 1605, + 411 + ], + [ + 1612, + 411 + ], + [ + 1612, + 347 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1504, + 333 + ], + [ + 1506, + 451 + ], + [ + 1511, + 452 + ], + [ + 1508, + 332 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1546, + 339 + ], + [ + 1548, + 426 + ], + [ + 1550, + 426 + ], + [ + 1552, + 426 + ], + [ + 1550, + 337 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1357, + 166 + ], + [ + 1359, + 460 + ], + [ 
+ 1365, + 461 + ], + [ + 1361, + 166 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1355, + 393 + ], + [ + 1353, + 386 + ], + [ + 1354, + 381 + ], + [ + 1359, + 378 + ], + [ + 1364, + 379 + ], + [ + 1369, + 383 + ], + [ + 1369, + 388 + ], + [ + 1366, + 392 + ], + [ + 1361, + 393 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1415, + 344 + ], + [ + 1415, + 479 + ], + [ + 1420, + 479 + ], + [ + 1418, + 340 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1447, + 359 + ], + [ + 1460, + 366 + ], + [ + 1449, + 374 + ], + [ + 1395, + 374 + ], + [ + 1396, + 358 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1479, + 409 + ], + [ + 1465, + 408 + ], + [ + 1454, + 410 + ], + [ + 1456, + 459 + ], + [ + 1481, + 460 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1554, + 13 + ], + [ + 1558, + 14 + ], + [ + 1563, + 432 + ], + [ + 1557, + 430 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1539, + 19 + ], + [ + 1539, + 12 + ], + [ + 1547, + 6 + ], + [ + 1557, + 4 + ], + [ + 1565, + 7 + ], + [ + 1565, + 15 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1216, + 169 + ], + [ + 1239, + 167 + ], + [ + 1239, + 171 + ], + [ + 1234, + 173 + ], + [ + 1217, + 174 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1581, + 307 + ], + [ + 1583, + 471 + ], + [ + 1587, + 471 + ], + [ + 1584, + 304 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1598, + 305 + ], + [ + 1576, + 284 + ], + [ + 1553, + 305 + ], + [ + 1576, + 326 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1569, + 353 + ], + [ + 1569, + 388 + ], + [ + 1578, + 390 + ], + [ + 1579, + 351 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1592, + 356 + ], + [ + 1619, + 298 + ], + [ + 1624, + 299 + ], + [ + 1595, + 364 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1590, + 386 + ], + [ + 1590, + 351 + ], + [ + 1598, + 351 + ], + [ + 1598, + 354 + ], + [ + 1605, + 354 + ], + [ + 1605, + 356 + ], + [ + 1598, + 359 + ], + [ + 1598, + 364 + ], + [ + 1604, + 364 + ], + [ + 1604, + 367 + ], + [ + 1604, + 368 + ], + [ + 1600, + 370 + ], + [ + 1599, + 379 + ], + [ + 1603, + 379 + ], + [ + 1605, + 381 + ], + [ + 1601, + 382 + ], + [ + 1598, + 384 + ], + [ + 1598, + 387 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1514, + 415 + ], + [ + 1525, + 416 + ], + [ + 1523, + 451 + ], + [ + 1511, + 450 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1519, + 364 + ], + [ + 1520, + 392 + ], + [ + 1510, + 391 + ], + [ + 1512, + 363 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1490, + 532 + ], + [ + 1485, + 533 + ], + [ + 1478, + 533 + ], + [ + 1470, + 522 + ], + [ + 1470, + 503 + ], + [ + 1474, + 490 + ], + [ + 1482, + 482 + ], + [ + 1488, + 479 + ], + [ + 1493, + 479 + ], + [ + 1490, + 477 + ], + [ + 1492, + 470 + ], + [ + 1497, + 467 + ], + [ + 1500, + 451 + ], + [ + 1486, + 450 + ], + [ + 1476, + 444 + ], + [ + 1476, + 439 + ], + [ + 1484, + 440 + ], + [ + 1502, + 447 + ], + [ + 1514, + 447 + ], + [ + 1519, + 449 + ], + [ + 1519, + 452 + ], + [ + 1505, + 455 + ], + [ + 1505, + 459 + ], + [ + 1505, + 462 + ], + [ + 1552, + 457 + ], + [ + 1554, + 446 + ], + [ + 1561, + 439 + ], + [ + 1579, + 441 + ], + [ + 1586, + 446 + ], + [ + 1586, + 458 + ], + [ + 1582, + 464 + ], + [ + 1571, + 465 + ], + [ + 1572, + 471 + ], + [ + 1585, + 477 + ], + [ + 1591, + 489 + ], + [ + 1591, + 502 + ], + [ + 1587, + 514 + ], + [ + 1580, + 522 + ], + [ + 1569, + 525 + ], + [ + 1557, + 524 + ], + [ + 1547, 
+ 514 + ], + [ + 1539, + 506 + ], + [ + 1537, + 509 + ], + [ + 1526, + 516 + ], + [ + 1519, + 517 + ], + [ + 1515, + 511 + ], + [ + 1523, + 504 + ], + [ + 1516, + 495 + ], + [ + 1511, + 483 + ], + [ + 1502, + 475 + ], + [ + 1500, + 479 + ], + [ + 1503, + 486 + ], + [ + 1505, + 499 + ], + [ + 1501, + 511 + ], + [ + 1497, + 523 + ] + ] + }, + { + "label": "rider", + "polygon": [ + [ + 1540, + 394 + ], + [ + 1532, + 391 + ], + [ + 1525, + 387 + ], + [ + 1520, + 387 + ], + [ + 1517, + 390 + ], + [ + 1521, + 397 + ], + [ + 1522, + 408 + ], + [ + 1521, + 416 + ], + [ + 1516, + 424 + ], + [ + 1515, + 435 + ], + [ + 1514, + 444 + ], + [ + 1510, + 452 + ], + [ + 1512, + 455 + ], + [ + 1518, + 453 + ], + [ + 1522, + 449 + ], + [ + 1524, + 451 + ], + [ + 1528, + 464 + ], + [ + 1524, + 467 + ], + [ + 1529, + 477 + ], + [ + 1533, + 490 + ], + [ + 1529, + 498 + ], + [ + 1516, + 508 + ], + [ + 1520, + 518 + ], + [ + 1530, + 510 + ], + [ + 1541, + 496 + ], + [ + 1548, + 488 + ], + [ + 1547, + 469 + ], + [ + 1547, + 463 + ], + [ + 1558, + 457 + ], + [ + 1564, + 453 + ], + [ + 1568, + 448 + ], + [ + 1574, + 445 + ], + [ + 1575, + 435 + ], + [ + 1571, + 422 + ], + [ + 1562, + 418 + ], + [ + 1555, + 414 + ], + [ + 1553, + 405 + ], + [ + 1548, + 396 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1195, + 21 + ], + [ + 1314, + 24 + ], + [ + 1390, + 45 + ], + [ + 1482, + 67 + ], + [ + 1507, + 77 + ], + [ + 1525, + 97 + ], + [ + 1532, + 120 + ], + [ + 1533, + 161 + ], + [ + 1530, + 295 + ], + [ + 1529, + 533 + ], + [ + 1543, + 534 + ], + [ + 1539, + 136 + ], + [ + 1536, + 108 + ], + [ + 1526, + 89 + ], + [ + 1500, + 70 + ], + [ + 1472, + 60 + ], + [ + 1319, + 21 + ], + [ + 1196, + 15 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1648, + 14 + ], + [ + 1651, + 261 + ], + [ + 1662, + 261 + ], + [ + 1657, + 0 + ], + [ + 1646, + 0 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1543, + 221 + ], + [ + 1542, + 207 + ], + [ + 1531, + 211 + ], + [ + 1532, + 229 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1352, + 6 + ], + [ + 1351, + 14 + ], + [ + 1347, + 18 + ], + [ + 1316, + 19 + ], + [ + 1312, + 17 + ], + [ + 1313, + 8 + ], + [ + 1316, + 7 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1199, + 70 + ], + [ + 1199, + 0 + ], + [ + 1155, + 0 + ], + [ + 1155, + 2 + ], + [ + 1155, + 69 + ], + [ + 1155, + 74 + ], + [ + 1158, + 77 + ], + [ + 1162, + 78 + ], + [ + 1192, + 76 + ], + [ + 1197, + 76 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1598, + 244 + ], + [ + 1596, + 257 + ], + [ + 1537, + 256 + ], + [ + 1539, + 242 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1575, + 223 + ], + [ + 1569, + 223 + ], + [ + 1567, + 231 + ], + [ + 1567, + 240 + ], + [ + 1572, + 242 + ], + [ + 1575, + 236 + ], + [ + 1576, + 228 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1523, + 283 + ], + [ + 1522, + 289 + ], + [ + 1505, + 291 + ], + [ + 1505, + 297 + ], + [ + 1507, + 300 + ], + [ + 1525, + 303 + ], + [ + 1523, + 308 + ], + [ + 1507, + 311 + ], + [ + 1506, + 317 + ], + [ + 1513, + 320 + ], + [ + 1523, + 322 + ], + [ + 1523, + 331 + ], + [ + 1507, + 334 + ], + [ + 1506, + 339 + ], + [ + 1509, + 342 + ], + [ + 1522, + 345 + ], + [ + 1525, + 345 + ], + [ + 1531, + 346 + ], + [ + 1536, + 319 + ], + [ + 1533, + 284 + ], + [ + 1532, + 282 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1549, + 281 + ], + [ + 1550, + 347 + ], + [ + 1531, + 346 + ], + [ + 1531, + 281 + ] + ] + }, + { + "label": "pole", + 
"polygon": [ + [ + 1761, + 239 + ], + [ + 1766, + 240 + ], + [ + 1764, + 404 + ], + [ + 1761, + 404 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1730, + 320 + ], + [ + 1729, + 370 + ], + [ + 1773, + 370 + ], + [ + 1772, + 322 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1928, + 315 + ], + [ + 1930, + 335 + ], + [ + 1933, + 336 + ], + [ + 1933, + 316 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1921, + 319 + ], + [ + 1917, + 313 + ], + [ + 1920, + 310 + ], + [ + 1928, + 311 + ], + [ + 1932, + 312 + ], + [ + 1932, + 316 + ], + [ + 1930, + 322 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1248, + 514 + ], + [ + 1246, + 525 + ], + [ + 1243, + 526 + ], + [ + 1235, + 524 + ], + [ + 1231, + 519 + ], + [ + 1231, + 519 + ], + [ + 1224, + 519 + ], + [ + 1223, + 510 + ], + [ + 1223, + 488 + ], + [ + 1223, + 469 + ], + [ + 1228, + 457 + ], + [ + 1229, + 452 + ], + [ + 1224, + 453 + ], + [ + 1217, + 454 + ], + [ + 1216, + 448 + ], + [ + 1219, + 443 + ], + [ + 1227, + 443 + ], + [ + 1230, + 448 + ], + [ + 1233, + 446 + ], + [ + 1250, + 416 + ], + [ + 1254, + 413 + ], + [ + 1325, + 412 + ], + [ + 1331, + 412 + ], + [ + 1338, + 429 + ], + [ + 1350, + 451 + ], + [ + 1354, + 466 + ], + [ + 1359, + 482 + ], + [ + 1359, + 505 + ], + [ + 1359, + 521 + ], + [ + 1357, + 528 + ], + [ + 1347, + 527 + ], + [ + 1343, + 522 + ], + [ + 1342, + 516 + ], + [ + 1338, + 516 + ], + [ + 1338, + 519 + ], + [ + 1332, + 520 + ], + [ + 1328, + 516 + ], + [ + 1328, + 513 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1649, + 449 + ], + [ + 1650, + 493 + ], + [ + 1676, + 488 + ], + [ + 1675, + 431 + ], + [ + 1675, + 422 + ], + [ + 1649, + 428 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1604, + 758 + ], + [ + 1582, + 754 + ], + [ + 1570, + 736 + ], + [ + 1564, + 670 + ], + [ + 1563, + 638 + ], + [ + 1564, + 603 + ], + [ + 1572, + 574 + ], + [ + 1584, + 558 + ], + [ + 1607, + 537 + ], + [ + 1620, + 528 + ], + [ + 1631, + 520 + ], + [ + 1607, + 514 + ], + [ + 1599, + 510 + ], + [ + 1588, + 493 + ], + [ + 1588, + 489 + ], + [ + 1602, + 479 + ], + [ + 1625, + 479 + ], + [ + 1631, + 479 + ], + [ + 1640, + 480 + ], + [ + 1640, + 484 + ], + [ + 1640, + 487 + ], + [ + 1640, + 502 + ], + [ + 1644, + 505 + ], + [ + 1654, + 485 + ], + [ + 1695, + 436 + ], + [ + 1731, + 404 + ], + [ + 1764, + 386 + ], + [ + 1777, + 381 + ], + [ + 1792, + 365 + ], + [ + 1803, + 359 + ], + [ + 1813, + 355 + ], + [ + 1849, + 344 + ], + [ + 1911, + 334 + ], + [ + 1937, + 329 + ], + [ + 1985, + 329 + ], + [ + 2022, + 329 + ], + [ + 2041, + 334 + ], + [ + 2048, + 339 + ], + [ + 2048, + 855 + ], + [ + 2022, + 852 + ], + [ + 1922, + 855 + ], + [ + 1891, + 852 + ], + [ + 1888, + 871 + ], + [ + 1880, + 890 + ], + [ + 1873, + 904 + ], + [ + 1859, + 907 + ], + [ + 1832, + 906 + ], + [ + 1802, + 895 + ], + [ + 1782, + 845 + ], + [ + 1771, + 798 + ], + [ + 1771, + 772 + ], + [ + 1771, + 768 + ], + [ + 1648, + 731 + ], + [ + 1647, + 739 + ], + [ + 1639, + 751 + ], + [ + 1632, + 759 + ], + [ + 1624, + 761 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1278, + 476 + ], + [ + 1319, + 476 + ], + [ + 1319, + 467 + ], + [ + 1279, + 467 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + 
], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000133_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000133_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..efa755268a82f040420894e615e6945f43d6208e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000133_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000133_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000133_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..df72b827c23cae4c33631e3e5941fcba1299e598 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000133_000019_gtFine_polygons.json @@ -0,0 +1,5072 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 17, + 312 + ], + [ + 633, + 311 + ], + [ + 1247, + 331 + ], + [ + 1694, + 334 + ], + [ + 2048, + 322 + ], + [ + 2048, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 307 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 222, + 452 + ], + [ + 106, + 446 + ], + [ + 44, + 450 + ], + [ + 0, + 453 + ], + [ + 0, + 1018 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 594 + ], + [ + 859, + 458 + ], + [ + 403, + 446 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 538, + 89 + ], + [ + 526, + 62 + ], + [ + 314, + 60 + ], + [ + 313, + 58 + ], + [ + 298, + 57 + ], + [ + 298, + 48 + ], + [ + 291, + 41 + ], + [ + 280, + 38 + ], + [ + 282, + 31 + ], + [ + 277, + 29 + ], + [ + 275, + 37 + ], + [ + 252, + 36 + ], + [ + 251, + 32 + ], + [ + 247, + 33 + ], + [ + 247, + 35 + ], + [ + 241, + 35 + ], + [ + 240, + 34 + ], + [ + 234, + 32 + ], + [ + 235, + 35 + ], + [ + 226, + 34 + ], + [ + 226, + 30 + ], + [ + 223, + 30 + ], + [ + 221, + 33 + ], + [ + 180, + 28 + ], + [ + 177, + 27 + ], + [ + 171, + 22 + ], + [ + 171, + 42 + ], + [ + 174, + 44 + ], + [ + 173, + 279 + ], + [ + 7, + 382 + ], + [ + 4, + 457 + ], + [ + 74, + 456 + ], + [ + 176, + 452 + ], + [ + 312, + 446 + ], + [ + 678, + 437 + ], + [ + 1543, + 402 + ], + [ + 2048, + 354 + ], + [ + 2048, + 210 + ], + [ + 2000, + 212 + ], + [ + 1998, + 205 + ], + [ + 1979, + 203 + ], + [ + 1979, + 200 + ], + [ + 1984, + 193 + ], + [ + 1985, + 151 + ], + [ + 1978, + 151 + ], + [ + 1974, + 214 + ], + [ + 1960, + 216 + ], + [ + 1958, + 236 + ], + [ + 1894, + 233 + ], + [ + 1870, + 235 + ], + [ + 1851, + 242 + ], + [ + 1801, + 249 + ], + [ + 1642, + 253 + ], + [ + 1589, + 252 + ], + [ + 1554, + 255 + ], + [ + 1526, + 262 + ], + [ + 1469, + 275 + ], + [ + 1292, + 302 + ], + [ + 1250, + 308 + ], + [ + 1214, + 307 + ], + [ + 1192, + 306 + ], + [ + 1193, + 270 + ], + [ + 1026, + 235 + ], + [ + 799, + 252 + ], + [ + 528, + 252 + ], + [ + 522, + 219 + ], + [ + 521, + 139 + ], + [ + 512, + 131 + ], + [ + 512, + 124 + ], + [ + 513, + 118 + ], + [ + 512, + 107 + ], + [ + 514, + 106 + ], + [ + 510, + 102 + ], + [ + 512, + 96 + ], + [ + 515, + 96 + ], + [ + 516, + 93 + ], + [ + 537, + 94 + ] + ] + }, + { + "label": "vegetation", + 
"polygon": [ + [ + 377, + 353 + ], + [ + 311, + 412 + ], + [ + 295, + 432 + ], + [ + 306, + 436 + ], + [ + 350, + 437 + ], + [ + 373, + 425 + ], + [ + 403, + 414 + ], + [ + 505, + 425 + ], + [ + 1087, + 422 + ], + [ + 1126, + 389 + ], + [ + 1178, + 358 + ], + [ + 1164, + 342 + ], + [ + 1153, + 348 + ], + [ + 1133, + 363 + ], + [ + 1110, + 368 + ], + [ + 1079, + 368 + ], + [ + 1061, + 367 + ], + [ + 1049, + 357 + ], + [ + 1031, + 357 + ], + [ + 1012, + 353 + ], + [ + 996, + 339 + ], + [ + 955, + 339 + ], + [ + 906, + 328 + ], + [ + 853, + 325 + ], + [ + 815, + 312 + ], + [ + 771, + 310 + ], + [ + 737, + 287 + ], + [ + 679, + 290 + ], + [ + 592, + 314 + ], + [ + 576, + 338 + ], + [ + 568, + 358 + ], + [ + 567, + 373 + ], + [ + 560, + 381 + ], + [ + 545, + 379 + ], + [ + 525, + 376 + ], + [ + 507, + 370 + ], + [ + 496, + 339 + ], + [ + 472, + 325 + ], + [ + 428, + 326 + ], + [ + 383, + 348 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1778, + 691 + ], + [ + 1121, + 534 + ], + [ + 851, + 487 + ], + [ + 799, + 484 + ], + [ + 639, + 474 + ], + [ + 493, + 473 + ], + [ + 409, + 470 + ], + [ + 294, + 466 + ], + [ + 207, + 466 + ], + [ + 138, + 466 + ], + [ + 131, + 458 + ], + [ + 40, + 461 + ], + [ + 15, + 461 + ], + [ + 18, + 453 + ], + [ + 42, + 452 + ], + [ + 84, + 454 + ], + [ + 131, + 455 + ], + [ + 161, + 454 + ], + [ + 201, + 453 + ], + [ + 256, + 441 + ], + [ + 291, + 436 + ], + [ + 309, + 423 + ], + [ + 329, + 413 + ], + [ + 346, + 409 + ], + [ + 374, + 406 + ], + [ + 416, + 403 + ], + [ + 450, + 404 + ], + [ + 499, + 408 + ], + [ + 548, + 415 + ], + [ + 584, + 420 + ], + [ + 637, + 421 + ], + [ + 690, + 420 + ], + [ + 722, + 421 + ], + [ + 754, + 424 + ], + [ + 834, + 423 + ], + [ + 951, + 420 + ], + [ + 1128, + 378 + ], + [ + 1189, + 339 + ], + [ + 1232, + 313 + ], + [ + 1264, + 298 + ], + [ + 1299, + 282 + ], + [ + 1333, + 270 + ], + [ + 1391, + 266 + ], + [ + 1440, + 271 + ], + [ + 1470, + 281 + ], + [ + 1504, + 288 + ], + [ + 1584, + 296 + ], + [ + 1672, + 300 + ], + [ + 1834, + 316 + ], + [ + 1873, + 321 + ], + [ + 1936, + 330 + ], + [ + 2002, + 336 + ], + [ + 2033, + 335 + ], + [ + 2048, + 335 + ], + [ + 2047, + 790 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1862, + 630 + ], + [ + 1584, + 572 + ], + [ + 1367, + 539 + ], + [ + 1291, + 527 + ], + [ + 1144, + 505 + ], + [ + 1086, + 496 + ], + [ + 1043, + 492 + ], + [ + 1003, + 480 + ], + [ + 1137, + 491 + ], + [ + 1603, + 542 + ], + [ + 1826, + 566 + ], + [ + 1859, + 582 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 17, + 450 + ], + [ + 16, + 431 + ], + [ + 14, + 424 + ], + [ + 0, + 421 + ], + [ + 0, + 281 + ], + [ + 23, + 279 + ], + [ + 35, + 270 + ], + [ + 36, + 255 + ], + [ + 42, + 237 + ], + [ + 57, + 228 + ], + [ + 72, + 216 + ], + [ + 88, + 203 + ], + [ + 117, + 194 + ], + [ + 135, + 184 + ], + [ + 155, + 171 + ], + [ + 191, + 171 + ], + [ + 223, + 183 + ], + [ + 250, + 207 + ], + [ + 283, + 214 + ], + [ + 300, + 230 + ], + [ + 318, + 254 + ], + [ + 357, + 299 + ], + [ + 368, + 322 + ], + [ + 365, + 337 + ], + [ + 350, + 346 + ], + [ + 314, + 369 + ], + [ + 300, + 389 + ], + [ + 295, + 416 + ], + [ + 298, + 446 + ], + [ + 300, + 459 + ], + [ + 292, + 454 + ], + [ + 289, + 423 + ], + [ + 284, + 407 + ], + [ + 277, + 395 + ], + [ + 277, + 376 + ], + [ + 267, + 378 + ], + [ + 255, + 392 + ], + [ + 260, + 408 + ], + [ + 242, + 417 + ], + [ + 226, + 422 + ], + [ + 214, + 422 + ], + [ + 207, + 422 + ], + [ + 197, + 432 + ], + [ + 188, + 441 + ], + [ + 187, + 454 + ], + [ + 192, + 468 + 
], + [ + 171, + 468 + ], + [ + 173, + 450 + ], + [ + 175, + 419 + ], + [ + 160, + 418 + ], + [ + 137, + 424 + ], + [ + 127, + 433 + ], + [ + 130, + 433 + ], + [ + 175, + 433 + ], + [ + 179, + 432 + ], + [ + 175, + 437 + ], + [ + 167, + 434 + ], + [ + 129, + 434 + ], + [ + 128, + 434 + ], + [ + 123, + 443 + ], + [ + 118, + 452 + ], + [ + 104, + 470 + ], + [ + 97, + 461 + ], + [ + 99, + 437 + ], + [ + 98, + 430 + ], + [ + 84, + 426 + ], + [ + 76, + 426 + ], + [ + 75, + 439 + ], + [ + 76, + 467 + ], + [ + 66, + 465 + ], + [ + 67, + 432 + ], + [ + 64, + 424 + ], + [ + 55, + 422 + ], + [ + 40, + 427 + ], + [ + 35, + 448 + ], + [ + 36, + 464 + ], + [ + 34, + 464 + ], + [ + 32, + 442 + ], + [ + 32, + 428 + ], + [ + 26, + 423 + ], + [ + 21, + 435 + ], + [ + 21, + 461 + ], + [ + 19, + 461 + ], + [ + 16, + 460 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 62, + 442 + ], + [ + 54, + 444 + ], + [ + 48, + 447 + ], + [ + 40, + 449 + ], + [ + 42, + 457 + ], + [ + 54, + 457 + ], + [ + 62, + 455 + ], + [ + 70, + 446 + ], + [ + 70, + 440 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 94, + 420 + ], + [ + 90, + 417 + ], + [ + 84, + 420 + ], + [ + 82, + 426 + ], + [ + 87, + 429 + ], + [ + 91, + 430 + ], + [ + 93, + 425 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 83, + 430 + ], + [ + 83, + 468 + ], + [ + 87, + 468 + ], + [ + 87, + 422 + ], + [ + 84, + 422 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 49, + 429 + ], + [ + 48, + 464 + ], + [ + 53, + 464 + ], + [ + 53, + 417 + ], + [ + 49, + 417 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 23, + 434 + ], + [ + 21, + 429 + ], + [ + 21, + 425 + ], + [ + 25, + 423 + ], + [ + 29, + 423 + ], + [ + 31, + 426 + ], + [ + 30, + 431 + ], + [ + 29, + 434 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 142, + 395 + ], + [ + 140, + 454 + ], + [ + 143, + 454 + ], + [ + 145, + 393 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 146, + 416 + ], + [ + 146, + 426 + ], + [ + 146, + 427 + ], + [ + 137, + 426 + ], + [ + 138, + 417 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 459, + 539 + ], + [ + 575, + 542 + ], + [ + 588, + 533 + ], + [ + 562, + 493 + ], + [ + 450, + 477 + ], + [ + 366, + 468 + ], + [ + 315, + 465 + ], + [ + 284, + 465 + ], + [ + 198, + 464 + ], + [ + 159, + 465 + ], + [ + 55, + 464 + ], + [ + 10, + 464 + ], + [ + 0, + 464 + ], + [ + 2, + 474 + ], + [ + 59, + 475 + ], + [ + 170, + 477 + ], + [ + 247, + 480 + ], + [ + 339, + 485 + ], + [ + 390, + 490 + ], + [ + 411, + 498 + ], + [ + 427, + 509 + ], + [ + 440, + 525 + ], + [ + 449, + 539 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 486, + 433 + ], + [ + 471, + 428 + ], + [ + 453, + 429 + ], + [ + 411, + 429 + ], + [ + 390, + 429 + ], + [ + 367, + 429 + ], + [ + 340, + 433 + ], + [ + 304, + 434 + ], + [ + 302, + 434 + ], + [ + 282, + 436 + ], + [ + 280, + 436 + ], + [ + 269, + 439 + ], + [ + 268, + 450 + ], + [ + 270, + 458 + ], + [ + 288, + 463 + ], + [ + 345, + 464 + ], + [ + 401, + 465 + ], + [ + 468, + 465 + ], + [ + 514, + 464 + ], + [ + 684, + 464 + ], + [ + 816, + 463 + ], + [ + 1257, + 481 + ], + [ + 1581, + 509 + ], + [ + 1876, + 535 + ], + [ + 2048, + 545 + ], + [ + 2048, + 355 + ], + [ + 1618, + 394 + ], + [ + 1607, + 385 + ], + [ + 1556, + 394 + ], + [ + 1522, + 397 + ], + [ + 1446, + 395 + ], + [ + 1382, + 393 + ], + [ + 1333, + 394 + ], + [ + 1284, + 394 + ], + [ + 1254, + 395 + ], + [ + 1223, + 402 + ], + [ + 1163, + 403 + ], + [ + 1105, + 404 + ], + [ + 1042, + 405 + ], + 
[ + 1004, + 413 + ], + [ + 965, + 416 + ], + [ + 910, + 431 + ], + [ + 859, + 430 + ], + [ + 845, + 426 + ], + [ + 821, + 426 + ], + [ + 799, + 424 + ], + [ + 781, + 416 + ], + [ + 758, + 421 + ], + [ + 746, + 425 + ], + [ + 740, + 436 + ], + [ + 727, + 438 + ], + [ + 712, + 433 + ], + [ + 686, + 428 + ], + [ + 651, + 425 + ], + [ + 617, + 429 + ], + [ + 596, + 431 + ], + [ + 566, + 427 + ], + [ + 544, + 429 + ], + [ + 524, + 427 + ], + [ + 510, + 428 + ], + [ + 497, + 432 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 203, + 409 + ], + [ + 195, + 422 + ], + [ + 201, + 462 + ], + [ + 202, + 467 + ], + [ + 220, + 470 + ], + [ + 233, + 469 + ], + [ + 261, + 467 + ], + [ + 281, + 468 + ], + [ + 281, + 438 + ], + [ + 295, + 467 + ], + [ + 298, + 473 + ], + [ + 304, + 472 + ], + [ + 284, + 435 + ], + [ + 281, + 408 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 377, + 403 + ], + [ + 380, + 462 + ], + [ + 372, + 461 + ], + [ + 372, + 416 + ], + [ + 361, + 408 + ], + [ + 349, + 404 + ], + [ + 348, + 382 + ], + [ + 347, + 370 + ], + [ + 322, + 369 + ], + [ + 323, + 341 + ], + [ + 327, + 254 + ], + [ + 328, + 236 + ], + [ + 331, + 217 + ], + [ + 342, + 200 + ], + [ + 357, + 183 + ], + [ + 364, + 180 + ], + [ + 389, + 198 + ], + [ + 406, + 224 + ], + [ + 410, + 236 + ], + [ + 416, + 246 + ], + [ + 419, + 258 + ], + [ + 419, + 270 + ], + [ + 428, + 280 + ], + [ + 436, + 308 + ], + [ + 437, + 347 + ], + [ + 437, + 371 + ], + [ + 430, + 382 + ], + [ + 421, + 392 + ], + [ + 399, + 400 + ], + [ + 384, + 403 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 297, + 279 + ], + [ + 297, + 348 + ], + [ + 0, + 335 + ], + [ + 0, + 262 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 360, + 353 + ], + [ + 359, + 471 + ], + [ + 349, + 471 + ], + [ + 351, + 320 + ], + [ + 0, + 308 + ], + [ + 0, + 296 + ], + [ + 362, + 309 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 89, + 498 + ], + [ + 86, + 483 + ], + [ + 86, + 469 + ], + [ + 90, + 465 + ], + [ + 95, + 454 + ], + [ + 101, + 447 + ], + [ + 118, + 446 + ], + [ + 125, + 446 + ], + [ + 133, + 445 + ], + [ + 144, + 448 + ], + [ + 149, + 452 + ], + [ + 153, + 459 + ], + [ + 157, + 459 + ], + [ + 160, + 460 + ], + [ + 161, + 464 + ], + [ + 156, + 466 + ], + [ + 157, + 472 + ], + [ + 157, + 480 + ], + [ + 157, + 491 + ], + [ + 157, + 496 + ], + [ + 150, + 498 + ], + [ + 147, + 492 + ], + [ + 144, + 491 + ], + [ + 122, + 493 + ], + [ + 108, + 494 + ], + [ + 100, + 493 + ], + [ + 99, + 499 + ], + [ + 97, + 500 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 477, + 397 + ], + [ + 459, + 399 + ], + [ + 457, + 408 + ], + [ + 464, + 468 + ], + [ + 456, + 468 + ], + [ + 452, + 425 + ], + [ + 451, + 404 + ], + [ + 443, + 401 + ], + [ + 414, + 398 + ], + [ + 403, + 364 + ], + [ + 418, + 282 + ], + [ + 422, + 269 + ], + [ + 434, + 255 + ], + [ + 444, + 240 + ], + [ + 464, + 236 + ], + [ + 479, + 228 + ], + [ + 505, + 213 + ], + [ + 522, + 212 + ], + [ + 533, + 211 + ], + [ + 554, + 290 + ], + [ + 552, + 323 + ], + [ + 541, + 340 + ], + [ + 518, + 359 + ], + [ + 502, + 374 + ], + [ + 493, + 384 + ], + [ + 483, + 394 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 505, + 373 + ], + [ + 506, + 470 + ], + [ + 508, + 470 + ], + [ + 510, + 356 + ], + [ + 505, + 357 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 731, + 293 + ], + [ + 731, + 434 + ], + [ + 737, + 434 + ], + [ + 735, + 295 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 749, + 310 + ], + [ + 748, + 427 + ], + [ + 752, + 427 
+ ], + [ + 754, + 307 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1110, + 354 + ], + [ + 1112, + 383 + ], + [ + 1115, + 382 + ], + [ + 1114, + 353 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1128, + 358 + ], + [ + 1132, + 331 + ], + [ + 1099, + 332 + ], + [ + 1099, + 356 + ], + [ + 1099, + 362 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 663, + 470 + ], + [ + 659, + 362 + ], + [ + 664, + 362 + ], + [ + 665, + 466 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 541, + 392 + ], + [ + 537, + 448 + ], + [ + 542, + 473 + ], + [ + 550, + 471 + ], + [ + 549, + 438 + ], + [ + 549, + 406 + ], + [ + 550, + 383 + ], + [ + 560, + 375 + ], + [ + 570, + 380 + ], + [ + 586, + 392 + ], + [ + 604, + 394 + ], + [ + 611, + 387 + ], + [ + 616, + 393 + ], + [ + 616, + 425 + ], + [ + 614, + 469 + ], + [ + 628, + 471 + ], + [ + 623, + 441 + ], + [ + 624, + 408 + ], + [ + 632, + 394 + ], + [ + 645, + 389 + ], + [ + 661, + 394 + ], + [ + 674, + 385 + ], + [ + 680, + 385 + ], + [ + 689, + 400 + ], + [ + 688, + 440 + ], + [ + 690, + 467 + ], + [ + 697, + 477 + ], + [ + 705, + 469 + ], + [ + 701, + 432 + ], + [ + 704, + 394 + ], + [ + 710, + 382 + ], + [ + 726, + 390 + ], + [ + 739, + 396 + ], + [ + 743, + 387 + ], + [ + 734, + 378 + ], + [ + 723, + 372 + ], + [ + 732, + 357 + ], + [ + 740, + 346 + ], + [ + 749, + 340 + ], + [ + 757, + 339 + ], + [ + 765, + 342 + ], + [ + 781, + 345 + ], + [ + 782, + 322 + ], + [ + 772, + 276 + ], + [ + 760, + 229 + ], + [ + 746, + 234 + ], + [ + 736, + 225 + ], + [ + 736, + 215 + ], + [ + 722, + 194 + ], + [ + 708, + 188 + ], + [ + 699, + 201 + ], + [ + 686, + 208 + ], + [ + 675, + 203 + ], + [ + 675, + 194 + ], + [ + 661, + 192 + ], + [ + 650, + 192 + ], + [ + 649, + 202 + ], + [ + 645, + 181 + ], + [ + 651, + 170 + ], + [ + 642, + 156 + ], + [ + 628, + 142 + ], + [ + 610, + 139 + ], + [ + 587, + 138 + ], + [ + 575, + 148 + ], + [ + 563, + 162 + ], + [ + 557, + 180 + ], + [ + 550, + 195 + ], + [ + 547, + 201 + ], + [ + 533, + 210 + ], + [ + 524, + 218 + ], + [ + 502, + 263 + ], + [ + 504, + 307 + ], + [ + 515, + 344 + ], + [ + 525, + 356 + ], + [ + 540, + 371 + ], + [ + 540, + 378 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 833, + 355 + ], + [ + 832, + 450 + ], + [ + 837, + 450 + ], + [ + 837, + 352 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 828, + 408 + ], + [ + 826, + 412 + ], + [ + 829, + 418 + ], + [ + 833, + 422 + ], + [ + 840, + 421 + ], + [ + 842, + 415 + ], + [ + 842, + 408 + ], + [ + 840, + 405 + ], + [ + 834, + 403 + ], + [ + 830, + 405 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 777, + 476 + ], + [ + 777, + 447 + ], + [ + 780, + 413 + ], + [ + 787, + 359 + ], + [ + 787, + 348 + ], + [ + 766, + 348 + ], + [ + 749, + 338 + ], + [ + 741, + 311 + ], + [ + 747, + 267 + ], + [ + 747, + 225 + ], + [ + 741, + 219 + ], + [ + 732, + 208 + ], + [ + 735, + 190 + ], + [ + 747, + 162 + ], + [ + 754, + 147 + ], + [ + 754, + 136 + ], + [ + 744, + 120 + ], + [ + 753, + 110 + ], + [ + 762, + 112 + ], + [ + 758, + 104 + ], + [ + 760, + 86 + ], + [ + 773, + 82 + ], + [ + 782, + 85 + ], + [ + 788, + 85 + ], + [ + 786, + 64 + ], + [ + 798, + 58 + ], + [ + 823, + 57 + ], + [ + 831, + 64 + ], + [ + 820, + 77 + ], + [ + 822, + 84 + ], + [ + 830, + 78 + ], + [ + 837, + 81 + ], + [ + 836, + 92 + ], + [ + 851, + 93 + ], + [ + 845, + 104 + ], + [ + 851, + 114 + ], + [ + 845, + 118 + ], + [ + 861, + 98 + ], + [ + 865, + 81 + ], + [ + 879, + 74 + ], + [ + 890, + 74 + ], + [ + 895, + 90 + ], + [ 
+ 912, + 94 + ], + [ + 923, + 103 + ], + [ + 930, + 111 + ], + [ + 945, + 122 + ], + [ + 967, + 209 + ], + [ + 969, + 271 + ], + [ + 962, + 302 + ], + [ + 958, + 357 + ], + [ + 951, + 367 + ], + [ + 930, + 364 + ], + [ + 908, + 362 + ], + [ + 893, + 360 + ], + [ + 892, + 375 + ], + [ + 899, + 422 + ], + [ + 887, + 422 + ], + [ + 885, + 406 + ], + [ + 885, + 367 + ], + [ + 877, + 352 + ], + [ + 867, + 346 + ], + [ + 853, + 362 + ], + [ + 842, + 367 + ], + [ + 826, + 365 + ], + [ + 816, + 359 + ], + [ + 800, + 353 + ], + [ + 795, + 381 + ], + [ + 787, + 450 + ], + [ + 786, + 477 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1094, + 510 + ], + [ + 1092, + 247 + ], + [ + 1099, + 247 + ], + [ + 1100, + 512 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 992, + 451 + ], + [ + 991, + 485 + ], + [ + 1005, + 486 + ], + [ + 1009, + 426 + ], + [ + 1018, + 357 + ], + [ + 1021, + 348 + ], + [ + 1058, + 343 + ], + [ + 1068, + 334 + ], + [ + 1088, + 334 + ], + [ + 1092, + 321 + ], + [ + 1104, + 310 + ], + [ + 1127, + 296 + ], + [ + 1127, + 281 + ], + [ + 1134, + 273 + ], + [ + 1137, + 279 + ], + [ + 1162, + 475 + ], + [ + 1168, + 507 + ], + [ + 1169, + 518 + ], + [ + 1186, + 513 + ], + [ + 1179, + 456 + ], + [ + 1165, + 388 + ], + [ + 1159, + 319 + ], + [ + 1162, + 298 + ], + [ + 1156, + 277 + ], + [ + 1170, + 295 + ], + [ + 1188, + 301 + ], + [ + 1211, + 306 + ], + [ + 1238, + 299 + ], + [ + 1256, + 289 + ], + [ + 1266, + 275 + ], + [ + 1245, + 257 + ], + [ + 1267, + 255 + ], + [ + 1297, + 241 + ], + [ + 1301, + 214 + ], + [ + 1281, + 208 + ], + [ + 1279, + 199 + ], + [ + 1296, + 180 + ], + [ + 1319, + 146 + ], + [ + 1317, + 136 + ], + [ + 1311, + 132 + ], + [ + 1322, + 117 + ], + [ + 1333, + 104 + ], + [ + 1336, + 94 + ], + [ + 1328, + 97 + ], + [ + 1308, + 96 + ], + [ + 1312, + 83 + ], + [ + 1319, + 75 + ], + [ + 1319, + 63 + ], + [ + 1311, + 61 + ], + [ + 1302, + 60 + ], + [ + 1308, + 51 + ], + [ + 1303, + 47 + ], + [ + 1295, + 51 + ], + [ + 1282, + 57 + ], + [ + 1265, + 58 + ], + [ + 1268, + 45 + ], + [ + 1256, + 43 + ], + [ + 1249, + 47 + ], + [ + 1242, + 37 + ], + [ + 1230, + 40 + ], + [ + 1226, + 50 + ], + [ + 1226, + 61 + ], + [ + 1217, + 48 + ], + [ + 1219, + 36 + ], + [ + 1235, + 22 + ], + [ + 1254, + 5 + ], + [ + 1254, + 0 + ], + [ + 1037, + 0 + ], + [ + 1027, + 3 + ], + [ + 1024, + 6 + ], + [ + 1021, + 0 + ], + [ + 948, + 0 + ], + [ + 956, + 9 + ], + [ + 962, + 21 + ], + [ + 956, + 21 + ], + [ + 941, + 19 + ], + [ + 932, + 14 + ], + [ + 923, + 16 + ], + [ + 922, + 21 + ], + [ + 931, + 28 + ], + [ + 946, + 43 + ], + [ + 954, + 52 + ], + [ + 956, + 58 + ], + [ + 948, + 62 + ], + [ + 939, + 77 + ], + [ + 941, + 88 + ], + [ + 942, + 96 + ], + [ + 934, + 98 + ], + [ + 931, + 102 + ], + [ + 936, + 109 + ], + [ + 929, + 122 + ], + [ + 920, + 157 + ], + [ + 919, + 263 + ], + [ + 943, + 336 + ], + [ + 970, + 344 + ], + [ + 988, + 347 + ], + [ + 996, + 353 + ], + [ + 1001, + 364 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1154, + 522 + ], + [ + 1155, + 492 + ], + [ + 1162, + 492 + ], + [ + 1163, + 521 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1311, + 559 + ], + [ + 1316, + 509 + ], + [ + 1322, + 511 + ], + [ + 1325, + 554 + ], + [ + 1323, + 559 + ], + [ + 1319, + 524 + ], + [ + 1315, + 559 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1362, + 559 + ], + [ + 1362, + 364 + ], + [ + 1357, + 362 + ], + [ + 1346, + 367 + ], + [ + 1332, + 367 + ], + [ + 1322, + 368 + ], + [ + 1314, + 360 + ], + [ + 1293, + 344 + ], + [ + 1284, + 338 
+ ], + [ + 1287, + 320 + ], + [ + 1295, + 311 + ], + [ + 1298, + 302 + ], + [ + 1293, + 297 + ], + [ + 1282, + 291 + ], + [ + 1280, + 279 + ], + [ + 1286, + 264 + ], + [ + 1295, + 267 + ], + [ + 1312, + 265 + ], + [ + 1309, + 255 + ], + [ + 1297, + 244 + ], + [ + 1291, + 213 + ], + [ + 1300, + 203 + ], + [ + 1305, + 203 + ], + [ + 1297, + 196 + ], + [ + 1284, + 186 + ], + [ + 1288, + 143 + ], + [ + 1323, + 132 + ], + [ + 1335, + 126 + ], + [ + 1346, + 124 + ], + [ + 1359, + 124 + ], + [ + 1365, + 125 + ], + [ + 1380, + 105 + ], + [ + 1390, + 102 + ], + [ + 1420, + 95 + ], + [ + 1433, + 104 + ], + [ + 1448, + 119 + ], + [ + 1464, + 128 + ], + [ + 1475, + 130 + ], + [ + 1489, + 138 + ], + [ + 1501, + 149 + ], + [ + 1514, + 163 + ], + [ + 1524, + 180 + ], + [ + 1531, + 199 + ], + [ + 1529, + 206 + ], + [ + 1535, + 214 + ], + [ + 1536, + 226 + ], + [ + 1518, + 231 + ], + [ + 1510, + 235 + ], + [ + 1515, + 238 + ], + [ + 1524, + 242 + ], + [ + 1518, + 248 + ], + [ + 1506, + 251 + ], + [ + 1494, + 246 + ], + [ + 1476, + 255 + ], + [ + 1464, + 259 + ], + [ + 1475, + 268 + ], + [ + 1484, + 279 + ], + [ + 1478, + 285 + ], + [ + 1471, + 283 + ], + [ + 1446, + 281 + ], + [ + 1432, + 282 + ], + [ + 1431, + 288 + ], + [ + 1446, + 287 + ], + [ + 1451, + 290 + ], + [ + 1448, + 307 + ], + [ + 1441, + 312 + ], + [ + 1432, + 313 + ], + [ + 1438, + 323 + ], + [ + 1430, + 327 + ], + [ + 1418, + 321 + ], + [ + 1407, + 322 + ], + [ + 1402, + 325 + ], + [ + 1415, + 329 + ], + [ + 1425, + 339 + ], + [ + 1429, + 351 + ], + [ + 1422, + 355 + ], + [ + 1402, + 358 + ], + [ + 1387, + 356 + ], + [ + 1375, + 359 + ], + [ + 1368, + 368 + ], + [ + 1368, + 370 + ], + [ + 1365, + 558 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1353, + 558 + ], + [ + 1354, + 424 + ], + [ + 1376, + 423 + ], + [ + 1378, + 558 + ], + [ + 1372, + 559 + ], + [ + 1368, + 434 + ], + [ + 1359, + 435 + ], + [ + 1360, + 559 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1742, + 111 + ], + [ + 1719, + 117 + ], + [ + 1705, + 123 + ], + [ + 1685, + 124 + ], + [ + 1672, + 123 + ], + [ + 1661, + 118 + ], + [ + 1643, + 105 + ], + [ + 1640, + 109 + ], + [ + 1640, + 115 + ], + [ + 1640, + 125 + ], + [ + 1631, + 124 + ], + [ + 1625, + 128 + ], + [ + 1618, + 137 + ], + [ + 1611, + 132 + ], + [ + 1601, + 128 + ], + [ + 1589, + 131 + ], + [ + 1586, + 136 + ], + [ + 1587, + 144 + ], + [ + 1598, + 145 + ], + [ + 1591, + 155 + ], + [ + 1591, + 164 + ], + [ + 1580, + 169 + ], + [ + 1576, + 180 + ], + [ + 1581, + 190 + ], + [ + 1581, + 199 + ], + [ + 1571, + 187 + ], + [ + 1562, + 183 + ], + [ + 1549, + 186 + ], + [ + 1541, + 192 + ], + [ + 1539, + 204 + ], + [ + 1541, + 216 + ], + [ + 1536, + 224 + ], + [ + 1536, + 234 + ], + [ + 1529, + 247 + ], + [ + 1533, + 256 + ], + [ + 1538, + 265 + ], + [ + 1532, + 266 + ], + [ + 1531, + 275 + ], + [ + 1538, + 284 + ], + [ + 1548, + 287 + ], + [ + 1551, + 295 + ], + [ + 1561, + 297 + ], + [ + 1573, + 296 + ], + [ + 1569, + 290 + ], + [ + 1582, + 286 + ], + [ + 1589, + 286 + ], + [ + 1590, + 296 + ], + [ + 1612, + 302 + ], + [ + 1644, + 305 + ], + [ + 1729, + 315 + ], + [ + 1790, + 316 + ], + [ + 1810, + 318 + ], + [ + 1833, + 323 + ], + [ + 1841, + 317 + ], + [ + 1836, + 298 + ], + [ + 1828, + 284 + ], + [ + 1813, + 281 + ], + [ + 1796, + 281 + ], + [ + 1799, + 270 + ], + [ + 1818, + 265 + ], + [ + 1823, + 254 + ], + [ + 1820, + 241 + ], + [ + 1822, + 224 + ], + [ + 1821, + 213 + ], + [ + 1816, + 208 + ], + [ + 1808, + 208 + ], + [ + 1803, + 208 + ], + [ + 1802, + 201 + ], + [ + 1797, + 201 + ], 
+ [ + 1787, + 191 + ], + [ + 1786, + 181 + ], + [ + 1792, + 173 + ], + [ + 1789, + 168 + ], + [ + 1775, + 164 + ], + [ + 1763, + 161 + ], + [ + 1765, + 153 + ], + [ + 1775, + 142 + ], + [ + 1775, + 130 + ], + [ + 1764, + 124 + ], + [ + 1751, + 118 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1840, + 505 + ], + [ + 1835, + 470 + ], + [ + 1833, + 414 + ], + [ + 1837, + 373 + ], + [ + 1847, + 245 + ], + [ + 1845, + 190 + ], + [ + 1849, + 120 + ], + [ + 1855, + 100 + ], + [ + 1847, + 54 + ], + [ + 1845, + 36 + ], + [ + 1845, + 12 + ], + [ + 1844, + 0 + ], + [ + 1821, + 0 + ], + [ + 1817, + 4 + ], + [ + 1806, + 7 + ], + [ + 1799, + 0 + ], + [ + 1791, + 0 + ], + [ + 1784, + 7 + ], + [ + 1768, + 12 + ], + [ + 1757, + 11 + ], + [ + 1751, + 2 + ], + [ + 1750, + 0 + ], + [ + 1956, + 0 + ], + [ + 1958, + 4 + ], + [ + 1960, + 12 + ], + [ + 1949, + 10 + ], + [ + 1945, + 4 + ], + [ + 1944, + 4 + ], + [ + 1934, + 6 + ], + [ + 1924, + 8 + ], + [ + 1924, + 0 + ], + [ + 1884, + 0 + ], + [ + 1890, + 9 + ], + [ + 1904, + 73 + ], + [ + 1905, + 81 + ], + [ + 1903, + 86 + ], + [ + 1903, + 93 + ], + [ + 1903, + 126 + ], + [ + 1898, + 152 + ], + [ + 1896, + 204 + ], + [ + 1896, + 245 + ], + [ + 1889, + 286 + ], + [ + 1883, + 373 + ], + [ + 1875, + 469 + ], + [ + 1875, + 501 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1663, + 607 + ], + [ + 1680, + 69 + ], + [ + 1679, + 21 + ], + [ + 1680, + 0 + ], + [ + 1699, + 0 + ], + [ + 1683, + 604 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 498, + 265 + ], + [ + 526, + 235 + ], + [ + 552, + 266 + ], + [ + 526, + 310 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 500, + 324 + ], + [ + 492, + 325 + ], + [ + 487, + 331 + ], + [ + 485, + 338 + ], + [ + 485, + 349 + ], + [ + 488, + 359 + ], + [ + 495, + 362 + ], + [ + 507, + 362 + ], + [ + 516, + 355 + ], + [ + 520, + 347 + ], + [ + 521, + 335 + ], + [ + 514, + 327 + ], + [ + 506, + 325 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 276, + 174 + ], + [ + 276, + 105 + ], + [ + 311, + 109 + ], + [ + 311, + 177 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 521, + 533 + ], + [ + 526, + 216 + ], + [ + 525, + 194 + ], + [ + 510, + 177 + ], + [ + 499, + 169 + ], + [ + 350, + 130 + ], + [ + 296, + 127 + ], + [ + 295, + 119 + ], + [ + 349, + 124 + ], + [ + 492, + 160 + ], + [ + 503, + 163 + ], + [ + 513, + 173 + ], + [ + 526, + 190 + ], + [ + 531, + 208 + ], + [ + 530, + 227 + ], + [ + 527, + 536 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 838, + 576 + ], + [ + 837, + 586 + ], + [ + 831, + 588 + ], + [ + 818, + 584 + ], + [ + 816, + 578 + ], + [ + 815, + 573 + ], + [ + 813, + 569 + ], + [ + 813, + 536 + ], + [ + 811, + 505 + ], + [ + 816, + 491 + ], + [ + 819, + 479 + ], + [ + 819, + 477 + ], + [ + 810, + 477 + ], + [ + 801, + 475 + ], + [ + 796, + 472 + ], + [ + 797, + 467 + ], + [ + 800, + 464 + ], + [ + 806, + 462 + ], + [ + 817, + 462 + ], + [ + 819, + 466 + ], + [ + 825, + 468 + ], + [ + 830, + 460 + ], + [ + 838, + 437 + ], + [ + 840, + 426 + ], + [ + 848, + 425 + ], + [ + 855, + 422 + ], + [ + 879, + 418 + ], + [ + 923, + 417 + ], + [ + 968, + 422 + ], + [ + 977, + 432 + ], + [ + 991, + 459 + ], + [ + 999, + 487 + ], + [ + 1002, + 513 + ], + [ + 1002, + 537 + ], + [ + 1001, + 560 + ], + [ + 998, + 569 + ], + [ + 998, + 581 + ], + [ + 996, + 588 + ], + [ + 986, + 590 + ], + [ + 976, + 588 + ], + [ + 975, + 586 + ], + [ + 973, + 575 + ], + [ + 962, + 574 + ], + [ + 955, + 574 + ], + [ + 955, + 570 + ], + [ + 950, + 569 + ], + [ + 
850, + 568 + ], + [ + 847, + 573 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 884, + 519 + ], + [ + 884, + 505 + ], + [ + 942, + 505 + ], + [ + 940, + 519 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1733, + 777 + ], + [ + 1729, + 785 + ], + [ + 1724, + 796 + ], + [ + 1718, + 802 + ], + [ + 1706, + 803 + ], + [ + 1681, + 803 + ], + [ + 1664, + 796 + ], + [ + 1650, + 783 + ], + [ + 1645, + 757 + ], + [ + 1642, + 730 + ], + [ + 1642, + 705 + ], + [ + 1643, + 699 + ], + [ + 1633, + 690 + ], + [ + 1634, + 674 + ], + [ + 1635, + 660 + ], + [ + 1641, + 655 + ], + [ + 1645, + 645 + ], + [ + 1648, + 618 + ], + [ + 1656, + 602 + ], + [ + 1663, + 593 + ], + [ + 1681, + 589 + ], + [ + 1709, + 586 + ], + [ + 1717, + 585 + ], + [ + 1718, + 555 + ], + [ + 1725, + 554 + ], + [ + 1764, + 554 + ], + [ + 1826, + 487 + ], + [ + 1856, + 459 + ], + [ + 1883, + 445 + ], + [ + 1897, + 443 + ], + [ + 1904, + 435 + ], + [ + 1920, + 429 + ], + [ + 2025, + 416 + ], + [ + 2048, + 414 + ], + [ + 2048, + 415 + ], + [ + 2048, + 926 + ], + [ + 2043, + 948 + ], + [ + 2027, + 953 + ], + [ + 2008, + 954 + ], + [ + 1986, + 947 + ], + [ + 1966, + 929 + ], + [ + 1951, + 901 + ], + [ + 1942, + 878 + ], + [ + 1941, + 857 + ], + [ + 1940, + 851 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000134_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000134_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d00dcbae520e2bcb06ecc0a039103e32a90f3d53 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000134_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000135_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000135_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..c74fa171b406f6173733167af9f542d9f43523b0 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000135_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000135_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000135_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..31f66c50d1db73e57b9c036aa0fc9268197ab600 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000135_000019_gtFine_polygons.json @@ -0,0 +1,8707 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "terrain", + "polygon": [ + [ + 167, + 585 + ], + [ + 227, + 581 + ], + [ + 261, + 571 + ], + [ + 288, + 567 + ], + [ + 293, + 563 + ], + [ + 281, + 558 + ], + [ + 271, + 557 + ], + [ + 268, + 547 + ], + [ + 262, + 535 + 
], + [ + 249, + 537 + ], + [ + 240, + 534 + ], + [ + 224, + 534 + ], + [ + 207, + 533 + ], + [ + 189, + 535 + ], + [ + 171, + 536 + ], + [ + 150, + 538 + ], + [ + 130, + 539 + ], + [ + 103, + 542 + ], + [ + 84, + 551 + ], + [ + 77, + 564 + ], + [ + 91, + 574 + ], + [ + 107, + 583 + ], + [ + 126, + 583 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1434, + 589 + ], + [ + 1347, + 590 + ], + [ + 1318, + 589 + ], + [ + 1276, + 567 + ], + [ + 1269, + 552 + ], + [ + 1289, + 525 + ], + [ + 1292, + 514 + ], + [ + 1284, + 501 + ], + [ + 1300, + 481 + ], + [ + 1320, + 486 + ], + [ + 1335, + 489 + ], + [ + 1349, + 487 + ], + [ + 1356, + 480 + ], + [ + 1388, + 479 + ], + [ + 1411, + 473 + ], + [ + 1431, + 457 + ], + [ + 1456, + 450 + ], + [ + 1488, + 456 + ], + [ + 1502, + 456 + ], + [ + 1544, + 459 + ], + [ + 1571, + 463 + ], + [ + 1577, + 491 + ], + [ + 1548, + 533 + ], + [ + 1508, + 573 + ], + [ + 1500, + 582 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 571, + 33 + ], + [ + 777, + 313 + ], + [ + 944, + 336 + ], + [ + 1123, + 322 + ], + [ + 1197, + 0 + ], + [ + 564, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2048, + 548 + ], + [ + 1122, + 445 + ], + [ + 1063, + 443 + ], + [ + 965, + 449 + ], + [ + 855, + 456 + ], + [ + 428, + 486 + ], + [ + 0, + 561 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 779 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1037, + 286 + ], + [ + 954, + 292 + ], + [ + 950, + 235 + ], + [ + 906, + 238 + ], + [ + 908, + 275 + ], + [ + 894, + 275 + ], + [ + 895, + 294 + ], + [ + 862, + 296 + ], + [ + 698, + 225 + ], + [ + 472, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 548 + ], + [ + 313, + 517 + ], + [ + 477, + 487 + ], + [ + 684, + 464 + ], + [ + 794, + 452 + ], + [ + 838, + 454 + ], + [ + 875, + 452 + ], + [ + 973, + 450 + ], + [ + 1021, + 445 + ], + [ + 1059, + 445 + ], + [ + 1089, + 442 + ], + [ + 1153, + 441 + ], + [ + 1294, + 462 + ], + [ + 1560, + 485 + ], + [ + 1876, + 514 + ], + [ + 2048, + 551 + ], + [ + 2048, + 0 + ], + [ + 1242, + 0 + ], + [ + 1163, + 270 + ], + [ + 1086, + 282 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 865, + 427 + ], + [ + 861, + 427 + ], + [ + 858, + 428 + ], + [ + 858, + 431 + ], + [ + 861, + 435 + ], + [ + 865, + 435 + ], + [ + 867, + 431 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 837, + 430 + ], + [ + 837, + 440 + ], + [ + 831, + 440 + ], + [ + 832, + 429 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 886, + 416 + ], + [ + 883, + 393 + ], + [ + 884, + 384 + ], + [ + 889, + 382 + ], + [ + 900, + 382 + ], + [ + 901, + 384 + ], + [ + 894, + 384 + ], + [ + 890, + 384 + ], + [ + 886, + 386 + ], + [ + 883, + 392 + ], + [ + 888, + 413 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 897, + 395 + ], + [ + 897, + 382 + ], + [ + 902, + 382 + ], + [ + 902, + 394 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 989, + 387 + ], + [ + 998, + 378 + ], + [ + 1001, + 374 + ], + [ + 1020, + 368 + ], + [ + 1030, + 367 + ], + [ + 1045, + 365 + ], + [ + 1044, + 367 + ], + [ + 1019, + 370 + ], + [ + 1006, + 373 + ], + [ + 1001, + 376 + ], + [ + 993, + 384 + ], + [ + 987, + 394 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1041, + 359 + ], + [ + 1046, + 358 + ], + [ + 1046, + 375 + ], + [ + 1039, + 374 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 990, + 353 + ], + [ + 981, + 353 + ], + [ + 974, + 354 + ], + [ + 972, + 364 + ], + [ + 972, + 370 + ], + [ + 971, + 374 + ], + [ + 966, + 375 + ], 
+ [ + 962, + 381 + ], + [ + 963, + 390 + ], + [ + 963, + 396 + ], + [ + 960, + 405 + ], + [ + 957, + 416 + ], + [ + 956, + 427 + ], + [ + 960, + 433 + ], + [ + 972, + 440 + ], + [ + 983, + 444 + ], + [ + 991, + 434 + ], + [ + 994, + 424 + ], + [ + 1001, + 423 + ], + [ + 1006, + 422 + ], + [ + 1008, + 414 + ], + [ + 1007, + 407 + ], + [ + 1003, + 404 + ], + [ + 1002, + 393 + ], + [ + 999, + 386 + ], + [ + 998, + 378 + ], + [ + 998, + 372 + ], + [ + 995, + 367 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 979, + 408 + ], + [ + 976, + 408 + ], + [ + 976, + 405 + ], + [ + 996, + 404 + ], + [ + 998, + 406 + ], + [ + 998, + 408 + ], + [ + 995, + 408 + ], + [ + 996, + 436 + ], + [ + 981, + 436 + ], + [ + 980, + 436 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 995, + 402 + ], + [ + 1002, + 402 + ], + [ + 1002, + 409 + ], + [ + 996, + 409 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1029, + 379 + ], + [ + 1021, + 388 + ], + [ + 1017, + 400 + ], + [ + 1014, + 412 + ], + [ + 1019, + 422 + ], + [ + 1024, + 431 + ], + [ + 1034, + 438 + ], + [ + 1034, + 443 + ], + [ + 1043, + 447 + ], + [ + 1047, + 440 + ], + [ + 1059, + 433 + ], + [ + 1058, + 432 + ], + [ + 1061, + 425 + ], + [ + 1065, + 426 + ], + [ + 1065, + 440 + ], + [ + 1072, + 441 + ], + [ + 1073, + 428 + ], + [ + 1078, + 422 + ], + [ + 1083, + 420 + ], + [ + 1086, + 406 + ], + [ + 1070, + 391 + ], + [ + 1068, + 383 + ], + [ + 1064, + 380 + ], + [ + 1056, + 387 + ], + [ + 1050, + 390 + ], + [ + 1045, + 381 + ], + [ + 1041, + 374 + ], + [ + 1039, + 371 + ], + [ + 1031, + 376 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1096, + 456 + ], + [ + 1084, + 458 + ], + [ + 1077, + 457 + ], + [ + 1074, + 449 + ], + [ + 1075, + 440 + ], + [ + 1084, + 438 + ], + [ + 1092, + 438 + ], + [ + 1098, + 434 + ], + [ + 1105, + 430 + ], + [ + 1115, + 427 + ], + [ + 1119, + 432 + ], + [ + 1116, + 444 + ], + [ + 1107, + 451 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1089, + 363 + ], + [ + 1091, + 440 + ], + [ + 1095, + 440 + ], + [ + 1093, + 359 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1098, + 451 + ], + [ + 1077, + 455 + ], + [ + 1184, + 529 + ], + [ + 1877, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 592 + ], + [ + 1146, + 439 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1098, + 451 + ], + [ + 1077, + 455 + ], + [ + 1184, + 529 + ], + [ + 1877, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 592 + ], + [ + 1146, + 439 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1099, + 359 + ], + [ + 1095, + 359 + ], + [ + 1097, + 438 + ], + [ + 1100, + 437 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1121, + 427 + ], + [ + 1119, + 384 + ], + [ + 1117, + 377 + ], + [ + 1107, + 375 + ], + [ + 1107, + 372 + ], + [ + 1093, + 372 + ], + [ + 1087, + 378 + ], + [ + 1072, + 377 + ], + [ + 1059, + 363 + ], + [ + 1051, + 351 + ], + [ + 1042, + 341 + ], + [ + 1029, + 332 + ], + [ + 1018, + 318 + ], + [ + 1017, + 303 + ], + [ + 1014, + 297 + ], + [ + 1006, + 295 + ], + [ + 1007, + 284 + ], + [ + 1017, + 275 + ], + [ + 1030, + 269 + ], + [ + 1048, + 267 + ], + [ + 1071, + 269 + ], + [ + 1095, + 271 + ], + [ + 1112, + 279 + ], + [ + 1127, + 301 + ], + [ + 1134, + 320 + ], + [ + 1150, + 325 + ], + [ + 1155, + 328 + ], + [ + 1159, + 337 + ], + [ + 1165, + 349 + ], + [ + 1164, + 361 + ], + [ + 1151, + 372 + ], + [ + 1142, + 379 + ], + [ + 1133, + 379 + ], + [ + 1124, + 380 + ], + [ + 1124, + 380 + ], + [ + 1124, + 432 + ] + ] + }, + { + "label": "static", + 
"polygon": [ + [ + 1141, + 394 + ], + [ + 1123, + 394 + ], + [ + 1123, + 423 + ], + [ + 1141, + 423 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1140, + 426 + ], + [ + 1137, + 412 + ], + [ + 1136, + 399 + ], + [ + 1136, + 383 + ], + [ + 1135, + 379 + ], + [ + 1132, + 355 + ], + [ + 1105, + 335 + ], + [ + 1058, + 283 + ], + [ + 1031, + 270 + ], + [ + 1019, + 268 + ], + [ + 1008, + 266 + ], + [ + 999, + 260 + ], + [ + 1028, + 256 + ], + [ + 1001, + 258 + ], + [ + 1008, + 251 + ], + [ + 1029, + 247 + ], + [ + 1014, + 246 + ], + [ + 1004, + 242 + ], + [ + 1004, + 236 + ], + [ + 996, + 236 + ], + [ + 990, + 240 + ], + [ + 978, + 244 + ], + [ + 964, + 245 + ], + [ + 955, + 237 + ], + [ + 948, + 235 + ], + [ + 934, + 231 + ], + [ + 927, + 222 + ], + [ + 921, + 211 + ], + [ + 921, + 201 + ], + [ + 937, + 194 + ], + [ + 968, + 185 + ], + [ + 1024, + 175 + ], + [ + 1069, + 170 + ], + [ + 1090, + 170 + ], + [ + 1118, + 170 + ], + [ + 1136, + 173 + ], + [ + 1155, + 176 + ], + [ + 1165, + 178 + ], + [ + 1181, + 192 + ], + [ + 1201, + 215 + ], + [ + 1211, + 253 + ], + [ + 1211, + 274 + ], + [ + 1212, + 290 + ], + [ + 1211, + 311 + ], + [ + 1207, + 326 + ], + [ + 1205, + 334 + ], + [ + 1201, + 342 + ], + [ + 1190, + 349 + ], + [ + 1168, + 364 + ], + [ + 1156, + 365 + ], + [ + 1147, + 368 + ], + [ + 1139, + 375 + ], + [ + 1147, + 437 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1117, + 460 + ], + [ + 1111, + 460 + ], + [ + 1106, + 460 + ], + [ + 1102, + 457 + ], + [ + 1101, + 451 + ], + [ + 1103, + 448 + ], + [ + 1103, + 442 + ], + [ + 1114, + 439 + ], + [ + 1124, + 439 + ], + [ + 1124, + 447 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1166, + 415 + ], + [ + 1175, + 415 + ], + [ + 1190, + 416 + ], + [ + 1195, + 419 + ], + [ + 1195, + 428 + ], + [ + 1174, + 445 + ], + [ + 1144, + 452 + ], + [ + 1133, + 456 + ], + [ + 1130, + 460 + ], + [ + 1128, + 464 + ], + [ + 1124, + 467 + ], + [ + 1118, + 467 + ], + [ + 1113, + 460 + ], + [ + 1111, + 439 + ], + [ + 1112, + 426 + ], + [ + 1114, + 422 + ], + [ + 1125, + 419 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1156, + 436 + ], + [ + 1151, + 436 + ], + [ + 1147, + 392 + ], + [ + 1147, + 374 + ], + [ + 1140, + 348 + ], + [ + 977, + 196 + ], + [ + 936, + 189 + ], + [ + 925, + 183 + ], + [ + 910, + 175 + ], + [ + 897, + 173 + ], + [ + 886, + 159 + ], + [ + 889, + 156 + ], + [ + 912, + 159 + ], + [ + 910, + 152 + ], + [ + 906, + 145 + ], + [ + 904, + 138 + ], + [ + 910, + 128 + ], + [ + 923, + 136 + ], + [ + 934, + 137 + ], + [ + 934, + 131 + ], + [ + 928, + 125 + ], + [ + 926, + 117 + ], + [ + 935, + 118 + ], + [ + 928, + 107 + ], + [ + 935, + 105 + ], + [ + 957, + 120 + ], + [ + 957, + 111 + ], + [ + 953, + 102 + ], + [ + 938, + 82 + ], + [ + 942, + 81 + ], + [ + 962, + 90 + ], + [ + 992, + 101 + ], + [ + 1034, + 117 + ], + [ + 1080, + 164 + ], + [ + 1142, + 175 + ], + [ + 1174, + 165 + ], + [ + 1216, + 183 + ], + [ + 1258, + 200 + ], + [ + 1279, + 216 + ], + [ + 1293, + 234 + ], + [ + 1277, + 245 + ], + [ + 1291, + 252 + ], + [ + 1294, + 260 + ], + [ + 1273, + 263 + ], + [ + 1296, + 270 + ], + [ + 1293, + 279 + ], + [ + 1266, + 280 + ], + [ + 1271, + 285 + ], + [ + 1271, + 296 + ], + [ + 1260, + 295 + ], + [ + 1276, + 306 + ], + [ + 1267, + 310 + ], + [ + 1250, + 305 + ], + [ + 1233, + 303 + ], + [ + 1239, + 311 + ], + [ + 1238, + 316 + ], + [ + 1230, + 325 + ], + [ + 1215, + 325 + ], + [ + 1203, + 324 + ], + [ + 1153, + 352 + ], + [ + 1153, + 367 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 
1136, + 431 + ], + [ + 1141, + 426 + ], + [ + 1155, + 425 + ], + [ + 1164, + 425 + ], + [ + 1165, + 433 + ], + [ + 1162, + 443 + ], + [ + 1149, + 457 + ], + [ + 1142, + 462 + ], + [ + 1136, + 462 + ], + [ + 1129, + 462 + ], + [ + 1127, + 460 + ], + [ + 1126, + 453 + ], + [ + 1126, + 447 + ], + [ + 1128, + 440 + ], + [ + 1129, + 434 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1171, + 459 + ], + [ + 1166, + 464 + ], + [ + 1160, + 468 + ], + [ + 1155, + 472 + ], + [ + 1146, + 473 + ], + [ + 1141, + 469 + ], + [ + 1139, + 464 + ], + [ + 1138, + 452 + ], + [ + 1139, + 444 + ], + [ + 1142, + 436 + ], + [ + 1153, + 429 + ], + [ + 1161, + 425 + ], + [ + 1171, + 424 + ], + [ + 1187, + 423 + ], + [ + 1205, + 420 + ], + [ + 1212, + 421 + ], + [ + 1213, + 421 + ], + [ + 1211, + 432 + ], + [ + 1195, + 446 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1177, + 485 + ], + [ + 1150, + 489 + ], + [ + 1142, + 485 + ], + [ + 1143, + 475 + ], + [ + 1160, + 470 + ], + [ + 1176, + 470 + ], + [ + 1186, + 470 + ], + [ + 1194, + 473 + ], + [ + 1199, + 475 + ], + [ + 1197, + 480 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1170, + 394 + ], + [ + 1173, + 443 + ], + [ + 1191, + 443 + ], + [ + 1190, + 418 + ], + [ + 1186, + 371 + ], + [ + 1185, + 345 + ], + [ + 1188, + 339 + ], + [ + 1246, + 146 + ], + [ + 1248, + 0 + ], + [ + 939, + 0 + ], + [ + 935, + 1 + ], + [ + 941, + 14 + ], + [ + 956, + 28 + ], + [ + 971, + 41 + ], + [ + 966, + 50 + ], + [ + 975, + 59 + ], + [ + 968, + 72 + ], + [ + 963, + 82 + ], + [ + 974, + 106 + ], + [ + 1031, + 158 + ], + [ + 1160, + 336 + ], + [ + 1169, + 355 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1202, + 465 + ], + [ + 1194, + 476 + ], + [ + 1183, + 475 + ], + [ + 1178, + 471 + ], + [ + 1168, + 470 + ], + [ + 1163, + 462 + ], + [ + 1162, + 454 + ], + [ + 1161, + 445 + ], + [ + 1167, + 439 + ], + [ + 1174, + 436 + ], + [ + 1187, + 433 + ], + [ + 1197, + 431 + ], + [ + 1210, + 421 + ], + [ + 1231, + 410 + ], + [ + 1232, + 414 + ], + [ + 1225, + 434 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1237, + 408 + ], + [ + 1231, + 384 + ], + [ + 1233, + 347 + ], + [ + 1233, + 324 + ], + [ + 1231, + 270 + ], + [ + 1257, + 221 + ], + [ + 1278, + 211 + ], + [ + 1298, + 209 + ], + [ + 1328, + 196 + ], + [ + 1335, + 187 + ], + [ + 1316, + 178 + ], + [ + 1305, + 171 + ], + [ + 1321, + 162 + ], + [ + 1352, + 161 + ], + [ + 1362, + 157 + ], + [ + 1355, + 144 + ], + [ + 1336, + 143 + ], + [ + 1336, + 130 + ], + [ + 1344, + 122 + ], + [ + 1375, + 118 + ], + [ + 1386, + 99 + ], + [ + 1390, + 76 + ], + [ + 1394, + 62 + ], + [ + 1394, + 46 + ], + [ + 1387, + 34 + ], + [ + 1377, + 22 + ], + [ + 1405, + 23 + ], + [ + 1406, + 6 + ], + [ + 1402, + 0 + ], + [ + 1195, + 0 + ], + [ + 1185, + 178 + ], + [ + 1194, + 268 + ], + [ + 1214, + 304 + ], + [ + 1219, + 321 + ], + [ + 1220, + 399 + ], + [ + 1225, + 425 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1215, + 474 + ], + [ + 1201, + 478 + ], + [ + 1191, + 477 + ], + [ + 1185, + 471 + ], + [ + 1186, + 463 + ], + [ + 1188, + 459 + ], + [ + 1190, + 450 + ], + [ + 1189, + 445 + ], + [ + 1192, + 436 + ], + [ + 1203, + 428 + ], + [ + 1215, + 421 + ], + [ + 1234, + 414 + ], + [ + 1238, + 421 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1210, + 503 + ], + [ + 1196, + 501 + ], + [ + 1183, + 496 + ], + [ + 1187, + 482 + ], + [ + 1197, + 477 + ], + [ + 1212, + 477 + ], + [ + 1226, + 482 + ], + [ + 1229, + 491 + ], + [ + 1225, + 497 + ], + [ + 1218, + 500 + ] + ] + }, + { + 
"label": "vegetation", + "polygon": [ + [ + 1210, + 503 + ], + [ + 1196, + 501 + ], + [ + 1183, + 496 + ], + [ + 1187, + 482 + ], + [ + 1197, + 477 + ], + [ + 1212, + 477 + ], + [ + 1226, + 482 + ], + [ + 1229, + 491 + ], + [ + 1225, + 497 + ], + [ + 1218, + 500 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1408, + 399 + ], + [ + 1402, + 394 + ], + [ + 1391, + 390 + ], + [ + 1381, + 386 + ], + [ + 1364, + 385 + ], + [ + 1332, + 384 + ], + [ + 1308, + 384 + ], + [ + 1253, + 389 + ], + [ + 1235, + 393 + ], + [ + 1227, + 395 + ], + [ + 1218, + 408 + ], + [ + 1209, + 432 + ], + [ + 1206, + 453 + ], + [ + 1206, + 464 + ], + [ + 1206, + 473 + ], + [ + 1205, + 476 + ], + [ + 1202, + 479 + ], + [ + 1205, + 483 + ], + [ + 1209, + 485 + ], + [ + 1218, + 486 + ], + [ + 1234, + 487 + ], + [ + 1341, + 456 + ], + [ + 1382, + 435 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1515, + 388 + ], + [ + 1514, + 380 + ], + [ + 1511, + 367 + ], + [ + 1505, + 366 + ], + [ + 1495, + 369 + ], + [ + 1493, + 379 + ], + [ + 1499, + 391 + ], + [ + 1508, + 397 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1575, + 364 + ], + [ + 1573, + 355 + ], + [ + 1564, + 353 + ], + [ + 1556, + 352 + ], + [ + 1548, + 358 + ], + [ + 1551, + 365 + ], + [ + 1554, + 369 + ], + [ + 1548, + 374 + ], + [ + 1546, + 388 + ], + [ + 1546, + 405 + ], + [ + 1546, + 420 + ], + [ + 1562, + 425 + ], + [ + 1574, + 427 + ], + [ + 1583, + 427 + ], + [ + 1584, + 417 + ], + [ + 1583, + 406 + ], + [ + 1579, + 390 + ], + [ + 1589, + 388 + ], + [ + 1592, + 384 + ], + [ + 1586, + 377 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 840, + 471 + ], + [ + 867, + 470 + ], + [ + 756, + 490 + ], + [ + 665, + 504 + ], + [ + 0, + 624 + ], + [ + 0, + 550 + ], + [ + 806, + 455 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1267, + 518 + ], + [ + 1239, + 520 + ], + [ + 1227, + 520 + ], + [ + 1218, + 514 + ], + [ + 1214, + 496 + ], + [ + 1217, + 480 + ], + [ + 1226, + 471 + ], + [ + 1254, + 455 + ], + [ + 1281, + 446 + ], + [ + 1300, + 442 + ], + [ + 1322, + 432 + ], + [ + 1344, + 422 + ], + [ + 1362, + 413 + ], + [ + 1443, + 395 + ], + [ + 1461, + 388 + ], + [ + 1487, + 385 + ], + [ + 1516, + 384 + ], + [ + 1535, + 384 + ], + [ + 1542, + 384 + ], + [ + 1538, + 392 + ], + [ + 1543, + 398 + ], + [ + 1545, + 409 + ], + [ + 1540, + 426 + ], + [ + 1386, + 483 + ], + [ + 1347, + 504 + ], + [ + 1331, + 508 + ], + [ + 1314, + 523 + ], + [ + 1304, + 528 + ], + [ + 1283, + 530 + ], + [ + 1276, + 527 + ], + [ + 1272, + 523 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 840, + 471 + ], + [ + 867, + 470 + ], + [ + 756, + 490 + ], + [ + 665, + 504 + ], + [ + 0, + 624 + ], + [ + 0, + 550 + ], + [ + 806, + 455 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1420, + 390 + ], + [ + 1377, + 399 + ], + [ + 1359, + 401 + ], + [ + 1347, + 417 + ], + [ + 1339, + 437 + ], + [ + 1337, + 460 + ], + [ + 1337, + 487 + ], + [ + 1349, + 505 + ], + [ + 1375, + 518 + ], + [ + 1443, + 503 + ], + [ + 1597, + 477 + ], + [ + 1628, + 449 + ], + [ + 1631, + 431 + ], + [ + 1626, + 426 + ], + [ + 1591, + 421 + ], + [ + 1571, + 417 + ], + [ + 1556, + 405 + ], + [ + 1534, + 394 + ], + [ + 1518, + 389 + ], + [ + 1481, + 389 + ], + [ + 1454, + 388 + ], + [ + 1432, + 388 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1471, + 560 + ], + [ + 1455, + 560 + ], + [ + 1452, + 549 + ], + [ + 1452, + 531 + ], + [ + 1452, + 465 + ], + [ + 1443, + 434 + ], + [ + 1432, + 384 + ], + [ + 1430, + 338 + ], + [ + 1427, + 300 + ], + [ + 
1422, + 287 + ], + [ + 1423, + 261 + ], + [ + 1420, + 190 + ], + [ + 1405, + 87 + ], + [ + 1398, + 14 + ], + [ + 1393, + 0 + ], + [ + 1423, + 0 + ], + [ + 1430, + 5 + ], + [ + 1439, + 18 + ], + [ + 1434, + 34 + ], + [ + 1430, + 70 + ], + [ + 1441, + 169 + ], + [ + 1450, + 235 + ], + [ + 1452, + 281 + ], + [ + 1460, + 325 + ], + [ + 1462, + 381 + ], + [ + 1473, + 439 + ], + [ + 1480, + 485 + ], + [ + 1484, + 540 + ], + [ + 1488, + 553 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1679, + 426 + ], + [ + 1669, + 403 + ], + [ + 1667, + 385 + ], + [ + 1655, + 377 + ], + [ + 1655, + 348 + ], + [ + 1659, + 318 + ], + [ + 1659, + 295 + ], + [ + 1658, + 270 + ], + [ + 1652, + 221 + ], + [ + 1649, + 181 + ], + [ + 1650, + 133 + ], + [ + 1654, + 72 + ], + [ + 1653, + 38 + ], + [ + 1653, + 35 + ], + [ + 1664, + 30 + ], + [ + 1677, + 29 + ], + [ + 1682, + 32 + ], + [ + 1681, + 37 + ], + [ + 1680, + 43 + ], + [ + 1688, + 82 + ], + [ + 1699, + 133 + ], + [ + 1709, + 211 + ], + [ + 1716, + 279 + ], + [ + 1716, + 307 + ], + [ + 1729, + 338 + ], + [ + 1734, + 369 + ], + [ + 1733, + 390 + ], + [ + 1745, + 424 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1834, + 354 + ], + [ + 1832, + 341 + ], + [ + 1832, + 329 + ], + [ + 1839, + 323 + ], + [ + 1847, + 318 + ], + [ + 1846, + 312 + ], + [ + 1846, + 300 + ], + [ + 1855, + 296 + ], + [ + 1864, + 299 + ], + [ + 1866, + 311 + ], + [ + 1867, + 319 + ], + [ + 1881, + 321 + ], + [ + 1884, + 331 + ], + [ + 1881, + 348 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1977, + 21 + ], + [ + 1986, + 313 + ], + [ + 2010, + 505 + ], + [ + 2026, + 665 + ], + [ + 2033, + 685 + ], + [ + 2048, + 687 + ], + [ + 2048, + 685 + ], + [ + 2048, + 0 + ], + [ + 1978, + 0 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 876, + 425 + ], + [ + 881, + 425 + ], + [ + 882, + 457 + ], + [ + 877, + 457 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 872, + 468 + ], + [ + 858, + 467 + ], + [ + 847, + 457 + ], + [ + 856, + 451 + ], + [ + 864, + 450 + ], + [ + 879, + 455 + ], + [ + 886, + 462 + ], + [ + 878, + 466 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 955, + 425 + ], + [ + 943, + 429 + ], + [ + 941, + 439 + ], + [ + 947, + 451 + ], + [ + 956, + 456 + ], + [ + 965, + 453 + ], + [ + 966, + 443 + ], + [ + 964, + 437 + ], + [ + 960, + 432 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 866, + 421 + ], + [ + 865, + 453 + ], + [ + 862, + 453 + ], + [ + 861, + 420 + ], + [ + 859, + 422 + ], + [ + 846, + 420 + ], + [ + 830, + 408 + ], + [ + 830, + 397 + ], + [ + 839, + 386 + ], + [ + 853, + 381 + ], + [ + 874, + 381 + ], + [ + 879, + 389 + ], + [ + 881, + 392 + ], + [ + 886, + 401 + ], + [ + 883, + 412 + ], + [ + 879, + 419 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 856, + 459 + ], + [ + 855, + 415 + ], + [ + 857, + 404 + ], + [ + 847, + 401 + ], + [ + 835, + 400 + ], + [ + 822, + 394 + ], + [ + 818, + 387 + ], + [ + 808, + 392 + ], + [ + 796, + 391 + ], + [ + 786, + 378 + ], + [ + 790, + 346 + ], + [ + 801, + 283 + ], + [ + 821, + 268 + ], + [ + 836, + 264 + ], + [ + 852, + 263 + ], + [ + 860, + 273 + ], + [ + 864, + 281 + ], + [ + 870, + 287 + ], + [ + 879, + 293 + ], + [ + 883, + 302 + ], + [ + 883, + 316 + ], + [ + 894, + 315 + ], + [ + 900, + 322 + ], + [ + 909, + 332 + ], + [ + 910, + 358 + ], + [ + 906, + 368 + ], + [ + 895, + 381 + ], + [ + 887, + 390 + ], + [ + 885, + 394 + ], + [ + 877, + 405 + ], + [ + 865, + 409 + ], + [ + 857, + 411 + ], + [ + 858, + 454 + ] + ] + }, + { + 
"label": "vegetation", + "polygon": [ + [ + 841, + 450 + ], + [ + 841, + 431 + ], + [ + 838, + 415 + ], + [ + 837, + 405 + ], + [ + 835, + 390 + ], + [ + 809, + 367 + ], + [ + 798, + 210 + ], + [ + 802, + 179 + ], + [ + 814, + 163 + ], + [ + 829, + 164 + ], + [ + 832, + 176 + ], + [ + 837, + 181 + ], + [ + 843, + 185 + ], + [ + 848, + 184 + ], + [ + 855, + 183 + ], + [ + 865, + 195 + ], + [ + 866, + 205 + ], + [ + 875, + 212 + ], + [ + 874, + 227 + ], + [ + 874, + 241 + ], + [ + 867, + 251 + ], + [ + 870, + 256 + ], + [ + 860, + 264 + ], + [ + 849, + 275 + ], + [ + 848, + 331 + ], + [ + 844, + 383 + ], + [ + 843, + 409 + ], + [ + 845, + 458 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 851, + 436 + ], + [ + 851, + 446 + ], + [ + 852, + 454 + ], + [ + 849, + 457 + ], + [ + 848, + 451 + ], + [ + 847, + 442 + ], + [ + 847, + 436 + ], + [ + 847, + 434 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 830, + 449 + ], + [ + 827, + 407 + ], + [ + 827, + 403 + ], + [ + 822, + 376 + ], + [ + 809, + 331 + ], + [ + 819, + 302 + ], + [ + 859, + 314 + ], + [ + 865, + 340 + ], + [ + 848, + 371 + ], + [ + 839, + 395 + ], + [ + 834, + 403 + ], + [ + 831, + 463 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 796, + 444 + ], + [ + 794, + 434 + ], + [ + 791, + 420 + ], + [ + 788, + 401 + ], + [ + 788, + 377 + ], + [ + 777, + 381 + ], + [ + 762, + 381 + ], + [ + 738, + 381 + ], + [ + 736, + 379 + ], + [ + 721, + 379 + ], + [ + 719, + 378 + ], + [ + 724, + 372 + ], + [ + 716, + 363 + ], + [ + 706, + 367 + ], + [ + 701, + 375 + ], + [ + 694, + 376 + ], + [ + 677, + 369 + ], + [ + 672, + 353 + ], + [ + 678, + 344 + ], + [ + 689, + 345 + ], + [ + 684, + 335 + ], + [ + 678, + 330 + ], + [ + 683, + 305 + ], + [ + 698, + 218 + ], + [ + 721, + 148 + ], + [ + 746, + 88 + ], + [ + 764, + 65 + ], + [ + 781, + 57 + ], + [ + 798, + 63 + ], + [ + 814, + 67 + ], + [ + 815, + 79 + ], + [ + 815, + 91 + ], + [ + 822, + 97 + ], + [ + 828, + 104 + ], + [ + 829, + 108 + ], + [ + 827, + 120 + ], + [ + 818, + 122 + ], + [ + 816, + 131 + ], + [ + 817, + 139 + ], + [ + 827, + 140 + ], + [ + 832, + 136 + ], + [ + 839, + 136 + ], + [ + 842, + 145 + ], + [ + 834, + 160 + ], + [ + 831, + 169 + ], + [ + 822, + 198 + ], + [ + 817, + 323 + ], + [ + 798, + 361 + ], + [ + 793, + 384 + ], + [ + 793, + 397 + ], + [ + 798, + 432 + ], + [ + 802, + 444 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 825, + 450 + ], + [ + 836, + 450 + ], + [ + 843, + 447 + ], + [ + 848, + 450 + ], + [ + 853, + 458 + ], + [ + 851, + 468 + ], + [ + 845, + 469 + ], + [ + 837, + 465 + ], + [ + 830, + 459 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 806, + 447 + ], + [ + 811, + 442 + ], + [ + 815, + 438 + ], + [ + 833, + 438 + ], + [ + 841, + 440 + ], + [ + 844, + 446 + ], + [ + 841, + 466 + ], + [ + 840, + 469 + ], + [ + 839, + 471 + ], + [ + 835, + 476 + ], + [ + 828, + 476 + ], + [ + 823, + 472 + ], + [ + 821, + 470 + ], + [ + 813, + 464 + ], + [ + 809, + 458 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 781, + 436 + ], + [ + 810, + 436 + ], + [ + 815, + 439 + ], + [ + 816, + 451 + ], + [ + 817, + 464 + ], + [ + 817, + 468 + ], + [ + 816, + 470 + ], + [ + 813, + 474 + ], + [ + 810, + 476 + ], + [ + 807, + 476 + ], + [ + 801, + 478 + ], + [ + 798, + 475 + ], + [ + 787, + 463 + ], + [ + 769, + 455 + ], + [ + 743, + 452 + ], + [ + 742, + 446 + ], + [ + 749, + 437 + ], + [ + 754, + 436 + ], + [ + 757, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 783, + 447 + ], + [ + 794, + 447 + ], + 
[ + 800, + 448 + ], + [ + 803, + 453 + ], + [ + 808, + 459 + ], + [ + 812, + 468 + ], + [ + 810, + 474 + ], + [ + 801, + 475 + ], + [ + 798, + 477 + ], + [ + 794, + 478 + ], + [ + 780, + 476 + ], + [ + 761, + 463 + ], + [ + 751, + 455 + ], + [ + 751, + 450 + ], + [ + 753, + 447 + ], + [ + 763, + 446 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 801, + 479 + ], + [ + 797, + 473 + ], + [ + 794, + 466 + ], + [ + 783, + 466 + ], + [ + 774, + 472 + ], + [ + 773, + 477 + ], + [ + 782, + 482 + ], + [ + 798, + 480 + ], + [ + 801, + 480 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 761, + 385 + ], + [ + 763, + 418 + ], + [ + 765, + 453 + ], + [ + 765, + 462 + ], + [ + 769, + 462 + ], + [ + 769, + 444 + ], + [ + 769, + 420 + ], + [ + 768, + 400 + ], + [ + 768, + 381 + ], + [ + 772, + 381 + ], + [ + 823, + 256 + ], + [ + 814, + 185 + ], + [ + 785, + 89 + ], + [ + 737, + 81 + ], + [ + 687, + 96 + ], + [ + 637, + 134 + ], + [ + 614, + 170 + ], + [ + 601, + 208 + ], + [ + 594, + 261 + ], + [ + 591, + 290 + ], + [ + 599, + 322 + ], + [ + 614, + 329 + ], + [ + 626, + 334 + ], + [ + 642, + 336 + ], + [ + 666, + 334 + ], + [ + 674, + 330 + ], + [ + 708, + 334 + ], + [ + 743, + 359 + ], + [ + 756, + 371 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 829, + 72 + ], + [ + 859, + 70 + ], + [ + 857, + 76 + ], + [ + 856, + 78 + ], + [ + 836, + 79 + ], + [ + 832, + 78 + ], + [ + 829, + 77 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 900, + 182 + ], + [ + 919, + 180 + ], + [ + 919, + 185 + ], + [ + 915, + 187 + ], + [ + 900, + 187 + ], + [ + 898, + 186 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 934, + 241 + ], + [ + 949, + 241 + ], + [ + 948, + 244 + ], + [ + 943, + 245 + ], + [ + 935, + 245 + ], + [ + 934, + 245 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 906, + 290 + ], + [ + 895, + 290 + ], + [ + 895, + 294 + ], + [ + 901, + 294 + ], + [ + 906, + 295 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 734, + 452 + ], + [ + 749, + 450 + ], + [ + 760, + 450 + ], + [ + 766, + 451 + ], + [ + 773, + 454 + ], + [ + 780, + 455 + ], + [ + 784, + 460 + ], + [ + 785, + 464 + ], + [ + 785, + 470 + ], + [ + 784, + 477 + ], + [ + 780, + 479 + ], + [ + 773, + 480 + ], + [ + 771, + 483 + ], + [ + 766, + 486 + ], + [ + 762, + 486 + ], + [ + 754, + 479 + ], + [ + 743, + 464 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 726, + 449 + ], + [ + 743, + 454 + ], + [ + 755, + 455 + ], + [ + 760, + 459 + ], + [ + 763, + 460 + ], + [ + 769, + 460 + ], + [ + 771, + 465 + ], + [ + 776, + 476 + ], + [ + 774, + 479 + ], + [ + 765, + 479 + ], + [ + 760, + 481 + ], + [ + 756, + 485 + ], + [ + 753, + 485 + ], + [ + 744, + 481 + ], + [ + 741, + 472 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 681, + 436 + ], + [ + 708, + 436 + ], + [ + 740, + 436 + ], + [ + 751, + 436 + ], + [ + 752, + 457 + ], + [ + 757, + 472 + ], + [ + 757, + 478 + ], + [ + 752, + 482 + ], + [ + 742, + 486 + ], + [ + 736, + 486 + ], + [ + 710, + 471 + ], + [ + 670, + 463 + ], + [ + 657, + 455 + ], + [ + 651, + 446 + ], + [ + 656, + 439 + ], + [ + 661, + 437 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 676, + 419 + ], + [ + 661, + 421 + ], + [ + 645, + 421 + ], + [ + 630, + 425 + ], + [ + 621, + 432 + ], + [ + 617, + 438 + ], + [ + 613, + 438 + ], + [ + 600, + 434 + ], + [ + 591, + 439 + ], + [ + 601, + 452 + ], + [ + 627, + 456 + ], + [ + 643, + 456 + ], + [ + 660, + 456 + ], + [ + 668, + 453 + ], + [ + 673, + 450 + ], + [ + 671, + 445 + ], + [ + 
668, + 438 + ], + [ + 673, + 435 + ], + [ + 680, + 433 + ], + [ + 683, + 425 + ], + [ + 680, + 422 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 715, + 449 + ], + [ + 684, + 447 + ], + [ + 663, + 447 + ], + [ + 651, + 449 + ], + [ + 645, + 458 + ], + [ + 671, + 474 + ], + [ + 699, + 477 + ], + [ + 713, + 477 + ], + [ + 715, + 485 + ], + [ + 727, + 486 + ], + [ + 731, + 484 + ], + [ + 734, + 479 + ], + [ + 734, + 472 + ], + [ + 729, + 462 + ], + [ + 725, + 453 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 736, + 494 + ], + [ + 724, + 494 + ], + [ + 707, + 495 + ], + [ + 695, + 492 + ], + [ + 699, + 485 + ], + [ + 712, + 480 + ], + [ + 724, + 479 + ], + [ + 738, + 487 + ], + [ + 741, + 489 + ], + [ + 735, + 491 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 711, + 389 + ], + [ + 713, + 468 + ], + [ + 711, + 477 + ], + [ + 699, + 476 + ], + [ + 699, + 445 + ], + [ + 697, + 418 + ], + [ + 697, + 407 + ], + [ + 696, + 379 + ], + [ + 690, + 379 + ], + [ + 689, + 358 + ], + [ + 696, + 317 + ], + [ + 664, + 308 + ], + [ + 652, + 317 + ], + [ + 565, + 154 + ], + [ + 548, + 0 + ], + [ + 797, + 0 + ], + [ + 800, + 0 + ], + [ + 801, + 5 + ], + [ + 802, + 15 + ], + [ + 802, + 22 + ], + [ + 811, + 31 + ], + [ + 813, + 40 + ], + [ + 806, + 52 + ], + [ + 802, + 70 + ], + [ + 768, + 269 + ], + [ + 725, + 325 + ], + [ + 707, + 357 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 645, + 464 + ], + [ + 649, + 455 + ], + [ + 655, + 451 + ], + [ + 670, + 450 + ], + [ + 686, + 450 + ], + [ + 697, + 451 + ], + [ + 705, + 461 + ], + [ + 710, + 472 + ], + [ + 711, + 483 + ], + [ + 710, + 485 + ], + [ + 706, + 490 + ], + [ + 699, + 492 + ], + [ + 698, + 495 + ], + [ + 691, + 497 + ], + [ + 683, + 496 + ], + [ + 676, + 491 + ], + [ + 676, + 491 + ], + [ + 647, + 488 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 625, + 448 + ], + [ + 643, + 448 + ], + [ + 652, + 450 + ], + [ + 660, + 461 + ], + [ + 665, + 472 + ], + [ + 667, + 487 + ], + [ + 667, + 493 + ], + [ + 663, + 496 + ], + [ + 654, + 493 + ], + [ + 648, + 492 + ], + [ + 642, + 494 + ], + [ + 630, + 491 + ], + [ + 604, + 480 + ], + [ + 589, + 469 + ], + [ + 586, + 461 + ], + [ + 587, + 453 + ], + [ + 594, + 450 + ], + [ + 602, + 449 + ], + [ + 617, + 448 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 637, + 502 + ], + [ + 621, + 498 + ], + [ + 621, + 487 + ], + [ + 638, + 489 + ], + [ + 648, + 491 + ], + [ + 657, + 493 + ], + [ + 658, + 498 + ], + [ + 659, + 502 + ], + [ + 655, + 507 + ], + [ + 647, + 507 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 622, + 348 + ], + [ + 623, + 434 + ], + [ + 621, + 473 + ], + [ + 634, + 481 + ], + [ + 635, + 462 + ], + [ + 635, + 440 + ], + [ + 635, + 409 + ], + [ + 637, + 363 + ], + [ + 637, + 333 + ], + [ + 657, + 308 + ], + [ + 665, + 0 + ], + [ + 437, + 0 + ], + [ + 439, + 11 + ], + [ + 441, + 18 + ], + [ + 428, + 16 + ], + [ + 422, + 25 + ], + [ + 427, + 53 + ], + [ + 439, + 91 + ], + [ + 476, + 163 + ], + [ + 489, + 209 + ], + [ + 503, + 253 + ], + [ + 512, + 271 + ], + [ + 512, + 288 + ], + [ + 482, + 296 + ], + [ + 493, + 311 + ], + [ + 519, + 311 + ], + [ + 521, + 314 + ], + [ + 510, + 336 + ], + [ + 507, + 349 + ], + [ + 526, + 358 + ], + [ + 540, + 351 + ], + [ + 554, + 348 + ], + [ + 571, + 344 + ], + [ + 583, + 328 + ], + [ + 596, + 315 + ], + [ + 609, + 315 + ], + [ + 615, + 330 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 575, + 456 + ], + [ + 591, + 453 + ], + [ + 609, + 460 + ], + [ + 620, + 464 + ], + [ + 630, + 
468 + ], + [ + 637, + 479 + ], + [ + 640, + 492 + ], + [ + 634, + 499 + ], + [ + 620, + 502 + ], + [ + 618, + 508 + ], + [ + 608, + 511 + ], + [ + 600, + 509 + ], + [ + 587, + 497 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 497, + 464 + ], + [ + 498, + 457 + ], + [ + 503, + 453 + ], + [ + 521, + 450 + ], + [ + 557, + 451 + ], + [ + 575, + 450 + ], + [ + 585, + 452 + ], + [ + 593, + 464 + ], + [ + 598, + 475 + ], + [ + 600, + 496 + ], + [ + 601, + 504 + ], + [ + 595, + 506 + ], + [ + 581, + 505 + ], + [ + 527, + 496 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 546, + 519 + ], + [ + 560, + 518 + ], + [ + 576, + 517 + ], + [ + 593, + 516 + ], + [ + 602, + 514 + ], + [ + 598, + 505 + ], + [ + 579, + 499 + ], + [ + 571, + 491 + ], + [ + 555, + 491 + ], + [ + 535, + 491 + ], + [ + 520, + 500 + ], + [ + 519, + 512 + ], + [ + 529, + 519 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 538, + 366 + ], + [ + 539, + 399 + ], + [ + 540, + 442 + ], + [ + 543, + 488 + ], + [ + 545, + 494 + ], + [ + 556, + 494 + ], + [ + 554, + 478 + ], + [ + 552, + 449 + ], + [ + 551, + 418 + ], + [ + 551, + 384 + ], + [ + 550, + 354 + ], + [ + 553, + 337 + ], + [ + 562, + 328 + ], + [ + 625, + 90 + ], + [ + 547, + 31 + ], + [ + 477, + 43 + ], + [ + 411, + 78 + ], + [ + 391, + 109 + ], + [ + 390, + 161 + ], + [ + 405, + 243 + ], + [ + 427, + 261 + ], + [ + 446, + 272 + ], + [ + 468, + 273 + ], + [ + 493, + 268 + ], + [ + 508, + 267 + ], + [ + 526, + 292 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 546, + 519 + ], + [ + 560, + 518 + ], + [ + 576, + 517 + ], + [ + 593, + 516 + ], + [ + 602, + 514 + ], + [ + 598, + 505 + ], + [ + 579, + 499 + ], + [ + 571, + 491 + ], + [ + 555, + 491 + ], + [ + 535, + 491 + ], + [ + 520, + 500 + ], + [ + 519, + 512 + ], + [ + 529, + 519 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 566, + 510 + ], + [ + 572, + 497 + ], + [ + 569, + 490 + ], + [ + 560, + 478 + ], + [ + 552, + 478 + ], + [ + 547, + 475 + ], + [ + 555, + 470 + ], + [ + 561, + 464 + ], + [ + 553, + 461 + ], + [ + 549, + 466 + ], + [ + 539, + 465 + ], + [ + 534, + 468 + ], + [ + 522, + 479 + ], + [ + 522, + 491 + ], + [ + 529, + 505 + ], + [ + 539, + 505 + ], + [ + 546, + 507 + ], + [ + 557, + 510 + ], + [ + 565, + 506 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 476, + 455 + ], + [ + 499, + 455 + ], + [ + 508, + 459 + ], + [ + 521, + 465 + ], + [ + 537, + 469 + ], + [ + 541, + 479 + ], + [ + 547, + 490 + ], + [ + 547, + 503 + ], + [ + 547, + 505 + ], + [ + 539, + 508 + ], + [ + 534, + 512 + ], + [ + 533, + 520 + ], + [ + 523, + 522 + ], + [ + 513, + 522 + ], + [ + 499, + 506 + ], + [ + 451, + 466 + ], + [ + 448, + 460 + ], + [ + 454, + 456 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 452, + 486 + ], + [ + 477, + 478 + ], + [ + 491, + 476 + ], + [ + 504, + 480 + ], + [ + 513, + 488 + ], + [ + 516, + 496 + ], + [ + 519, + 505 + ], + [ + 513, + 520 + ], + [ + 505, + 525 + ], + [ + 492, + 524 + ], + [ + 477, + 513 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 428, + 453 + ], + [ + 462, + 453 + ], + [ + 473, + 459 + ], + [ + 486, + 472 + ], + [ + 489, + 480 + ], + [ + 491, + 491 + ], + [ + 498, + 501 + ], + [ + 500, + 506 + ], + [ + 499, + 514 + ], + [ + 492, + 520 + ], + [ + 486, + 525 + ], + [ + 474, + 525 + ], + [ + 450, + 519 + ], + [ + 428, + 510 + ], + [ + 402, + 491 + ], + [ + 340, + 465 + ], + [ + 350, + 457 + ], + [ + 369, + 453 + ], + [ + 388, + 450 + ], + [ + 402, + 450 + ] + ] + }, + { + "label": "terrain", + 
"polygon": [ + [ + 418, + 534 + ], + [ + 435, + 536 + ], + [ + 453, + 537 + ], + [ + 469, + 535 + ], + [ + 478, + 533 + ], + [ + 478, + 527 + ], + [ + 477, + 519 + ], + [ + 471, + 513 + ], + [ + 457, + 510 + ], + [ + 446, + 506 + ], + [ + 434, + 506 + ], + [ + 428, + 503 + ], + [ + 415, + 507 + ], + [ + 398, + 519 + ], + [ + 385, + 529 + ], + [ + 388, + 537 + ], + [ + 391, + 538 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 380, + 277 + ], + [ + 381, + 300 + ], + [ + 382, + 320 + ], + [ + 389, + 353 + ], + [ + 395, + 400 + ], + [ + 398, + 421 + ], + [ + 400, + 451 + ], + [ + 401, + 467 + ], + [ + 403, + 487 + ], + [ + 408, + 489 + ], + [ + 410, + 471 + ], + [ + 413, + 444 + ], + [ + 409, + 408 + ], + [ + 408, + 378 + ], + [ + 403, + 335 + ], + [ + 397, + 311 + ], + [ + 394, + 286 + ], + [ + 394, + 284 + ], + [ + 401, + 273 + ], + [ + 416, + 265 + ], + [ + 441, + 252 + ], + [ + 446, + 103 + ], + [ + 446, + 55 + ], + [ + 435, + 40 + ], + [ + 422, + 39 + ], + [ + 408, + 51 + ], + [ + 401, + 56 + ], + [ + 392, + 44 + ], + [ + 386, + 32 + ], + [ + 367, + 25 + ], + [ + 366, + 20 + ], + [ + 384, + 14 + ], + [ + 390, + 0 + ], + [ + 254, + 0 + ], + [ + 239, + 33 + ], + [ + 229, + 95 + ], + [ + 228, + 135 + ], + [ + 223, + 150 + ], + [ + 211, + 152 + ], + [ + 201, + 149 + ], + [ + 199, + 162 + ], + [ + 194, + 181 + ], + [ + 196, + 194 + ], + [ + 181, + 194 + ], + [ + 185, + 203 + ], + [ + 184, + 210 + ], + [ + 170, + 212 + ], + [ + 171, + 242 + ], + [ + 177, + 256 + ], + [ + 188, + 272 + ], + [ + 205, + 266 + ], + [ + 212, + 247 + ], + [ + 213, + 238 + ], + [ + 237, + 238 + ], + [ + 259, + 234 + ], + [ + 269, + 231 + ], + [ + 291, + 228 + ], + [ + 298, + 229 + ], + [ + 297, + 242 + ], + [ + 292, + 249 + ], + [ + 279, + 255 + ], + [ + 277, + 265 + ], + [ + 298, + 270 + ], + [ + 311, + 268 + ], + [ + 319, + 268 + ], + [ + 339, + 276 + ], + [ + 352, + 279 + ], + [ + 367, + 271 + ], + [ + 376, + 269 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 418, + 534 + ], + [ + 435, + 536 + ], + [ + 453, + 537 + ], + [ + 469, + 535 + ], + [ + 478, + 533 + ], + [ + 478, + 527 + ], + [ + 477, + 519 + ], + [ + 471, + 513 + ], + [ + 457, + 510 + ], + [ + 446, + 506 + ], + [ + 434, + 506 + ], + [ + 428, + 503 + ], + [ + 415, + 507 + ], + [ + 398, + 519 + ], + [ + 385, + 529 + ], + [ + 388, + 537 + ], + [ + 391, + 538 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 238, + 480 + ], + [ + 256, + 468 + ], + [ + 266, + 466 + ], + [ + 279, + 463 + ], + [ + 295, + 461 + ], + [ + 307, + 459 + ], + [ + 310, + 453 + ], + [ + 325, + 450 + ], + [ + 360, + 448 + ], + [ + 381, + 449 + ], + [ + 394, + 451 + ], + [ + 401, + 460 + ], + [ + 402, + 463 + ], + [ + 411, + 472 + ], + [ + 424, + 485 + ], + [ + 425, + 494 + ], + [ + 427, + 509 + ], + [ + 427, + 518 + ], + [ + 427, + 522 + ], + [ + 402, + 526 + ], + [ + 393, + 529 + ], + [ + 391, + 539 + ], + [ + 382, + 545 + ], + [ + 374, + 547 + ], + [ + 364, + 547 + ], + [ + 358, + 546 + ], + [ + 350, + 548 + ], + [ + 337, + 549 + ], + [ + 328, + 547 + ], + [ + 322, + 539 + ], + [ + 279, + 540 + ], + [ + 234, + 544 + ], + [ + 186, + 547 + ], + [ + 193, + 543 + ], + [ + 190, + 522 + ], + [ + 195, + 507 + ], + [ + 206, + 500 + ], + [ + 218, + 496 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 290, + 398 + ], + [ + 295, + 544 + ], + [ + 299, + 545 + ], + [ + 294, + 393 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 291, + 344 + ], + [ + 284, + 350 + ], + [ + 283, + 362 + ], + [ + 286, + 376 + ], + [ + 292, + 384 + ], + [ + 
298, + 385 + ], + [ + 305, + 383 + ], + [ + 307, + 369 + ], + [ + 305, + 356 + ], + [ + 303, + 349 + ], + [ + 298, + 345 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 310, + 408 + ], + [ + 307, + 384 + ], + [ + 284, + 384 + ], + [ + 286, + 409 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 256, + 557 + ], + [ + 260, + 538 + ], + [ + 264, + 531 + ], + [ + 331, + 534 + ], + [ + 337, + 535 + ], + [ + 334, + 550 + ], + [ + 337, + 559 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 168, + 314 + ], + [ + 166, + 353 + ], + [ + 160, + 370 + ], + [ + 172, + 466 + ], + [ + 180, + 523 + ], + [ + 180, + 548 + ], + [ + 183, + 554 + ], + [ + 201, + 552 + ], + [ + 201, + 518 + ], + [ + 203, + 505 + ], + [ + 203, + 484 + ], + [ + 201, + 456 + ], + [ + 200, + 402 + ], + [ + 197, + 338 + ], + [ + 192, + 299 + ], + [ + 186, + 265 + ], + [ + 183, + 205 + ], + [ + 179, + 172 + ], + [ + 173, + 146 + ], + [ + 171, + 125 + ], + [ + 186, + 124 + ], + [ + 206, + 139 + ], + [ + 219, + 148 + ], + [ + 239, + 138 + ], + [ + 275, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 84 + ], + [ + 10, + 92 + ], + [ + 21, + 92 + ], + [ + 42, + 98 + ], + [ + 58, + 100 + ], + [ + 84, + 103 + ], + [ + 87, + 95 + ], + [ + 96, + 92 + ], + [ + 123, + 95 + ], + [ + 127, + 98 + ], + [ + 127, + 108 + ], + [ + 118, + 118 + ], + [ + 114, + 133 + ], + [ + 109, + 137 + ], + [ + 101, + 149 + ], + [ + 110, + 164 + ], + [ + 136, + 154 + ], + [ + 151, + 156 + ], + [ + 151, + 176 + ], + [ + 151, + 204 + ], + [ + 138, + 205 + ], + [ + 135, + 232 + ], + [ + 154, + 244 + ], + [ + 158, + 253 + ], + [ + 159, + 263 + ], + [ + 159, + 273 + ], + [ + 153, + 274 + ], + [ + 158, + 295 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 54, + 533 + ], + [ + 53, + 522 + ], + [ + 53, + 512 + ], + [ + 59, + 502 + ], + [ + 64, + 494 + ], + [ + 65, + 484 + ], + [ + 58, + 477 + ], + [ + 40, + 477 + ], + [ + 41, + 479 + ], + [ + 34, + 483 + ], + [ + 26, + 481 + ], + [ + 16, + 483 + ], + [ + 8, + 490 + ], + [ + 3, + 494 + ], + [ + 0, + 494 + ], + [ + 0, + 527 + ], + [ + 14, + 539 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1020, + 436 + ], + [ + 1031, + 439 + ], + [ + 1038, + 439 + ], + [ + 1041, + 443 + ], + [ + 1045, + 446 + ], + [ + 1045, + 453 + ], + [ + 1043, + 456 + ], + [ + 1033, + 458 + ], + [ + 1024, + 454 + ], + [ + 1019, + 455 + ], + [ + 1016, + 450 + ], + [ + 1015, + 439 + ], + [ + 1015, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1001, + 437 + ], + [ + 1004, + 437 + ], + [ + 1010, + 437 + ], + [ + 1017, + 438 + ], + [ + 1021, + 445 + ], + [ + 1022, + 450 + ], + [ + 1022, + 455 + ], + [ + 1019, + 458 + ], + [ + 1013, + 458 + ], + [ + 1003, + 453 + ], + [ + 1003, + 448 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1049, + 456 + ], + [ + 1048, + 460 + ], + [ + 1043, + 460 + ], + [ + 1043, + 455 + ], + [ + 1043, + 449 + ], + [ + 1043, + 442 + ], + [ + 1045, + 435 + ], + [ + 1052, + 431 + ], + [ + 1065, + 432 + ], + [ + 1069, + 432 + ], + [ + 1073, + 434 + ], + [ + 1074, + 440 + ], + [ + 1075, + 448 + ], + [ + 1075, + 454 + ], + [ + 1074, + 458 + ], + [ + 1073, + 460 + ], + [ + 1070, + 459 + ], + [ + 1069, + 456 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1005, + 418 + ], + [ + 1003, + 436 + ], + [ + 995, + 435 + ], + [ + 995, + 418 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 990, + 450 + ], + [ + 985, + 440 + ], + [ + 982, + 428 + ], + [ + 987, + 426 + ], + [ + 995, + 426 + ], + [ + 1003, + 426 + ], + [ + 1008, + 427 + ], + [ + 1009, + 
435 + ], + [ + 1009, + 436 + ], + [ + 1010, + 448 + ], + [ + 1011, + 462 + ], + [ + 1008, + 464 + ], + [ + 1005, + 462 + ], + [ + 1004, + 461 + ], + [ + 1002, + 461 + ], + [ + 991, + 457 + ], + [ + 982, + 449 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 971, + 462 + ], + [ + 970, + 467 + ], + [ + 965, + 468 + ], + [ + 962, + 462 + ], + [ + 962, + 453 + ], + [ + 962, + 446 + ], + [ + 965, + 438 + ], + [ + 966, + 432 + ], + [ + 974, + 431 + ], + [ + 989, + 431 + ], + [ + 996, + 432 + ], + [ + 1001, + 439 + ], + [ + 1003, + 450 + ], + [ + 1003, + 459 + ], + [ + 1002, + 466 + ], + [ + 997, + 468 + ], + [ + 995, + 463 + ], + [ + 995, + 462 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 955, + 489 + ], + [ + 948, + 493 + ], + [ + 943, + 491 + ], + [ + 942, + 484 + ], + [ + 905, + 486 + ], + [ + 895, + 486 + ], + [ + 894, + 491 + ], + [ + 890, + 492 + ], + [ + 887, + 496 + ], + [ + 879, + 496 + ], + [ + 874, + 493 + ], + [ + 873, + 481 + ], + [ + 873, + 472 + ], + [ + 873, + 458 + ], + [ + 873, + 456 + ], + [ + 874, + 454 + ], + [ + 875, + 451 + ], + [ + 879, + 442 + ], + [ + 879, + 435 + ], + [ + 885, + 426 + ], + [ + 903, + 425 + ], + [ + 920, + 424 + ], + [ + 932, + 424 + ], + [ + 945, + 430 + ], + [ + 946, + 433 + ], + [ + 948, + 439 + ], + [ + 950, + 444 + ], + [ + 952, + 440 + ], + [ + 958, + 440 + ], + [ + 958, + 446 + ], + [ + 954, + 447 + ], + [ + 956, + 457 + ], + [ + 957, + 470 + ], + [ + 957, + 480 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1434, + 589 + ], + [ + 1347, + 590 + ], + [ + 1318, + 589 + ], + [ + 1276, + 567 + ], + [ + 1269, + 552 + ], + [ + 1289, + 525 + ], + [ + 1292, + 514 + ], + [ + 1284, + 501 + ], + [ + 1300, + 481 + ], + [ + 1320, + 486 + ], + [ + 1335, + 489 + ], + [ + 1349, + 487 + ], + [ + 1356, + 480 + ], + [ + 1388, + 479 + ], + [ + 1411, + 473 + ], + [ + 1431, + 457 + ], + [ + 1456, + 450 + ], + [ + 1488, + 456 + ], + [ + 1502, + 456 + ], + [ + 1544, + 459 + ], + [ + 1571, + 463 + ], + [ + 1577, + 491 + ], + [ + 1548, + 533 + ], + [ + 1508, + 573 + ], + [ + 1500, + 582 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1431, + 447 + ], + [ + 1414, + 442 + ], + [ + 1400, + 448 + ], + [ + 1391, + 458 + ], + [ + 1383, + 474 + ], + [ + 1377, + 485 + ], + [ + 1377, + 465 + ], + [ + 1375, + 451 + ], + [ + 1369, + 448 + ], + [ + 1365, + 460 + ], + [ + 1367, + 484 + ], + [ + 1369, + 507 + ], + [ + 1372, + 536 + ], + [ + 1377, + 547 + ], + [ + 1386, + 548 + ], + [ + 1388, + 539 + ], + [ + 1386, + 519 + ], + [ + 1384, + 500 + ], + [ + 1383, + 492 + ], + [ + 1391, + 482 + ], + [ + 1394, + 470 + ], + [ + 1399, + 461 + ], + [ + 1402, + 454 + ], + [ + 1409, + 450 + ], + [ + 1414, + 450 + ], + [ + 1419, + 451 + ], + [ + 1426, + 453 + ], + [ + 1432, + 458 + ], + [ + 1433, + 458 + ], + [ + 1438, + 454 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1564, + 456 + ], + [ + 1558, + 449 + ], + [ + 1543, + 438 + ], + [ + 1522, + 442 + ], + [ + 1507, + 454 + ], + [ + 1503, + 464 + ], + [ + 1502, + 473 + ], + [ + 1511, + 477 + ], + [ + 1514, + 471 + ], + [ + 1514, + 460 + ], + [ + 1517, + 449 + ], + [ + 1532, + 448 + ], + [ + 1537, + 448 + ], + [ + 1552, + 455 + ], + [ + 1554, + 462 + ], + [ + 1562, + 468 + ], + [ + 1575, + 464 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1527, + 596 + ], + [ + 1499, + 593 + ], + [ + 1475, + 583 + ], + [ + 1461, + 568 + ], + [ + 1455, + 538 + ], + [ + 1454, + 531 + ], + [ + 1460, + 529 + ], + [ + 1469, + 491 + ], + [ + 1475, + 477 + ], + [ + 1487, + 471 + ], + [ + 1602, + 447 + ], 
+ [ + 1691, + 422 + ], + [ + 1719, + 408 + ], + [ + 1758, + 384 + ], + [ + 1784, + 361 + ], + [ + 1805, + 353 + ], + [ + 1855, + 338 + ], + [ + 1901, + 335 + ], + [ + 1926, + 336 + ], + [ + 1947, + 349 + ], + [ + 1949, + 384 + ], + [ + 1917, + 440 + ], + [ + 1663, + 593 + ], + [ + 1622, + 613 + ], + [ + 1608, + 622 + ], + [ + 1584, + 621 + ], + [ + 1560, + 611 + ], + [ + 1547, + 598 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1902, + 671 + ], + [ + 1869, + 671 + ], + [ + 1836, + 664 + ], + [ + 1808, + 658 + ], + [ + 1782, + 663 + ], + [ + 1754, + 663 + ], + [ + 1717, + 643 + ], + [ + 1705, + 625 + ], + [ + 1694, + 621 + ], + [ + 1649, + 617 + ], + [ + 1591, + 612 + ], + [ + 1574, + 596 + ], + [ + 1564, + 562 + ], + [ + 1567, + 534 + ], + [ + 1574, + 503 + ], + [ + 1584, + 463 + ], + [ + 1590, + 442 + ], + [ + 1602, + 436 + ], + [ + 1750, + 407 + ], + [ + 1846, + 356 + ], + [ + 1905, + 337 + ], + [ + 1972, + 326 + ], + [ + 2017, + 323 + ], + [ + 2048, + 320 + ], + [ + 2048, + 320 + ], + [ + 2048, + 613 + ], + [ + 2022, + 611 + ], + [ + 1988, + 617 + ], + [ + 1970, + 631 + ], + [ + 1953, + 657 + ], + [ + 1930, + 669 + ], + [ + 1912, + 671 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 167, + 585 + ], + [ + 227, + 581 + ], + [ + 261, + 571 + ], + [ + 288, + 567 + ], + [ + 293, + 563 + ], + [ + 281, + 558 + ], + [ + 271, + 557 + ], + [ + 268, + 547 + ], + [ + 262, + 535 + ], + [ + 249, + 537 + ], + [ + 240, + 534 + ], + [ + 224, + 534 + ], + [ + 207, + 533 + ], + [ + 189, + 535 + ], + [ + 171, + 536 + ], + [ + 150, + 538 + ], + [ + 130, + 539 + ], + [ + 103, + 542 + ], + [ + 84, + 551 + ], + [ + 77, + 564 + ], + [ + 91, + 574 + ], + [ + 107, + 583 + ], + [ + 126, + 583 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 71, + 535 + ], + [ + 69, + 521 + ], + [ + 70, + 514 + ], + [ + 88, + 518 + ], + [ + 95, + 519 + ], + [ + 104, + 516 + ], + [ + 103, + 509 + ], + [ + 101, + 504 + ], + [ + 116, + 499 + ], + [ + 121, + 500 + ], + [ + 121, + 507 + ], + [ + 122, + 519 + ], + [ + 133, + 526 + ], + [ + 146, + 524 + ], + [ + 153, + 519 + ], + [ + 145, + 511 + ], + [ + 146, + 502 + ], + [ + 159, + 502 + ], + [ + 167, + 511 + ], + [ + 182, + 509 + ], + [ + 190, + 508 + ], + [ + 194, + 514 + ], + [ + 189, + 526 + ], + [ + 201, + 524 + ], + [ + 209, + 521 + ], + [ + 215, + 512 + ], + [ + 218, + 511 + ], + [ + 208, + 509 + ], + [ + 211, + 497 + ], + [ + 223, + 496 + ], + [ + 244, + 491 + ], + [ + 256, + 487 + ], + [ + 246, + 502 + ], + [ + 237, + 505 + ], + [ + 244, + 512 + ], + [ + 249, + 515 + ], + [ + 255, + 517 + ], + [ + 269, + 530 + ], + [ + 272, + 546 + ], + [ + 268, + 556 + ], + [ + 251, + 558 + ], + [ + 238, + 555 + ], + [ + 229, + 542 + ], + [ + 222, + 534 + ], + [ + 216, + 535 + ], + [ + 210, + 538 + ], + [ + 211, + 550 + ], + [ + 203, + 560 + ], + [ + 193, + 566 + ], + [ + 166, + 565 + ], + [ + 158, + 558 + ], + [ + 156, + 545 + ], + [ + 154, + 544 + ], + [ + 147, + 545 + ], + [ + 145, + 543 + ], + [ + 138, + 553 + ], + [ + 132, + 560 + ], + [ + 108, + 563 + ], + [ + 82, + 544 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 57, + 605 + ], + [ + 68, + 598 + ], + [ + 84, + 592 + ], + [ + 104, + 588 + ], + [ + 114, + 582 + ], + [ + 114, + 549 + ], + [ + 103, + 537 + ], + [ + 86, + 532 + ], + [ + 16, + 519 + ], + [ + 15, + 512 + ], + [ + 8, + 506 + ], + [ + 0, + 505 + ], + [ + 0, + 599 + ], + [ + 18, + 610 + ], + [ + 36, + 609 + ], + [ + 48, + 608 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 37, + 585 + ], + [ + 36, + 618 + ], + [ + 4, 
+ 618 + ], + [ + 0, + 616 + ], + [ + 0, + 584 + ], + [ + 0, + 581 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000137_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000137_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..ce69044513eb80cc6d2868d7374fb7ca7ee110ca Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000137_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000137_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000137_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..893dabc15c34214dcb8050b3e83a6b009af98b1d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000137_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000137_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000137_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..94506f85a0f7e5b0fc406c4f256ba5077a27e704 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000137_000019_gtFine_polygons.json @@ -0,0 +1,6469 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 789, + 48 + ], + [ + 1020, + 339 + ], + [ + 1112, + 409 + ], + [ + 1125, + 428 + ], + [ + 1161, + 430 + ], + [ + 1356, + 63 + ], + [ + 1370, + 0 + ], + [ + 761, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1633, + 670 + ], + [ + 1271, + 478 + ], + [ + 1156, + 467 + ], + [ + 1145, + 463 + ], + [ + 1142, + 456 + ], + [ + 1114, + 453 + ], + [ + 1050, + 455 + ], + [ + 923, + 462 + ], + [ + 521, + 501 + ], + [ + 0, + 562 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 703 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2041, + 801 + ], + [ + 1835, + 765 + ], + [ + 1592, + 715 + ], + [ + 1520, + 679 + ], + [ + 1447, + 630 + ], + [ + 1500, + 588 + ], + [ + 1349, + 530 + ], + [ + 1328, + 535 + ], + [ + 1289, + 535 + ], + [ + 1269, + 529 + ], + [ + 1266, + 522 + ], + [ + 1286, + 514 + ], + [ + 1322, + 501 + ], + [ + 1384, + 490 + ], + [ + 2023, + 505 + ], + [ + 2048, + 519 + ], + [ + 2048, + 804 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1108, + 240 + ], + [ + 1106, + 374 + ], + [ + 1120, + 377 + ], + [ + 1121, + 382 + ], + [ + 1128, + 382 + ], + [ + 1128, + 417 + ], + [ + 1136, + 418 + ], + [ + 1140, + 408 + ], + [ + 1143, + 403 + ], + [ + 1148, + 401 + ], + [ + 1148, + 383 + ], + [ + 1155, + 383 + ], + [ + 1156, + 389 + ], + [ + 1161, + 387 + ], + [ + 1161, + 357 + ], + [ + 1168, + 352 + ], + [ + 1173, + 
352 + ], + [ + 1191, + 327 + ], + [ + 1203, + 281 + ], + [ + 1202, + 230 + ], + [ + 1231, + 145 + ], + [ + 1229, + 84 + ], + [ + 1247, + 37 + ], + [ + 1305, + 36 + ], + [ + 1305, + 33 + ], + [ + 1315, + 27 + ], + [ + 1314, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 573 + ], + [ + 2000, + 568 + ], + [ + 1937, + 560 + ], + [ + 1889, + 557 + ], + [ + 1778, + 552 + ], + [ + 1426, + 504 + ], + [ + 1257, + 471 + ], + [ + 1169, + 461 + ], + [ + 1142, + 451 + ], + [ + 1127, + 452 + ], + [ + 1096, + 450 + ], + [ + 1085, + 450 + ], + [ + 1072, + 452 + ], + [ + 1057, + 452 + ], + [ + 1009, + 458 + ], + [ + 877, + 467 + ], + [ + 602, + 490 + ], + [ + 23, + 536 + ], + [ + 0, + 554 + ], + [ + 0, + 0 + ], + [ + 898, + 0 + ], + [ + 901, + 3 + ], + [ + 915, + 27 + ], + [ + 911, + 32 + ], + [ + 907, + 35 + ], + [ + 907, + 78 + ], + [ + 923, + 79 + ], + [ + 924, + 89 + ], + [ + 927, + 89 + ], + [ + 926, + 92 + ], + [ + 956, + 94 + ], + [ + 962, + 102 + ], + [ + 962, + 150 + ], + [ + 992, + 151 + ], + [ + 1006, + 174 + ], + [ + 1006, + 185 + ], + [ + 1015, + 187 + ], + [ + 1030, + 210 + ], + [ + 1029, + 232 + ], + [ + 1029, + 237 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1122, + 353 + ], + [ + 1120, + 351 + ], + [ + 1116, + 351 + ], + [ + 1113, + 352 + ], + [ + 1114, + 354 + ], + [ + 1115, + 355 + ], + [ + 1117, + 356 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1132, + 313 + ], + [ + 1132, + 319 + ], + [ + 1121, + 318 + ], + [ + 1121, + 314 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1121, + 264 + ], + [ + 1121, + 270 + ], + [ + 1136, + 270 + ], + [ + 1135, + 263 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1121, + 195 + ], + [ + 1121, + 203 + ], + [ + 1139, + 203 + ], + [ + 1138, + 196 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1100, + 67 + ], + [ + 1100, + 76 + ], + [ + 1126, + 75 + ], + [ + 1124, + 68 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1142, + 452 + ], + [ + 1139, + 448 + ], + [ + 1135, + 448 + ], + [ + 1132, + 452 + ], + [ + 1132, + 458 + ], + [ + 1139, + 458 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1133, + 440 + ], + [ + 1131, + 438 + ], + [ + 1123, + 439 + ], + [ + 1120, + 445 + ], + [ + 1121, + 454 + ], + [ + 1128, + 457 + ], + [ + 1134, + 457 + ], + [ + 1134, + 453 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1100, + 446 + ], + [ + 1096, + 439 + ], + [ + 1092, + 435 + ], + [ + 1081, + 432 + ], + [ + 1076, + 435 + ], + [ + 1071, + 440 + ], + [ + 1075, + 455 + ], + [ + 1081, + 466 + ], + [ + 1089, + 462 + ], + [ + 1098, + 458 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1102, + 419 + ], + [ + 1103, + 399 + ], + [ + 1107, + 399 + ], + [ + 1108, + 420 + ] + ] + }, + { + "label": "train", + "polygon": [ + [ + 1096, + 470 + ], + [ + 1095, + 460 + ], + [ + 1095, + 445 + ], + [ + 1096, + 432 + ], + [ + 1096, + 430 + ], + [ + 1097, + 428 + ], + [ + 1120, + 429 + ], + [ + 1124, + 436 + ], + [ + 1125, + 449 + ], + [ + 1125, + 465 + ], + [ + 1125, + 471 + ], + [ + 1121, + 471 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1149, + 451 + ], + [ + 1145, + 443 + ], + [ + 1144, + 432 + ], + [ + 1139, + 428 + ], + [ + 1130, + 425 + ], + [ + 1128, + 414 + ], + [ + 1132, + 416 + ], + [ + 1132, + 414 + ], + [ + 1134, + 407 + ], + [ + 1141, + 406 + ], + [ + 1143, + 399 + ], + [ + 1143, + 391 + ], + [ + 1147, + 384 + ], + [ + 1154, + 383 + ], + [ + 1155, + 381 + ], + [ + 1154, + 374 + ], + [ + 1157, + 371 + ], + [ + 1160, + 370 + ], + [ + 1153, + 368 + ], + [ + 
1151, + 364 + ], + [ + 1162, + 364 + ], + [ + 1157, + 357 + ], + [ + 1160, + 353 + ], + [ + 1171, + 351 + ], + [ + 1181, + 351 + ], + [ + 1186, + 363 + ], + [ + 1189, + 373 + ], + [ + 1194, + 382 + ], + [ + 1195, + 397 + ], + [ + 1192, + 408 + ], + [ + 1187, + 419 + ], + [ + 1186, + 424 + ], + [ + 1185, + 428 + ], + [ + 1180, + 437 + ], + [ + 1180, + 455 + ], + [ + 1166, + 457 + ], + [ + 1150, + 457 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1151, + 401 + ], + [ + 1151, + 387 + ], + [ + 1145, + 388 + ], + [ + 1145, + 401 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1134, + 476 + ], + [ + 1130, + 473 + ], + [ + 1130, + 468 + ], + [ + 1131, + 461 + ], + [ + 1133, + 455 + ], + [ + 1140, + 452 + ], + [ + 1144, + 450 + ], + [ + 1151, + 452 + ], + [ + 1155, + 455 + ], + [ + 1152, + 469 + ], + [ + 1147, + 475 + ], + [ + 1141, + 477 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1190, + 447 + ], + [ + 1189, + 427 + ], + [ + 1190, + 422 + ], + [ + 1198, + 420 + ], + [ + 1206, + 418 + ], + [ + 1211, + 413 + ], + [ + 1215, + 407 + ], + [ + 1219, + 403 + ], + [ + 1227, + 401 + ], + [ + 1229, + 399 + ], + [ + 1231, + 399 + ], + [ + 1239, + 404 + ], + [ + 1241, + 408 + ], + [ + 1239, + 411 + ], + [ + 1242, + 412 + ], + [ + 1257, + 412 + ], + [ + 1250, + 403 + ], + [ + 1245, + 398 + ], + [ + 1239, + 394 + ], + [ + 1233, + 382 + ], + [ + 1205, + 272 + ], + [ + 1194, + 272 + ], + [ + 1189, + 272 + ], + [ + 1178, + 275 + ], + [ + 1172, + 281 + ], + [ + 1162, + 291 + ], + [ + 1159, + 297 + ], + [ + 1166, + 302 + ], + [ + 1162, + 307 + ], + [ + 1160, + 311 + ], + [ + 1158, + 316 + ], + [ + 1159, + 324 + ], + [ + 1162, + 324 + ], + [ + 1152, + 326 + ], + [ + 1147, + 326 + ], + [ + 1146, + 330 + ], + [ + 1152, + 334 + ], + [ + 1159, + 335 + ], + [ + 1166, + 333 + ], + [ + 1168, + 335 + ], + [ + 1166, + 337 + ], + [ + 1162, + 339 + ], + [ + 1165, + 344 + ], + [ + 1169, + 352 + ], + [ + 1173, + 381 + ], + [ + 1185, + 428 + ], + [ + 1186, + 448 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1168, + 430 + ], + [ + 1169, + 438 + ], + [ + 1172, + 437 + ], + [ + 1170, + 444 + ], + [ + 1187, + 444 + ], + [ + 1189, + 432 + ], + [ + 1184, + 431 + ], + [ + 1184, + 420 + ], + [ + 1184, + 419 + ], + [ + 1177, + 419 + ], + [ + 1177, + 428 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1185, + 446 + ], + [ + 1185, + 431 + ], + [ + 1180, + 432 + ], + [ + 1180, + 447 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1195, + 446 + ], + [ + 1194, + 427 + ], + [ + 1194, + 420 + ], + [ + 1205, + 372 + ], + [ + 1197, + 348 + ], + [ + 1178, + 353 + ], + [ + 1183, + 402 + ], + [ + 1189, + 421 + ], + [ + 1191, + 424 + ], + [ + 1193, + 449 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1200, + 445 + ], + [ + 1200, + 429 + ], + [ + 1200, + 418 + ], + [ + 1201, + 406 + ], + [ + 1193, + 334 + ], + [ + 1186, + 317 + ], + [ + 1206, + 306 + ], + [ + 1233, + 296 + ], + [ + 1246, + 302 + ], + [ + 1255, + 325 + ], + [ + 1261, + 366 + ], + [ + 1261, + 370 + ], + [ + 1260, + 380 + ], + [ + 1264, + 385 + ], + [ + 1257, + 388 + ], + [ + 1252, + 389 + ], + [ + 1246, + 387 + ], + [ + 1240, + 389 + ], + [ + 1239, + 390 + ], + [ + 1235, + 391 + ], + [ + 1233, + 386 + ], + [ + 1235, + 384 + ], + [ + 1214, + 397 + ], + [ + 1204, + 415 + ], + [ + 1202, + 423 + ], + [ + 1202, + 452 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1211, + 439 + ], + [ + 1211, + 425 + ], + [ + 1209, + 409 + ], + [ + 1206, + 336 + ], + [ + 1198, + 267 + 
], + [ + 1193, + 267 + ], + [ + 1192, + 263 + ], + [ + 1193, + 260 + ], + [ + 1191, + 258 + ], + [ + 1187, + 256 + ], + [ + 1183, + 255 + ], + [ + 1181, + 251 + ], + [ + 1187, + 246 + ], + [ + 1195, + 247 + ], + [ + 1200, + 247 + ], + [ + 1203, + 242 + ], + [ + 1207, + 239 + ], + [ + 1214, + 239 + ], + [ + 1208, + 237 + ], + [ + 1207, + 233 + ], + [ + 1207, + 227 + ], + [ + 1208, + 221 + ], + [ + 1211, + 217 + ], + [ + 1218, + 218 + ], + [ + 1224, + 216 + ], + [ + 1229, + 214 + ], + [ + 1251, + 276 + ], + [ + 1232, + 375 + ], + [ + 1214, + 403 + ], + [ + 1214, + 413 + ], + [ + 1215, + 443 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1219, + 414 + ], + [ + 1219, + 433 + ], + [ + 1207, + 432 + ], + [ + 1208, + 414 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1226, + 438 + ], + [ + 1226, + 428 + ], + [ + 1225, + 405 + ], + [ + 1227, + 343 + ], + [ + 1220, + 279 + ], + [ + 1226, + 263 + ], + [ + 1223, + 247 + ], + [ + 1237, + 229 + ], + [ + 1255, + 233 + ], + [ + 1301, + 241 + ], + [ + 1303, + 258 + ], + [ + 1303, + 273 + ], + [ + 1306, + 290 + ], + [ + 1295, + 326 + ], + [ + 1287, + 340 + ], + [ + 1283, + 351 + ], + [ + 1283, + 358 + ], + [ + 1284, + 364 + ], + [ + 1280, + 369 + ], + [ + 1274, + 363 + ], + [ + 1268, + 364 + ], + [ + 1263, + 369 + ], + [ + 1255, + 375 + ], + [ + 1245, + 384 + ], + [ + 1230, + 391 + ], + [ + 1231, + 444 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1248, + 434 + ], + [ + 1249, + 418 + ], + [ + 1250, + 394 + ], + [ + 1252, + 377 + ], + [ + 1231, + 226 + ], + [ + 1226, + 216 + ], + [ + 1223, + 199 + ], + [ + 1222, + 183 + ], + [ + 1230, + 172 + ], + [ + 1235, + 161 + ], + [ + 1252, + 143 + ], + [ + 1289, + 135 + ], + [ + 1313, + 144 + ], + [ + 1324, + 184 + ], + [ + 1322, + 263 + ], + [ + 1326, + 274 + ], + [ + 1326, + 284 + ], + [ + 1315, + 291 + ], + [ + 1316, + 296 + ], + [ + 1321, + 302 + ], + [ + 1327, + 306 + ], + [ + 1335, + 310 + ], + [ + 1341, + 315 + ], + [ + 1340, + 320 + ], + [ + 1334, + 321 + ], + [ + 1334, + 331 + ], + [ + 1336, + 338 + ], + [ + 1334, + 342 + ], + [ + 1328, + 343 + ], + [ + 1321, + 338 + ], + [ + 1316, + 336 + ], + [ + 1305, + 343 + ], + [ + 1306, + 351 + ], + [ + 1299, + 353 + ], + [ + 1295, + 356 + ], + [ + 1288, + 354 + ], + [ + 1276, + 351 + ], + [ + 1258, + 370 + ], + [ + 1257, + 438 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1265, + 403 + ], + [ + 1262, + 397 + ], + [ + 1263, + 391 + ], + [ + 1259, + 391 + ], + [ + 1259, + 387 + ], + [ + 1264, + 385 + ], + [ + 1270, + 387 + ], + [ + 1271, + 391 + ], + [ + 1266, + 394 + ], + [ + 1271, + 396 + ], + [ + 1273, + 397 + ], + [ + 1270, + 403 + ], + [ + 1267, + 406 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1278, + 418 + ], + [ + 1266, + 400 + ], + [ + 1257, + 419 + ], + [ + 1257, + 420 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1296, + 387 + ], + [ + 1296, + 393 + ], + [ + 1293, + 399 + ], + [ + 1291, + 402 + ], + [ + 1283, + 401 + ], + [ + 1276, + 398 + ], + [ + 1273, + 391 + ], + [ + 1274, + 385 + ], + [ + 1278, + 382 + ], + [ + 1282, + 379 + ], + [ + 1285, + 376 + ], + [ + 1283, + 375 + ], + [ + 1279, + 375 + ], + [ + 1281, + 370 + ], + [ + 1284, + 367 + ], + [ + 1288, + 367 + ], + [ + 1293, + 370 + ], + [ + 1297, + 371 + ], + [ + 1305, + 371 + ], + [ + 1309, + 371 + ], + [ + 1309, + 368 + ], + [ + 1308, + 365 + ], + [ + 1313, + 364 + ], + [ + 1316, + 364 + ], + [ + 1315, + 367 + ], + [ + 1313, + 368 + ], + [ + 1313, + 372 + ], + [ + 1315, + 376 + ], + [ + 1316, + 376 + ], + [ + 
1319, + 374 + ], + [ + 1324, + 374 + ], + [ + 1329, + 377 + ], + [ + 1331, + 384 + ], + [ + 1333, + 390 + ], + [ + 1329, + 395 + ], + [ + 1326, + 398 + ], + [ + 1319, + 399 + ], + [ + 1314, + 398 + ], + [ + 1310, + 394 + ], + [ + 1307, + 393 + ], + [ + 1307, + 395 + ], + [ + 1307, + 397 + ], + [ + 1302, + 396 + ], + [ + 1303, + 392 + ], + [ + 1303, + 390 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1291, + 504 + ], + [ + 1248, + 509 + ], + [ + 1247, + 515 + ], + [ + 1426, + 625 + ], + [ + 1509, + 606 + ], + [ + 1365, + 508 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1291, + 504 + ], + [ + 1248, + 509 + ], + [ + 1247, + 515 + ], + [ + 1426, + 625 + ], + [ + 1509, + 606 + ], + [ + 1365, + 508 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1287, + 427 + ], + [ + 1287, + 381 + ], + [ + 1290, + 380 + ], + [ + 1293, + 430 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1284, + 420 + ], + [ + 1281, + 411 + ], + [ + 1281, + 402 + ], + [ + 1284, + 402 + ], + [ + 1287, + 406 + ], + [ + 1287, + 418 + ], + [ + 1287, + 423 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1324, + 438 + ], + [ + 1325, + 401 + ], + [ + 1328, + 400 + ], + [ + 1328, + 440 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1367, + 443 + ], + [ + 1364, + 443 + ], + [ + 1364, + 386 + ], + [ + 1366, + 386 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1371, + 339 + ], + [ + 1354, + 340 + ], + [ + 1354, + 393 + ], + [ + 1371, + 393 + ], + [ + 1371, + 393 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1361, + 448 + ], + [ + 1360, + 433 + ], + [ + 1357, + 364 + ], + [ + 1356, + 341 + ], + [ + 1354, + 302 + ], + [ + 1352, + 279 + ], + [ + 1353, + 263 + ], + [ + 1358, + 262 + ], + [ + 1368, + 266 + ], + [ + 1378, + 276 + ], + [ + 1388, + 284 + ], + [ + 1403, + 293 + ], + [ + 1440, + 291 + ], + [ + 1468, + 248 + ], + [ + 1478, + 231 + ], + [ + 1484, + 216 + ], + [ + 1493, + 210 + ], + [ + 1497, + 204 + ], + [ + 1491, + 197 + ], + [ + 1480, + 193 + ], + [ + 1471, + 188 + ], + [ + 1477, + 177 + ], + [ + 1488, + 163 + ], + [ + 1494, + 137 + ], + [ + 1477, + 137 + ], + [ + 1467, + 135 + ], + [ + 1462, + 129 + ], + [ + 1463, + 104 + ], + [ + 1461, + 92 + ], + [ + 1452, + 83 + ], + [ + 1456, + 69 + ], + [ + 1449, + 48 + ], + [ + 1436, + 40 + ], + [ + 1425, + 35 + ], + [ + 1428, + 24 + ], + [ + 1412, + 17 + ], + [ + 1402, + 24 + ], + [ + 1395, + 28 + ], + [ + 1390, + 22 + ], + [ + 1385, + 19 + ], + [ + 1356, + 24 + ], + [ + 1349, + 27 + ], + [ + 1343, + 25 + ], + [ + 1343, + 13 + ], + [ + 1333, + 14 + ], + [ + 1316, + 23 + ], + [ + 1303, + 31 + ], + [ + 1296, + 39 + ], + [ + 1293, + 37 + ], + [ + 1293, + 30 + ], + [ + 1291, + 26 + ], + [ + 1281, + 33 + ], + [ + 1273, + 42 + ], + [ + 1272, + 46 + ], + [ + 1281, + 49 + ], + [ + 1289, + 53 + ], + [ + 1290, + 56 + ], + [ + 1285, + 58 + ], + [ + 1279, + 58 + ], + [ + 1270, + 55 + ], + [ + 1257, + 64 + ], + [ + 1225, + 83 + ], + [ + 1222, + 88 + ], + [ + 1227, + 91 + ], + [ + 1241, + 89 + ], + [ + 1242, + 95 + ], + [ + 1236, + 103 + ], + [ + 1229, + 103 + ], + [ + 1223, + 102 + ], + [ + 1218, + 103 + ], + [ + 1214, + 109 + ], + [ + 1211, + 112 + ], + [ + 1204, + 114 + ], + [ + 1198, + 117 + ], + [ + 1196, + 124 + ], + [ + 1199, + 126 + ], + [ + 1194, + 127 + ], + [ + 1194, + 134 + ], + [ + 1200, + 135 + ], + [ + 1203, + 140 + ], + [ + 1200, + 148 + ], + [ + 1203, + 153 + ], + [ + 1208, + 152 + ], + [ + 1211, + 147 + ], + [ + 1212, + 151 + ], + [ + 1207, + 157 + ], + [ + 1214, + 160 + ], + [ + 1218, 
+ 159 + ], + [ + 1223, + 168 + ], + [ + 1225, + 180 + ], + [ + 1273, + 243 + ], + [ + 1325, + 265 + ], + [ + 1326, + 270 + ], + [ + 1331, + 269 + ], + [ + 1334, + 267 + ], + [ + 1343, + 267 + ], + [ + 1344, + 268 + ], + [ + 1343, + 285 + ], + [ + 1346, + 314 + ], + [ + 1347, + 359 + ], + [ + 1347, + 417 + ], + [ + 1348, + 456 + ], + [ + 1348, + 463 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1204, + 447 + ], + [ + 1204, + 440 + ], + [ + 1206, + 436 + ], + [ + 1213, + 434 + ], + [ + 1222, + 434 + ], + [ + 1232, + 434 + ], + [ + 1240, + 436 + ], + [ + 1245, + 442 + ], + [ + 1247, + 448 + ], + [ + 1247, + 453 + ], + [ + 1247, + 460 + ], + [ + 1239, + 470 + ], + [ + 1222, + 472 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1315, + 424 + ], + [ + 1270, + 425 + ], + [ + 1255, + 425 + ], + [ + 1247, + 427 + ], + [ + 1244, + 431 + ], + [ + 1238, + 444 + ], + [ + 1236, + 461 + ], + [ + 1236, + 483 + ], + [ + 1242, + 501 + ], + [ + 1249, + 509 + ], + [ + 1257, + 510 + ], + [ + 1260, + 506 + ], + [ + 1276, + 503 + ], + [ + 1306, + 501 + ], + [ + 1325, + 500 + ], + [ + 1335, + 494 + ], + [ + 1333, + 476 + ], + [ + 1331, + 446 + ], + [ + 1327, + 433 + ], + [ + 1321, + 425 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1295, + 463 + ], + [ + 1325, + 462 + ], + [ + 1327, + 461 + ], + [ + 1328, + 455 + ], + [ + 1338, + 454 + ], + [ + 1338, + 458 + ], + [ + 1351, + 457 + ], + [ + 1356, + 461 + ], + [ + 1353, + 477 + ], + [ + 1340, + 501 + ], + [ + 1336, + 503 + ], + [ + 1320, + 503 + ], + [ + 1309, + 505 + ], + [ + 1308, + 514 + ], + [ + 1309, + 520 + ], + [ + 1303, + 522 + ], + [ + 1296, + 519 + ], + [ + 1288, + 506 + ], + [ + 1292, + 501 + ], + [ + 1298, + 506 + ], + [ + 1297, + 494 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1155, + 516 + ], + [ + 1154, + 523 + ], + [ + 1152, + 526 + ], + [ + 1143, + 527 + ], + [ + 1134, + 526 + ], + [ + 1133, + 523 + ], + [ + 1133, + 508 + ], + [ + 1134, + 484 + ], + [ + 1135, + 478 + ], + [ + 1130, + 478 + ], + [ + 1128, + 472 + ], + [ + 1133, + 469 + ], + [ + 1138, + 469 + ], + [ + 1147, + 460 + ], + [ + 1151, + 450 + ], + [ + 1156, + 443 + ], + [ + 1157, + 442 + ], + [ + 1198, + 442 + ], + [ + 1215, + 444 + ], + [ + 1227, + 445 + ], + [ + 1230, + 449 + ], + [ + 1235, + 458 + ], + [ + 1238, + 465 + ], + [ + 1239, + 468 + ], + [ + 1244, + 466 + ], + [ + 1247, + 467 + ], + [ + 1250, + 470 + ], + [ + 1249, + 474 + ], + [ + 1245, + 478 + ], + [ + 1245, + 483 + ], + [ + 1247, + 501 + ], + [ + 1246, + 512 + ], + [ + 1244, + 517 + ], + [ + 1244, + 528 + ], + [ + 1237, + 530 + ], + [ + 1230, + 530 + ], + [ + 1229, + 524 + ], + [ + 1229, + 519 + ], + [ + 1229, + 518 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1339, + 544 + ], + [ + 1336, + 544 + ], + [ + 1332, + 543 + ], + [ + 1329, + 540 + ], + [ + 1329, + 528 + ], + [ + 1329, + 510 + ], + [ + 1327, + 501 + ], + [ + 1327, + 492 + ], + [ + 1333, + 482 + ], + [ + 1330, + 482 + ], + [ + 1326, + 481 + ], + [ + 1324, + 473 + ], + [ + 1328, + 470 + ], + [ + 1334, + 471 + ], + [ + 1336, + 476 + ], + [ + 1337, + 472 + ], + [ + 1344, + 454 + ], + [ + 1349, + 447 + ], + [ + 1353, + 443 + ], + [ + 1363, + 440 + ], + [ + 1381, + 439 + ], + [ + 1397, + 438 + ], + [ + 1404, + 438 + ], + [ + 1404, + 462 + ], + [ + 1367, + 535 + ], + [ + 1355, + 546 + ], + [ + 1353, + 549 + ], + [ + 1345, + 550 + ], + [ + 1341, + 550 + ], + [ + 1340, + 547 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1373, + 551 + ], + [ + 1370, + 557 + ], + [ + 1367, + 558 + ], + [ + 1358, + 559 + ], + [ 
+ 1352, + 551 + ], + [ + 1349, + 533 + ], + [ + 1353, + 524 + ], + [ + 1353, + 510 + ], + [ + 1354, + 499 + ], + [ + 1357, + 489 + ], + [ + 1360, + 486 + ], + [ + 1361, + 485 + ], + [ + 1357, + 484 + ], + [ + 1348, + 482 + ], + [ + 1350, + 475 + ], + [ + 1358, + 473 + ], + [ + 1364, + 475 + ], + [ + 1367, + 470 + ], + [ + 1372, + 461 + ], + [ + 1380, + 453 + ], + [ + 1398, + 446 + ], + [ + 1414, + 444 + ], + [ + 1421, + 501 + ], + [ + 1397, + 543 + ], + [ + 1384, + 550 + ], + [ + 1380, + 552 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1419, + 600 + ], + [ + 1403, + 601 + ], + [ + 1398, + 596 + ], + [ + 1395, + 585 + ], + [ + 1395, + 579 + ], + [ + 1391, + 579 + ], + [ + 1381, + 578 + ], + [ + 1377, + 571 + ], + [ + 1376, + 559 + ], + [ + 1374, + 530 + ], + [ + 1374, + 509 + ], + [ + 1377, + 494 + ], + [ + 1390, + 475 + ], + [ + 1395, + 466 + ], + [ + 1380, + 465 + ], + [ + 1369, + 461 + ], + [ + 1369, + 449 + ], + [ + 1379, + 444 + ], + [ + 1393, + 444 + ], + [ + 1426, + 445 + ], + [ + 1450, + 455 + ], + [ + 1464, + 584 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1747, + 203 + ], + [ + 1750, + 270 + ], + [ + 1677, + 270 + ], + [ + 1681, + 203 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1809, + 148 + ], + [ + 1807, + 266 + ], + [ + 1896, + 270 + ], + [ + 1889, + 144 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1909, + 389 + ], + [ + 1894, + 387 + ], + [ + 1889, + 400 + ], + [ + 1887, + 410 + ], + [ + 1871, + 420 + ], + [ + 1853, + 440 + ], + [ + 1849, + 495 + ], + [ + 1870, + 519 + ], + [ + 1878, + 543 + ], + [ + 1879, + 560 + ], + [ + 1905, + 561 + ], + [ + 1916, + 548 + ], + [ + 1918, + 504 + ], + [ + 1923, + 486 + ], + [ + 1935, + 488 + ], + [ + 1943, + 486 + ], + [ + 1946, + 466 + ], + [ + 1942, + 446 + ], + [ + 1925, + 423 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1801, + 661 + ], + [ + 1712, + 671 + ], + [ + 1571, + 677 + ], + [ + 1528, + 685 + ], + [ + 1492, + 684 + ], + [ + 1457, + 676 + ], + [ + 1443, + 646 + ], + [ + 1439, + 607 + ], + [ + 1435, + 603 + ], + [ + 1427, + 580 + ], + [ + 1430, + 563 + ], + [ + 1412, + 555 + ], + [ + 1391, + 544 + ], + [ + 1382, + 524 + ], + [ + 1384, + 493 + ], + [ + 1394, + 475 + ], + [ + 1392, + 455 + ], + [ + 1384, + 421 + ], + [ + 1388, + 382 + ], + [ + 1389, + 360 + ], + [ + 1382, + 338 + ], + [ + 1397, + 323 + ], + [ + 1397, + 300 + ], + [ + 1421, + 272 + ], + [ + 1447, + 245 + ], + [ + 1467, + 231 + ], + [ + 1493, + 228 + ], + [ + 1519, + 215 + ], + [ + 1553, + 242 + ], + [ + 1570, + 260 + ], + [ + 1584, + 266 + ], + [ + 1606, + 268 + ], + [ + 1632, + 269 + ], + [ + 1654, + 268 + ], + [ + 1668, + 264 + ], + [ + 1688, + 251 + ], + [ + 1709, + 254 + ], + [ + 1728, + 249 + ], + [ + 1751, + 237 + ], + [ + 1770, + 239 + ], + [ + 1791, + 229 + ], + [ + 1813, + 226 + ], + [ + 1827, + 246 + ], + [ + 1840, + 251 + ], + [ + 1852, + 238 + ], + [ + 1873, + 241 + ], + [ + 1881, + 225 + ], + [ + 1904, + 223 + ], + [ + 1940, + 220 + ], + [ + 1972, + 228 + ], + [ + 1950, + 272 + ], + [ + 1906, + 322 + ], + [ + 1887, + 339 + ], + [ + 1903, + 352 + ], + [ + 1883, + 377 + ], + [ + 1861, + 393 + ], + [ + 1866, + 405 + ], + [ + 1883, + 415 + ], + [ + 1896, + 422 + ], + [ + 1894, + 451 + ], + [ + 1878, + 477 + ], + [ + 1880, + 489 + ], + [ + 1898, + 482 + ], + [ + 1904, + 508 + ], + [ + 1888, + 558 + ], + [ + 1876, + 570 + ], + [ + 1863, + 576 + ], + [ + 1866, + 597 + ], + [ + 1875, + 614 + ], + [ + 1891, + 630 + ], + [ + 1889, + 657 + ], + [ + 1841, + 658 + ] + ] + }, + { + "label": 
"vegetation", + "polygon": [ + [ + 1555, + 282 + ], + [ + 1556, + 225 + ], + [ + 1560, + 122 + ], + [ + 1554, + 168 + ], + [ + 1516, + 140 + ], + [ + 1496, + 129 + ], + [ + 1489, + 111 + ], + [ + 1481, + 115 + ], + [ + 1464, + 127 + ], + [ + 1447, + 123 + ], + [ + 1460, + 84 + ], + [ + 1472, + 72 + ], + [ + 1468, + 54 + ], + [ + 1458, + 57 + ], + [ + 1431, + 50 + ], + [ + 1412, + 27 + ], + [ + 1401, + 0 + ], + [ + 1843, + 0 + ], + [ + 1845, + 7 + ], + [ + 1839, + 22 + ], + [ + 1809, + 33 + ], + [ + 1783, + 30 + ], + [ + 1783, + 35 + ], + [ + 1800, + 48 + ], + [ + 1801, + 70 + ], + [ + 1767, + 67 + ], + [ + 1753, + 53 + ], + [ + 1738, + 64 + ], + [ + 1704, + 58 + ], + [ + 1688, + 59 + ], + [ + 1666, + 60 + ], + [ + 1660, + 62 + ], + [ + 1690, + 76 + ], + [ + 1723, + 98 + ], + [ + 1751, + 120 + ], + [ + 1749, + 143 + ], + [ + 1735, + 155 + ], + [ + 1701, + 154 + ], + [ + 1665, + 125 + ], + [ + 1637, + 111 + ], + [ + 1643, + 127 + ], + [ + 1678, + 182 + ], + [ + 1679, + 195 + ], + [ + 1677, + 216 + ], + [ + 1654, + 241 + ], + [ + 1614, + 246 + ], + [ + 1588, + 229 + ], + [ + 1572, + 230 + ], + [ + 1569, + 240 + ], + [ + 1570, + 305 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1645, + 273 + ], + [ + 1642, + 598 + ], + [ + 1656, + 595 + ], + [ + 1655, + 270 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1640, + 280 + ], + [ + 1615, + 267 + ], + [ + 1611, + 248 + ], + [ + 1614, + 230 + ], + [ + 1623, + 216 + ], + [ + 1638, + 210 + ], + [ + 1654, + 209 + ], + [ + 1669, + 209 + ], + [ + 1681, + 222 + ], + [ + 1687, + 234 + ], + [ + 1688, + 253 + ], + [ + 1679, + 267 + ], + [ + 1663, + 278 + ], + [ + 1652, + 282 + ], + [ + 1644, + 283 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1526, + 576 + ], + [ + 1532, + 239 + ], + [ + 1532, + 222 + ], + [ + 1497, + 223 + ], + [ + 1499, + 212 + ], + [ + 1499, + 183 + ], + [ + 1490, + 173 + ], + [ + 1494, + 145 + ], + [ + 1487, + 130 + ], + [ + 1498, + 0 + ], + [ + 1627, + 0 + ], + [ + 1646, + 150 + ], + [ + 1667, + 179 + ], + [ + 1676, + 192 + ], + [ + 1681, + 204 + ], + [ + 1668, + 228 + ], + [ + 1647, + 248 + ], + [ + 1618, + 247 + ], + [ + 1605, + 235 + ], + [ + 1571, + 234 + ], + [ + 1546, + 248 + ], + [ + 1540, + 570 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1937, + 715 + ], + [ + 1897, + 717 + ], + [ + 1902, + 1 + ], + [ + 1902, + 0 + ], + [ + 1937, + 0 + ], + [ + 1937, + 1 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1901, + 17 + ], + [ + 1884, + 19 + ], + [ + 1881, + 28 + ], + [ + 1869, + 33 + ], + [ + 1844, + 35 + ], + [ + 1836, + 38 + ], + [ + 1829, + 48 + ], + [ + 1830, + 61 + ], + [ + 1842, + 65 + ], + [ + 1873, + 71 + ], + [ + 1875, + 85 + ], + [ + 1871, + 91 + ], + [ + 1853, + 92 + ], + [ + 1837, + 95 + ], + [ + 1832, + 103 + ], + [ + 1834, + 119 + ], + [ + 1849, + 126 + ], + [ + 1873, + 130 + ], + [ + 1874, + 142 + ], + [ + 1874, + 148 + ], + [ + 1845, + 148 + ], + [ + 1834, + 155 + ], + [ + 1832, + 173 + ], + [ + 1841, + 181 + ], + [ + 1868, + 183 + ], + [ + 1878, + 187 + ], + [ + 1876, + 197 + ], + [ + 1885, + 202 + ], + [ + 1900, + 205 + ], + [ + 1903, + 204 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1061, + 392 + ], + [ + 1062, + 454 + ], + [ + 1058, + 454 + ], + [ + 1058, + 391 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1048, + 431 + ], + [ + 1047, + 422 + ], + [ + 1052, + 412 + ], + [ + 1057, + 406 + ], + [ + 1064, + 404 + ], + [ + 1076, + 403 + ], + [ + 1083, + 404 + ], + [ + 1095, + 406 + ], + [ + 1099, + 400 + ], + [ + 
1099, + 394 + ], + [ + 1104, + 387 + ], + [ + 1104, + 382 + ], + [ + 1099, + 374 + ], + [ + 1101, + 367 + ], + [ + 1094, + 363 + ], + [ + 1088, + 360 + ], + [ + 1088, + 355 + ], + [ + 1093, + 347 + ], + [ + 1088, + 340 + ], + [ + 1083, + 334 + ], + [ + 1080, + 328 + ], + [ + 1088, + 323 + ], + [ + 1089, + 318 + ], + [ + 1082, + 316 + ], + [ + 1084, + 309 + ], + [ + 1089, + 307 + ], + [ + 1090, + 302 + ], + [ + 1081, + 299 + ], + [ + 1073, + 294 + ], + [ + 1064, + 287 + ], + [ + 1058, + 285 + ], + [ + 1046, + 284 + ], + [ + 1040, + 274 + ], + [ + 1035, + 270 + ], + [ + 1028, + 268 + ], + [ + 1028, + 417 + ], + [ + 1028, + 429 + ], + [ + 1028, + 440 + ], + [ + 1045, + 438 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1041, + 339 + ], + [ + 1044, + 435 + ], + [ + 1047, + 435 + ], + [ + 1045, + 337 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1037, + 435 + ], + [ + 1036, + 406 + ], + [ + 1040, + 400 + ], + [ + 1050, + 394 + ], + [ + 1080, + 388 + ], + [ + 1080, + 390 + ], + [ + 1054, + 396 + ], + [ + 1042, + 399 + ], + [ + 1039, + 403 + ], + [ + 1038, + 410 + ], + [ + 1038, + 438 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1077, + 398 + ], + [ + 1077, + 385 + ], + [ + 1084, + 384 + ], + [ + 1084, + 400 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1093, + 482 + ], + [ + 1093, + 472 + ], + [ + 1091, + 463 + ], + [ + 1093, + 462 + ], + [ + 1094, + 460 + ], + [ + 1092, + 459 + ], + [ + 1088, + 453 + ], + [ + 1082, + 449 + ], + [ + 1066, + 450 + ], + [ + 1057, + 450 + ], + [ + 1054, + 455 + ], + [ + 1057, + 476 + ], + [ + 1059, + 480 + ], + [ + 1064, + 480 + ], + [ + 1069, + 478 + ], + [ + 1076, + 478 + ], + [ + 1084, + 478 + ], + [ + 1087, + 481 + ], + [ + 1087, + 484 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1016, + 432 + ], + [ + 1017, + 429 + ], + [ + 1017, + 423 + ], + [ + 1017, + 421 + ], + [ + 1017, + 418 + ], + [ + 1014, + 416 + ], + [ + 1009, + 419 + ], + [ + 1009, + 421 + ], + [ + 1009, + 423 + ], + [ + 1010, + 428 + ], + [ + 1010, + 430 + ], + [ + 1010, + 433 + ], + [ + 1013, + 436 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 977, + 423 + ], + [ + 977, + 407 + ], + [ + 958, + 407 + ], + [ + 958, + 423 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 789, + 359 + ], + [ + 789, + 444 + ], + [ + 793, + 444 + ], + [ + 791, + 356 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 788, + 344 + ], + [ + 782, + 350 + ], + [ + 782, + 357 + ], + [ + 784, + 360 + ], + [ + 791, + 362 + ], + [ + 798, + 360 + ], + [ + 798, + 351 + ], + [ + 794, + 344 + ], + [ + 791, + 344 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 798, + 375 + ], + [ + 799, + 361 + ], + [ + 781, + 361 + ], + [ + 781, + 375 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 782, + 387 + ], + [ + 781, + 382 + ], + [ + 783, + 379 + ], + [ + 785, + 375 + ], + [ + 789, + 374 + ], + [ + 794, + 374 + ], + [ + 798, + 376 + ], + [ + 800, + 381 + ], + [ + 799, + 387 + ], + [ + 796, + 392 + ], + [ + 793, + 394 + ], + [ + 786, + 393 + ], + [ + 782, + 390 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 778, + 379 + ], + [ + 775, + 376 + ], + [ + 771, + 375 + ], + [ + 766, + 375 + ], + [ + 762, + 378 + ], + [ + 760, + 382 + ], + [ + 760, + 386 + ], + [ + 761, + 390 + ], + [ + 764, + 393 + ], + [ + 768, + 395 + ], + [ + 775, + 395 + ], + [ + 779, + 390 + ], + [ + 780, + 385 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 799, + 403 + ], + [ + 799, + 393 + ], + [ + 782, + 
393 + ], + [ + 781, + 403 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 780, + 314 + ], + [ + 780, + 354 + ], + [ + 737, + 354 + ], + [ + 737, + 330 + ], + [ + 748, + 327 + ], + [ + 748, + 313 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 728, + 350 + ], + [ + 747, + 338 + ], + [ + 749, + 332 + ], + [ + 748, + 324 + ], + [ + 743, + 323 + ], + [ + 738, + 326 + ], + [ + 735, + 325 + ], + [ + 733, + 322 + ], + [ + 726, + 323 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 708, + 347 + ], + [ + 729, + 350 + ], + [ + 728, + 322 + ], + [ + 709, + 321 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 675, + 359 + ], + [ + 674, + 316 + ], + [ + 710, + 316 + ], + [ + 711, + 360 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 669, + 364 + ], + [ + 699, + 364 + ], + [ + 697, + 380 + ], + [ + 697, + 384 + ], + [ + 693, + 388 + ], + [ + 668, + 388 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1008, + 464 + ], + [ + 1007, + 448 + ], + [ + 1008, + 440 + ], + [ + 1015, + 432 + ], + [ + 1026, + 431 + ], + [ + 1037, + 431 + ], + [ + 1050, + 434 + ], + [ + 1057, + 436 + ], + [ + 1061, + 447 + ], + [ + 1062, + 455 + ], + [ + 1065, + 456 + ], + [ + 1071, + 455 + ], + [ + 1072, + 461 + ], + [ + 1068, + 468 + ], + [ + 1066, + 484 + ], + [ + 1067, + 493 + ], + [ + 1064, + 497 + ], + [ + 1054, + 499 + ], + [ + 1044, + 489 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 947, + 450 + ], + [ + 953, + 444 + ], + [ + 962, + 441 + ], + [ + 974, + 440 + ], + [ + 987, + 440 + ], + [ + 1000, + 443 + ], + [ + 1003, + 449 + ], + [ + 1003, + 464 + ], + [ + 995, + 484 + ], + [ + 989, + 496 + ], + [ + 973, + 499 + ], + [ + 962, + 499 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1042, + 501 + ], + [ + 997, + 501 + ], + [ + 994, + 503 + ], + [ + 993, + 508 + ], + [ + 983, + 507 + ], + [ + 979, + 505 + ], + [ + 977, + 496 + ], + [ + 977, + 483 + ], + [ + 979, + 472 + ], + [ + 980, + 469 + ], + [ + 973, + 469 + ], + [ + 970, + 464 + ], + [ + 976, + 462 + ], + [ + 982, + 459 + ], + [ + 988, + 451 + ], + [ + 990, + 447 + ], + [ + 996, + 446 + ], + [ + 1040, + 446 + ], + [ + 1042, + 447 + ], + [ + 1048, + 461 + ], + [ + 1049, + 465 + ], + [ + 1052, + 463 + ], + [ + 1059, + 463 + ], + [ + 1060, + 469 + ], + [ + 1056, + 473 + ], + [ + 1059, + 485 + ], + [ + 1059, + 495 + ], + [ + 1056, + 503 + ], + [ + 1051, + 505 + ], + [ + 1044, + 505 + ], + [ + 1043, + 505 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 849, + 446 + ], + [ + 843, + 440 + ], + [ + 834, + 437 + ], + [ + 831, + 437 + ], + [ + 791, + 436 + ], + [ + 777, + 438 + ], + [ + 762, + 441 + ], + [ + 753, + 449 + ], + [ + 752, + 471 + ], + [ + 757, + 502 + ], + [ + 767, + 518 + ], + [ + 782, + 520 + ], + [ + 786, + 518 + ], + [ + 788, + 514 + ], + [ + 796, + 514 + ], + [ + 797, + 517 + ], + [ + 806, + 518 + ], + [ + 811, + 513 + ], + [ + 817, + 510 + ], + [ + 843, + 508 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 852, + 548 + ], + [ + 849, + 544 + ], + [ + 847, + 540 + ], + [ + 838, + 540 + ], + [ + 840, + 549 + ], + [ + 834, + 551 + ], + [ + 825, + 551 + ], + [ + 819, + 545 + ], + [ + 819, + 531 + ], + [ + 818, + 502 + ], + [ + 823, + 479 + ], + [ + 827, + 464 + ], + [ + 838, + 435 + ], + [ + 842, + 429 + ], + [ + 846, + 425 + ], + [ + 861, + 424 + ], + [ + 922, + 423 + ], + [ + 942, + 426 + ], + [ + 952, + 436 + ], + [ + 962, + 451 + ], + [ + 964, + 464 + ], + [ + 965, + 458 + ], + [ + 976, + 458 + ], + [ + 984, + 464 + ], + [ + 981, + 469 + ], + [ + 967, + 473 + ], + [ + 
974, + 479 + ], + [ + 976, + 491 + ], + [ + 976, + 508 + ], + [ + 976, + 540 + ], + [ + 975, + 545 + ], + [ + 962, + 545 + ], + [ + 961, + 547 + ], + [ + 959, + 552 + ], + [ + 950, + 553 + ], + [ + 944, + 553 + ], + [ + 941, + 548 + ], + [ + 940, + 538 + ], + [ + 863, + 538 + ], + [ + 861, + 545 + ], + [ + 856, + 548 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 665, + 440 + ], + [ + 681, + 436 + ], + [ + 694, + 436 + ], + [ + 718, + 436 + ], + [ + 734, + 436 + ], + [ + 743, + 442 + ], + [ + 753, + 451 + ], + [ + 761, + 462 + ], + [ + 762, + 465 + ], + [ + 768, + 464 + ], + [ + 773, + 469 + ], + [ + 771, + 479 + ], + [ + 780, + 488 + ], + [ + 780, + 495 + ], + [ + 781, + 511 + ], + [ + 781, + 527 + ], + [ + 776, + 532 + ], + [ + 771, + 533 + ], + [ + 765, + 532 + ], + [ + 765, + 522 + ], + [ + 745, + 522 + ], + [ + 745, + 532 + ], + [ + 742, + 536 + ], + [ + 736, + 538 + ], + [ + 726, + 537 + ], + [ + 726, + 530 + ], + [ + 723, + 526 + ], + [ + 706, + 526 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 508, + 437 + ], + [ + 511, + 428 + ], + [ + 515, + 420 + ], + [ + 523, + 413 + ], + [ + 538, + 405 + ], + [ + 548, + 405 + ], + [ + 560, + 406 + ], + [ + 631, + 407 + ], + [ + 643, + 405 + ], + [ + 655, + 406 + ], + [ + 677, + 421 + ], + [ + 691, + 447 + ], + [ + 696, + 450 + ], + [ + 696, + 444 + ], + [ + 703, + 443 + ], + [ + 710, + 449 + ], + [ + 712, + 454 + ], + [ + 701, + 458 + ], + [ + 713, + 471 + ], + [ + 717, + 491 + ], + [ + 719, + 522 + ], + [ + 716, + 545 + ], + [ + 713, + 550 + ], + [ + 707, + 553 + ], + [ + 696, + 553 + ], + [ + 690, + 544 + ], + [ + 691, + 543 + ], + [ + 674, + 541 + ], + [ + 674, + 551 + ], + [ + 672, + 559 + ], + [ + 666, + 561 + ], + [ + 653, + 563 + ], + [ + 645, + 560 + ], + [ + 644, + 553 + ], + [ + 644, + 546 + ], + [ + 592, + 542 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 325, + 373 + ], + [ + 325, + 337 + ], + [ + 365, + 325 + ], + [ + 366, + 372 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 345, + 444 + ], + [ + 357, + 436 + ], + [ + 381, + 429 + ], + [ + 414, + 426 + ], + [ + 466, + 423 + ], + [ + 501, + 426 + ], + [ + 523, + 431 + ], + [ + 539, + 442 + ], + [ + 557, + 464 + ], + [ + 560, + 465 + ], + [ + 568, + 464 + ], + [ + 575, + 468 + ], + [ + 579, + 474 + ], + [ + 573, + 482 + ], + [ + 582, + 491 + ], + [ + 594, + 514 + ], + [ + 595, + 526 + ], + [ + 601, + 542 + ], + [ + 600, + 557 + ], + [ + 596, + 573 + ], + [ + 588, + 577 + ], + [ + 573, + 579 + ], + [ + 562, + 578 + ], + [ + 558, + 576 + ], + [ + 553, + 567 + ], + [ + 519, + 566 + ], + [ + 519, + 578 + ], + [ + 515, + 587 + ], + [ + 508, + 593 + ], + [ + 496, + 594 + ], + [ + 486, + 594 + ], + [ + 482, + 586 + ], + [ + 481, + 574 + ], + [ + 472, + 574 + ], + [ + 468, + 570 + ], + [ + 466, + 568 + ], + [ + 444, + 568 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 22, + 467 + ], + [ + 45, + 452 + ], + [ + 85, + 424 + ], + [ + 122, + 419 + ], + [ + 154, + 414 + ], + [ + 216, + 414 + ], + [ + 275, + 414 + ], + [ + 308, + 416 + ], + [ + 349, + 428 + ], + [ + 370, + 445 + ], + [ + 393, + 470 + ], + [ + 394, + 464 + ], + [ + 408, + 464 + ], + [ + 416, + 476 + ], + [ + 419, + 483 + ], + [ + 408, + 487 + ], + [ + 409, + 490 + ], + [ + 436, + 504 + ], + [ + 453, + 522 + ], + [ + 457, + 560 + ], + [ + 456, + 579 + ], + [ + 452, + 603 + ], + [ + 447, + 612 + ], + [ + 434, + 616 + ], + [ + 415, + 616 + ], + [ + 409, + 611 + ], + [ + 407, + 596 + ], + [ + 405, + 595 + ], + [ + 326, + 606 + ], + [ + 325, + 612 + ], + [ + 321, + 632 + ], + [ + 320, + 
637 + ], + [ + 314, + 642 + ], + [ + 301, + 643 + ], + [ + 286, + 643 + ], + [ + 281, + 643 + ], + [ + 264, + 634 + ], + [ + 259, + 624 + ], + [ + 253, + 613 + ], + [ + 226, + 613 + ], + [ + 174, + 613 + ], + [ + 115, + 617 + ], + [ + 69, + 622 + ], + [ + 61, + 643 + ], + [ + 42, + 657 + ], + [ + 25, + 657 + ], + [ + 13, + 646 + ], + [ + 9, + 633 + ], + [ + 0, + 626 + ], + [ + 0, + 466 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 51, + 503 + ], + [ + 147, + 503 + ], + [ + 137, + 526 + ], + [ + 51, + 525 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 861, + 519 + ], + [ + 907, + 519 + ], + [ + 908, + 507 + ], + [ + 861, + 507 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000138_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000138_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..9a0f7ba04aa820502d71f56c333fab4168d65b24 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000138_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000139_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000139_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..78a151473420c84d5473d05e476ee1f903b94e5d --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000139_000019_gtFine_polygons.json @@ -0,0 +1,5430 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 553, + 17 + ], + [ + 976, + 364 + ], + [ + 1082, + 360 + ], + [ + 1147, + 192 + ], + [ + 1179, + 0 + ], + [ + 515, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1984, + 693 + ], + [ + 1232, + 430 + ], + [ + 1056, + 431 + ], + [ + 984, + 427 + ], + [ + 886, + 431 + ], + [ + 474, + 457 + ], + [ + 0, + 515 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 728 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 0, + 816 + ], + [ + 889, + 475 + ], + [ + 781, + 456 + ], + [ + 389, + 510 + ], + [ + 0, + 574 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1131, + 465 + ], + [ + 1092, + 468 + ], + [ + 1067, + 467 + ], + [ + 1049, + 465 + ], + [ + 1039, + 457 + ], + [ + 1039, + 450 + ], + [ + 1067, + 439 + ], + [ + 1107, + 431 + ], + [ + 1153, + 422 + ], + [ + 1252, + 423 + ], + [ + 1311, + 433 + ], + [ + 1322, + 445 + ], + [ + 1301, + 457 + ], + [ + 1237, + 464 + ], + [ + 1181, + 465 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1963, + 988 + ], + [ + 1292, + 574 + ], + [ + 1154, + 498 + ], + [ + 1137, + 480 + ], + [ + 1151, + 473 + ], + [ + 1290, + 458 + ], + [ + 1553, + 443 + ], + [ + 
1994, + 411 + ], + [ + 2048, + 518 + ], + [ + 2048, + 1024 + ], + [ + 2023, + 1024 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 48, + 602 + ], + [ + 418, + 541 + ], + [ + 481, + 528 + ], + [ + 499, + 517 + ], + [ + 497, + 505 + ], + [ + 471, + 499 + ], + [ + 381, + 502 + ], + [ + 166, + 514 + ], + [ + 0, + 554 + ], + [ + 0, + 606 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 38, + 994 + ], + [ + 918, + 515 + ], + [ + 976, + 481 + ], + [ + 976, + 473 + ], + [ + 951, + 473 + ], + [ + 883, + 477 + ], + [ + 792, + 500 + ], + [ + 17, + 702 + ], + [ + 0, + 706 + ], + [ + 0, + 1024 + ], + [ + 5, + 1013 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 987, + 131 + ], + [ + 992, + 323 + ], + [ + 1009, + 326 + ], + [ + 1045, + 328 + ], + [ + 1072, + 419 + ], + [ + 1065, + 432 + ], + [ + 1043, + 434 + ], + [ + 1007, + 432 + ], + [ + 978, + 432 + ], + [ + 966, + 435 + ], + [ + 951, + 437 + ], + [ + 941, + 440 + ], + [ + 796, + 454 + ], + [ + 622, + 480 + ], + [ + 512, + 491 + ], + [ + 477, + 492 + ], + [ + 431, + 492 + ], + [ + 293, + 495 + ], + [ + 73, + 505 + ], + [ + 0, + 503 + ], + [ + 0, + 0 + ], + [ + 606, + 0 + ], + [ + 613, + 1 + ], + [ + 646, + 35 + ], + [ + 650, + 31 + ], + [ + 662, + 31 + ], + [ + 667, + 34 + ], + [ + 684, + 35 + ], + [ + 682, + 42 + ], + [ + 685, + 44 + ], + [ + 689, + 50 + ], + [ + 692, + 53 + ], + [ + 706, + 53 + ], + [ + 706, + 70 + ], + [ + 711, + 73 + ], + [ + 718, + 71 + ], + [ + 724, + 71 + ], + [ + 728, + 71 + ], + [ + 728, + 83 + ], + [ + 735, + 85 + ], + [ + 733, + 90 + ], + [ + 739, + 92 + ], + [ + 765, + 100 + ], + [ + 777, + 113 + ], + [ + 773, + 118 + ], + [ + 776, + 119 + ], + [ + 781, + 123 + ], + [ + 922, + 118 + ], + [ + 943, + 119 + ], + [ + 948, + 121 + ], + [ + 953, + 123 + ], + [ + 980, + 125 + ], + [ + 986, + 126 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1076, + 250 + ], + [ + 1106, + 246 + ], + [ + 1111, + 243 + ], + [ + 1112, + 226 + ], + [ + 1100, + 225 + ], + [ + 1096, + 202 + ], + [ + 1125, + 35 + ], + [ + 1128, + 34 + ], + [ + 1143, + 32 + ], + [ + 1150, + 4 + ], + [ + 1151, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 503 + ], + [ + 1821, + 480 + ], + [ + 1665, + 463 + ], + [ + 1460, + 460 + ], + [ + 1349, + 455 + ], + [ + 1254, + 450 + ], + [ + 1217, + 452 + ], + [ + 1182, + 450 + ], + [ + 1161, + 445 + ], + [ + 1139, + 442 + ], + [ + 1111, + 415 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1020, + 174 + ], + [ + 1018, + 168 + ], + [ + 1039, + 166 + ], + [ + 1039, + 172 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 893, + 178 + ], + [ + 890, + 184 + ], + [ + 875, + 183 + ], + [ + 876, + 176 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 930, + 219 + ], + [ + 930, + 226 + ], + [ + 914, + 226 + ], + [ + 914, + 221 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 950, + 242 + ], + [ + 950, + 248 + ], + [ + 938, + 247 + ], + [ + 938, + 243 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 964, + 271 + ], + [ + 965, + 276 + ], + [ + 951, + 276 + ], + [ + 951, + 271 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 988, + 401 + ], + [ + 988, + 366 + ], + [ + 981, + 366 + ], + [ + 981, + 404 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1002, + 396 + ], + [ + 1002, + 400 + ], + [ + 1005, + 400 + ], + [ + 1005, + 396 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1005, + 407 + ], + [ + 995, + 407 + ], + [ + 997, + 419 + ], + [ + 1008, + 416 + ] + ] + }, + { + "label": "bus", + "polygon": [ + [ 
+ 989, + 417 + ], + [ + 977, + 415 + ], + [ + 965, + 416 + ], + [ + 961, + 419 + ], + [ + 960, + 429 + ], + [ + 962, + 437 + ], + [ + 986, + 436 + ], + [ + 1002, + 433 + ], + [ + 998, + 422 + ], + [ + 997, + 420 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 952, + 394 + ], + [ + 951, + 431 + ], + [ + 954, + 431 + ], + [ + 954, + 394 + ], + [ + 981, + 389 + ], + [ + 983, + 386 + ], + [ + 969, + 387 + ], + [ + 958, + 391 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 985, + 384 + ], + [ + 985, + 390 + ], + [ + 981, + 390 + ], + [ + 982, + 384 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 949, + 415 + ], + [ + 955, + 415 + ], + [ + 954, + 427 + ], + [ + 948, + 427 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 970, + 443 + ], + [ + 967, + 445 + ], + [ + 964, + 444 + ], + [ + 962, + 443 + ], + [ + 963, + 437 + ], + [ + 965, + 430 + ], + [ + 969, + 429 + ], + [ + 980, + 429 + ], + [ + 985, + 430 + ], + [ + 987, + 435 + ], + [ + 986, + 440 + ], + [ + 984, + 443 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 967, + 401 + ], + [ + 967, + 370 + ], + [ + 962, + 370 + ], + [ + 961, + 401 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 960, + 406 + ], + [ + 960, + 402 + ], + [ + 949, + 401 + ], + [ + 948, + 406 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1010, + 426 + ], + [ + 1007, + 423 + ], + [ + 1003, + 422 + ], + [ + 997, + 422 + ], + [ + 997, + 427 + ], + [ + 999, + 432 + ], + [ + 1009, + 430 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1042, + 401 + ], + [ + 1034, + 392 + ], + [ + 1028, + 395 + ], + [ + 1027, + 404 + ], + [ + 1020, + 415 + ], + [ + 1020, + 425 + ], + [ + 1027, + 429 + ], + [ + 1039, + 431 + ], + [ + 1042, + 423 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1030, + 442 + ], + [ + 1034, + 440 + ], + [ + 1035, + 433 + ], + [ + 1031, + 430 + ], + [ + 1028, + 427 + ], + [ + 1022, + 426 + ], + [ + 1016, + 427 + ], + [ + 1015, + 433 + ], + [ + 1016, + 440 + ], + [ + 1021, + 441 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1051, + 455 + ], + [ + 1047, + 456 + ], + [ + 1039, + 455 + ], + [ + 1037, + 444 + ], + [ + 1035, + 433 + ], + [ + 1034, + 423 + ], + [ + 1037, + 414 + ], + [ + 1039, + 408 + ], + [ + 1035, + 405 + ], + [ + 1032, + 400 + ], + [ + 1033, + 393 + ], + [ + 1030, + 389 + ], + [ + 1024, + 390 + ], + [ + 1025, + 385 + ], + [ + 1026, + 383 + ], + [ + 1016, + 380 + ], + [ + 1008, + 374 + ], + [ + 1012, + 370 + ], + [ + 1013, + 365 + ], + [ + 1008, + 362 + ], + [ + 1005, + 359 + ], + [ + 1003, + 351 + ], + [ + 1006, + 344 + ], + [ + 1005, + 341 + ], + [ + 997, + 340 + ], + [ + 999, + 334 + ], + [ + 1008, + 334 + ], + [ + 1011, + 331 + ], + [ + 1012, + 325 + ], + [ + 1007, + 323 + ], + [ + 1009, + 309 + ], + [ + 1009, + 304 + ], + [ + 1013, + 304 + ], + [ + 1021, + 303 + ], + [ + 1025, + 300 + ], + [ + 1023, + 294 + ], + [ + 1024, + 290 + ], + [ + 1024, + 287 + ], + [ + 1021, + 285 + ], + [ + 1014, + 285 + ], + [ + 1017, + 283 + ], + [ + 1022, + 280 + ], + [ + 1022, + 276 + ], + [ + 1022, + 273 + ], + [ + 1027, + 274 + ], + [ + 1028, + 272 + ], + [ + 1026, + 270 + ], + [ + 1026, + 268 + ], + [ + 1029, + 265 + ], + [ + 1030, + 264 + ], + [ + 1028, + 262 + ], + [ + 1026, + 260 + ], + [ + 1030, + 258 + ], + [ + 1031, + 257 + ], + [ + 1032, + 254 + ], + [ + 1037, + 255 + ], + [ + 1039, + 255 + ], + [ + 1038, + 251 + ], + [ + 1040, + 251 + ], + [ + 1044, + 252 + ], + [ + 1043, + 250 + ], + [ + 1047, + 248 + ], + [ + 1048, + 249 + ], + [ + 1052, + 
251 + ], + [ + 1054, + 251 + ], + [ + 1058, + 247 + ], + [ + 1057, + 245 + ], + [ + 1061, + 244 + ], + [ + 1064, + 246 + ], + [ + 1067, + 248 + ], + [ + 1069, + 246 + ], + [ + 1072, + 245 + ], + [ + 1076, + 246 + ], + [ + 1080, + 246 + ], + [ + 1085, + 246 + ], + [ + 1088, + 245 + ], + [ + 1092, + 248 + ], + [ + 1092, + 253 + ], + [ + 1094, + 257 + ], + [ + 1101, + 258 + ], + [ + 1107, + 258 + ], + [ + 1107, + 264 + ], + [ + 1110, + 268 + ], + [ + 1113, + 273 + ], + [ + 1113, + 278 + ], + [ + 1115, + 282 + ], + [ + 1118, + 285 + ], + [ + 1115, + 290 + ], + [ + 1119, + 291 + ], + [ + 1120, + 293 + ], + [ + 1126, + 294 + ], + [ + 1124, + 301 + ], + [ + 1124, + 305 + ], + [ + 1124, + 312 + ], + [ + 1130, + 308 + ], + [ + 1133, + 309 + ], + [ + 1132, + 317 + ], + [ + 1132, + 323 + ], + [ + 1126, + 324 + ], + [ + 1128, + 330 + ], + [ + 1131, + 327 + ], + [ + 1136, + 332 + ], + [ + 1135, + 336 + ], + [ + 1141, + 338 + ], + [ + 1144, + 344 + ], + [ + 1143, + 352 + ], + [ + 1139, + 356 + ], + [ + 1133, + 364 + ], + [ + 1135, + 371 + ], + [ + 1136, + 376 + ], + [ + 1135, + 380 + ], + [ + 1136, + 382 + ], + [ + 1142, + 387 + ], + [ + 1145, + 396 + ], + [ + 1138, + 408 + ], + [ + 1131, + 418 + ], + [ + 1127, + 429 + ], + [ + 1128, + 438 + ], + [ + 1128, + 444 + ], + [ + 1126, + 449 + ], + [ + 1098, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1056, + 391 + ], + [ + 1057, + 435 + ], + [ + 1059, + 435 + ], + [ + 1058, + 390 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1053, + 412 + ], + [ + 1053, + 397 + ], + [ + 1062, + 397 + ], + [ + 1063, + 413 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1066, + 458 + ], + [ + 1059, + 458 + ], + [ + 1050, + 458 + ], + [ + 1037, + 457 + ], + [ + 1037, + 432 + ], + [ + 1042, + 428 + ], + [ + 1051, + 427 + ], + [ + 1063, + 427 + ], + [ + 1093, + 435 + ], + [ + 1102, + 436 + ], + [ + 1111, + 436 + ], + [ + 1117, + 441 + ], + [ + 1118, + 451 + ], + [ + 1115, + 455 + ], + [ + 1109, + 457 + ], + [ + 1100, + 458 + ], + [ + 1091, + 457 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1101, + 400 + ], + [ + 1102, + 460 + ], + [ + 1065, + 461 + ], + [ + 1065, + 400 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1090, + 354 + ], + [ + 1094, + 464 + ], + [ + 1091, + 464 + ], + [ + 1086, + 354 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1082, + 365 + ], + [ + 1091, + 364 + ], + [ + 1093, + 366 + ], + [ + 1093, + 368 + ], + [ + 1091, + 370 + ], + [ + 1090, + 376 + ], + [ + 1085, + 375 + ], + [ + 1086, + 369 + ], + [ + 1085, + 368 + ], + [ + 1082, + 368 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1080, + 378 + ], + [ + 1088, + 376 + ], + [ + 1088, + 392 + ], + [ + 1081, + 392 + ], + [ + 1080, + 387 + ], + [ + 1083, + 386 + ], + [ + 1085, + 385 + ], + [ + 1085, + 384 + ], + [ + 1079, + 383 + ], + [ + 1079, + 381 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1095, + 393 + ], + [ + 1093, + 390 + ], + [ + 1090, + 389 + ], + [ + 1086, + 390 + ], + [ + 1084, + 393 + ], + [ + 1084, + 393 + ], + [ + 1084, + 397 + ], + [ + 1085, + 401 + ], + [ + 1087, + 403 + ], + [ + 1092, + 402 + ], + [ + 1095, + 399 + ], + [ + 1095, + 396 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1098, + 365 + ], + [ + 1099, + 463 + ], + [ + 1101, + 463 + ], + [ + 1101, + 364 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1096, + 355 + ], + [ + 1101, + 355 + ], + [ + 1106, + 358 + ], + [ + 1105, + 360 + ], + [ + 1102, + 358 + ], + [ + 1102, + 369 + ], + [ + 1095, + 369 + 
], + [ + 1095, + 359 + ], + [ + 1090, + 359 + ], + [ + 1091, + 357 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1079, + 462 + ], + [ + 1076, + 462 + ], + [ + 1075, + 439 + ], + [ + 1077, + 439 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1107, + 464 + ], + [ + 1104, + 463 + ], + [ + 1102, + 435 + ], + [ + 1106, + 437 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1123, + 414 + ], + [ + 1118, + 415 + ], + [ + 1114, + 417 + ], + [ + 1119, + 448 + ], + [ + 1121, + 458 + ], + [ + 1122, + 461 + ], + [ + 1125, + 463 + ], + [ + 1126, + 456 + ], + [ + 1125, + 447 + ], + [ + 1128, + 443 + ], + [ + 1128, + 431 + ], + [ + 1126, + 421 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1117, + 412 + ], + [ + 1113, + 411 + ], + [ + 1109, + 413 + ], + [ + 1109, + 415 + ], + [ + 1109, + 418 + ], + [ + 1110, + 420 + ], + [ + 1109, + 426 + ], + [ + 1109, + 434 + ], + [ + 1111, + 443 + ], + [ + 1113, + 449 + ], + [ + 1114, + 457 + ], + [ + 1115, + 461 + ], + [ + 1113, + 463 + ], + [ + 1122, + 464 + ], + [ + 1124, + 455 + ], + [ + 1121, + 436 + ], + [ + 1119, + 419 + ], + [ + 1117, + 416 + ], + [ + 1117, + 415 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1147, + 463 + ], + [ + 1149, + 463 + ], + [ + 1146, + 439 + ], + [ + 1145, + 439 + ], + [ + 1144, + 439 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1159, + 163 + ], + [ + 1154, + 163 + ], + [ + 1164, + 460 + ], + [ + 1171, + 460 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1130, + 167 + ], + [ + 1130, + 160 + ], + [ + 1155, + 161 + ], + [ + 1155, + 167 + ], + [ + 1148, + 169 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1168, + 271 + ], + [ + 1166, + 230 + ], + [ + 1147, + 231 + ], + [ + 1148, + 272 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1158, + 350 + ], + [ + 1151, + 352 + ], + [ + 1151, + 358 + ], + [ + 1154, + 358 + ], + [ + 1159, + 357 + ], + [ + 1159, + 360 + ], + [ + 1156, + 362 + ], + [ + 1152, + 363 + ], + [ + 1152, + 366 + ], + [ + 1157, + 368 + ], + [ + 1159, + 368 + ], + [ + 1157, + 371 + ], + [ + 1155, + 372 + ], + [ + 1153, + 372 + ], + [ + 1153, + 378 + ], + [ + 1157, + 380 + ], + [ + 1162, + 378 + ], + [ + 1163, + 378 + ], + [ + 1162, + 349 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1171, + 381 + ], + [ + 1169, + 349 + ], + [ + 1160, + 350 + ], + [ + 1160, + 382 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1179, + 353 + ], + [ + 1174, + 351 + ], + [ + 1172, + 346 + ], + [ + 1171, + 340 + ], + [ + 1172, + 335 + ], + [ + 1174, + 332 + ], + [ + 1177, + 331 + ], + [ + 1181, + 331 + ], + [ + 1184, + 334 + ], + [ + 1188, + 339 + ], + [ + 1188, + 344 + ], + [ + 1186, + 349 + ], + [ + 1183, + 353 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1184, + 371 + ], + [ + 1173, + 361 + ], + [ + 1184, + 351 + ], + [ + 1194, + 361 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1197, + 20 + ], + [ + 1200, + 56 + ], + [ + 1197, + 60 + ], + [ + 1204, + 250 + ], + [ + 1200, + 252 + ], + [ + 1207, + 481 + ], + [ + 1224, + 489 + ], + [ + 1216, + 252 + ], + [ + 1213, + 250 + ], + [ + 1207, + 59 + ], + [ + 1205, + 56 + ], + [ + 1202, + 0 + ], + [ + 1198, + 0 + ] + ] + }, + { + "label": "polegroup", + "polygon": [ + [ + 1162, + 477 + ], + [ + 1160, + 441 + ], + [ + 1219, + 445 + ], + [ + 1221, + 503 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1471, + 336 + ], + [ + 1464, + 90 + ], + [ + 1474, + 90 + ], + [ + 1484, + 338 + ] + ] + }, + { + "label": "fence", + 
"polygon": [ + [ + 1713, + 448 + ], + [ + 1709, + 307 + ], + [ + 1507, + 330 + ], + [ + 1357, + 336 + ], + [ + 1367, + 468 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1501, + 323 + ], + [ + 1497, + 305 + ], + [ + 1497, + 279 + ], + [ + 1506, + 263 + ], + [ + 1518, + 253 + ], + [ + 1531, + 255 + ], + [ + 1552, + 268 + ], + [ + 1570, + 275 + ], + [ + 1586, + 259 + ], + [ + 1597, + 244 + ], + [ + 1617, + 248 + ], + [ + 1632, + 265 + ], + [ + 1681, + 247 + ], + [ + 1718, + 228 + ], + [ + 1732, + 211 + ], + [ + 1740, + 197 + ], + [ + 1752, + 191 + ], + [ + 1769, + 260 + ], + [ + 1759, + 266 + ], + [ + 1741, + 286 + ], + [ + 1728, + 300 + ], + [ + 1719, + 311 + ], + [ + 1719, + 315 + ], + [ + 1728, + 318 + ], + [ + 1747, + 294 + ], + [ + 1756, + 284 + ], + [ + 1771, + 276 + ], + [ + 1775, + 276 + ], + [ + 1781, + 308 + ], + [ + 1781, + 325 + ], + [ + 1762, + 337 + ], + [ + 1747, + 349 + ], + [ + 1728, + 361 + ], + [ + 1670, + 381 + ], + [ + 1640, + 391 + ], + [ + 1637, + 411 + ], + [ + 1640, + 423 + ], + [ + 1640, + 431 + ], + [ + 1599, + 434 + ], + [ + 1584, + 384 + ], + [ + 1560, + 366 + ], + [ + 1547, + 361 + ], + [ + 1529, + 359 + ], + [ + 1522, + 367 + ], + [ + 1501, + 381 + ], + [ + 1492, + 384 + ], + [ + 1494, + 398 + ], + [ + 1502, + 416 + ], + [ + 1511, + 426 + ], + [ + 1515, + 432 + ], + [ + 1517, + 449 + ], + [ + 1512, + 463 + ], + [ + 1501, + 470 + ], + [ + 1485, + 465 + ], + [ + 1488, + 454 + ], + [ + 1495, + 446 + ], + [ + 1490, + 438 + ], + [ + 1477, + 441 + ], + [ + 1465, + 433 + ], + [ + 1461, + 413 + ], + [ + 1466, + 402 + ], + [ + 1477, + 401 + ], + [ + 1474, + 385 + ], + [ + 1475, + 360 + ], + [ + 1477, + 339 + ], + [ + 1484, + 332 + ], + [ + 1493, + 339 + ], + [ + 1494, + 351 + ], + [ + 1492, + 368 + ], + [ + 1503, + 366 + ], + [ + 1506, + 344 + ], + [ + 1507, + 339 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 2044, + 231 + ], + [ + 1746, + 294 + ], + [ + 1652, + 316 + ], + [ + 1470, + 353 + ], + [ + 1374, + 370 + ], + [ + 1376, + 484 + ], + [ + 1510, + 507 + ], + [ + 1657, + 523 + ], + [ + 1778, + 548 + ], + [ + 1871, + 581 + ], + [ + 1974, + 579 + ], + [ + 1991, + 572 + ], + [ + 1972, + 564 + ], + [ + 2048, + 563 + ], + [ + 2048, + 233 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1357, + 187 + ], + [ + 1223, + 188 + ], + [ + 1217, + 167 + ], + [ + 1319, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 34 + ], + [ + 1693, + 168 + ], + [ + 1647, + 173 + ], + [ + 1645, + 136 + ], + [ + 1807, + 55 + ], + [ + 1500, + 67 + ], + [ + 1411, + 141 + ], + [ + 1410, + 148 + ], + [ + 1647, + 135 + ], + [ + 1647, + 173 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1264, + 179 + ], + [ + 1272, + 476 + ], + [ + 1298, + 476 + ], + [ + 1287, + 180 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1319, + 278 + ], + [ + 1307, + 300 + ], + [ + 1305, + 373 + ], + [ + 1313, + 428 + ], + [ + 1315, + 474 + ], + [ + 1322, + 473 + ], + [ + 1309, + 307 + ], + [ + 1310, + 294 + ], + [ + 1321, + 284 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1300, + 364 + ], + [ + 1301, + 386 + ], + [ + 1311, + 386 + ], + [ + 1310, + 362 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1322, + 123 + ], + [ + 1310, + 123 + ], + [ + 1322, + 477 + ], + [ + 1351, + 481 + ], + [ + 1340, + 122 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1337, + 3 + ], + [ + 1343, + 64 + ], + [ + 1335, + 70 + ], + [ + 1352, + 545 + ], + [ + 1384, + 543 + ], + [ + 1366, + 65 + ], + [ + 1363, + 59 + ], + [ + 1359, + 0 + ], + [ + 1359, + 0 + ], 
+ [ + 1337, + 0 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1403, + 496 + ], + [ + 1387, + 27 + ], + [ + 1433, + 25 + ], + [ + 1447, + 498 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1265, + 394 + ], + [ + 1261, + 393 + ], + [ + 1253, + 394 + ], + [ + 1252, + 405 + ], + [ + 1251, + 411 + ], + [ + 1243, + 429 + ], + [ + 1245, + 443 + ], + [ + 1250, + 447 + ], + [ + 1252, + 460 + ], + [ + 1253, + 482 + ], + [ + 1260, + 486 + ], + [ + 1265, + 475 + ], + [ + 1268, + 458 + ], + [ + 1271, + 447 + ], + [ + 1276, + 436 + ], + [ + 1276, + 423 + ], + [ + 1270, + 410 + ], + [ + 1266, + 403 + ], + [ + 1266, + 398 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1290, + 399 + ], + [ + 1282, + 406 + ], + [ + 1280, + 412 + ], + [ + 1284, + 420 + ], + [ + 1287, + 436 + ], + [ + 1292, + 471 + ], + [ + 1303, + 480 + ], + [ + 1313, + 479 + ], + [ + 1311, + 459 + ], + [ + 1311, + 435 + ], + [ + 1313, + 422 + ], + [ + 1317, + 419 + ], + [ + 1319, + 405 + ], + [ + 1309, + 394 + ], + [ + 1302, + 389 + ], + [ + 1302, + 382 + ], + [ + 1294, + 381 + ], + [ + 1292, + 389 + ], + [ + 1292, + 394 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1385, + 281 + ], + [ + 1381, + 233 + ], + [ + 1335, + 235 + ], + [ + 1335, + 278 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1333, + 324 + ], + [ + 1331, + 307 + ], + [ + 1333, + 297 + ], + [ + 1336, + 289 + ], + [ + 1343, + 285 + ], + [ + 1353, + 284 + ], + [ + 1363, + 288 + ], + [ + 1368, + 298 + ], + [ + 1369, + 311 + ], + [ + 1364, + 322 + ], + [ + 1359, + 328 + ], + [ + 1349, + 332 + ], + [ + 1340, + 327 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1562, + 522 + ], + [ + 1543, + 0 + ], + [ + 1603, + 0 + ], + [ + 1623, + 527 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1232, + 505 + ], + [ + 1229, + 446 + ], + [ + 1235, + 445 + ], + [ + 1238, + 509 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1250, + 509 + ], + [ + 1243, + 446 + ], + [ + 1238, + 446 + ], + [ + 1241, + 511 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1260, + 519 + ], + [ + 1254, + 446 + ], + [ + 1263, + 446 + ], + [ + 1268, + 517 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1280, + 522 + ], + [ + 1276, + 447 + ], + [ + 1267, + 445 + ], + [ + 1270, + 524 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1299, + 532 + ], + [ + 1294, + 446 + ], + [ + 1299, + 446 + ], + [ + 1307, + 533 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1314, + 538 + ], + [ + 1308, + 444 + ], + [ + 1318, + 445 + ], + [ + 1323, + 540 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1358, + 559 + ], + [ + 1356, + 449 + ], + [ + 1363, + 449 + ], + [ + 1370, + 563 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1476, + 607 + ], + [ + 1474, + 461 + ], + [ + 1488, + 460 + ], + [ + 1488, + 603 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1610, + 660 + ], + [ + 1604, + 458 + ], + [ + 1625, + 457 + ], + [ + 1632, + 660 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1842, + 754 + ], + [ + 1830, + 471 + ], + [ + 1858, + 473 + ], + [ + 1874, + 754 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1190, + 273 + ], + [ + 1189, + 326 + ], + [ + 1270, + 324 + ], + [ + 1267, + 272 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 955, + 452 + ], + [ + 958, + 452 + ], + [ + 960, + 452 + ], + [ + 961, + 447 + ], + [ + 959, + 439 + ], + [ + 957, + 433 + ], + [ + 954, + 427 + ], + [ + 944, + 426 + ], + [ + 936, + 427 + ], + [ + 932, + 429 + ], + [ + 
931, + 442 + ], + [ + 934, + 452 + ], + [ + 943, + 455 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 930, + 348 + ], + [ + 931, + 445 + ], + [ + 938, + 451 + ], + [ + 934, + 346 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 938, + 361 + ], + [ + 944, + 361 + ], + [ + 944, + 363 + ], + [ + 941, + 365 + ], + [ + 937, + 365 + ], + [ + 937, + 369 + ], + [ + 941, + 370 + ], + [ + 944, + 370 + ], + [ + 944, + 372 + ], + [ + 943, + 374 + ], + [ + 937, + 376 + ], + [ + 937, + 380 + ], + [ + 944, + 381 + ], + [ + 944, + 381 + ], + [ + 943, + 385 + ], + [ + 940, + 386 + ], + [ + 936, + 387 + ], + [ + 934, + 387 + ], + [ + 934, + 361 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 896, + 343 + ], + [ + 897, + 352 + ], + [ + 905, + 352 + ], + [ + 905, + 343 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 884, + 300 + ], + [ + 885, + 356 + ], + [ + 890, + 357 + ], + [ + 891, + 300 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 101, + 478 + ], + [ + 103, + 467 + ], + [ + 109, + 457 + ], + [ + 116, + 454 + ], + [ + 133, + 454 + ], + [ + 190, + 453 + ], + [ + 211, + 459 + ], + [ + 227, + 467 + ], + [ + 236, + 469 + ], + [ + 253, + 475 + ], + [ + 273, + 487 + ], + [ + 267, + 504 + ], + [ + 229, + 505 + ], + [ + 134, + 511 + ], + [ + 104, + 512 + ], + [ + 99, + 489 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 192, + 428 + ], + [ + 194, + 495 + ], + [ + 217, + 495 + ], + [ + 213, + 426 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 877, + 413 + ], + [ + 878, + 444 + ], + [ + 889, + 441 + ], + [ + 902, + 435 + ], + [ + 899, + 422 + ], + [ + 895, + 414 + ], + [ + 886, + 414 + ], + [ + 877, + 413 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 170, + 338 + ], + [ + 178, + 488 + ], + [ + 184, + 488 + ], + [ + 174, + 336 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 435, + 512 + ], + [ + 427, + 510 + ], + [ + 0, + 567 + ], + [ + 0, + 591 + ], + [ + 437, + 525 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 166, + 326 + ], + [ + 154, + 326 + ], + [ + 153, + 323 + ], + [ + 166, + 322 + ], + [ + 166, + 319 + ], + [ + 168, + 318 + ], + [ + 174, + 318 + ], + [ + 178, + 319 + ], + [ + 178, + 320 + ], + [ + 189, + 321 + ], + [ + 189, + 324 + ], + [ + 186, + 326 + ], + [ + 180, + 326 + ], + [ + 179, + 340 + ], + [ + 174, + 344 + ], + [ + 167, + 343 + ], + [ + 166, + 342 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 484, + 271 + ], + [ + 473, + 274 + ], + [ + 462, + 278 + ], + [ + 456, + 284 + ], + [ + 452, + 296 + ], + [ + 459, + 504 + ], + [ + 465, + 503 + ], + [ + 455, + 296 + ], + [ + 456, + 288 + ], + [ + 462, + 281 + ], + [ + 475, + 277 + ], + [ + 483, + 275 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 447, + 478 + ], + [ + 443, + 479 + ], + [ + 443, + 475 + ], + [ + 435, + 471 + ], + [ + 429, + 475 + ], + [ + 426, + 482 + ], + [ + 423, + 495 + ], + [ + 427, + 505 + ], + [ + 435, + 510 + ], + [ + 440, + 502 + ], + [ + 448, + 497 + ], + [ + 450, + 508 + ], + [ + 457, + 511 + ], + [ + 462, + 504 + ], + [ + 466, + 497 + ], + [ + 469, + 487 + ], + [ + 469, + 476 + ], + [ + 469, + 469 + ], + [ + 465, + 466 + ], + [ + 471, + 465 + ], + [ + 475, + 465 + ], + [ + 454, + 462 + ], + [ + 445, + 462 + ], + [ + 442, + 465 + ], + [ + 445, + 470 + ], + [ + 447, + 475 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 484, + 265 + ], + [ + 488, + 267 + ], + [ + 493, + 267 + ], + [ + 496, + 275 + ], + [ + 491, + 276 + ], + [ + 492, + 298 + ], + [ + 487, + 302 + ], + [ + 482, 
+ 300 + ], + [ + 483, + 297 + ], + [ + 478, + 297 + ], + [ + 476, + 294 + ], + [ + 481, + 293 + ], + [ + 482, + 289 + ], + [ + 479, + 286 + ], + [ + 477, + 283 + ], + [ + 483, + 282 + ], + [ + 483, + 277 + ], + [ + 477, + 277 + ], + [ + 477, + 273 + ], + [ + 479, + 272 + ], + [ + 482, + 269 + ], + [ + 481, + 265 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 433, + 358 + ], + [ + 434, + 367 + ], + [ + 455, + 367 + ], + [ + 454, + 357 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 433, + 374 + ], + [ + 451, + 373 + ], + [ + 444, + 399 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 24, + 480 + ], + [ + 133, + 474 + ], + [ + 326, + 463 + ], + [ + 438, + 460 + ], + [ + 489, + 460 + ], + [ + 489, + 508 + ], + [ + 397, + 514 + ], + [ + 0, + 569 + ], + [ + 0, + 481 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 455, + 414 + ], + [ + 436, + 415 + ], + [ + 431, + 414 + ], + [ + 429, + 411 + ], + [ + 426, + 411 + ], + [ + 425, + 408 + ], + [ + 428, + 408 + ], + [ + 429, + 403 + ], + [ + 433, + 401 + ], + [ + 441, + 399 + ], + [ + 455, + 400 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 388, + 132 + ], + [ + 405, + 512 + ], + [ + 425, + 511 + ], + [ + 402, + 131 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 394, + 413 + ], + [ + 396, + 434 + ], + [ + 423, + 433 + ], + [ + 422, + 412 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 571, + 248 + ], + [ + 570, + 280 + ], + [ + 573, + 280 + ], + [ + 573, + 246 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 563, + 242 + ], + [ + 563, + 238 + ], + [ + 572, + 238 + ], + [ + 576, + 245 + ], + [ + 575, + 249 + ], + [ + 571, + 251 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 664, + 146 + ], + [ + 668, + 293 + ], + [ + 674, + 293 + ], + [ + 669, + 144 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 607, + 278 + ], + [ + 609, + 273 + ], + [ + 695, + 252 + ], + [ + 696, + 254 + ], + [ + 693, + 257 + ], + [ + 614, + 275 + ], + [ + 612, + 282 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 692, + 242 + ], + [ + 693, + 283 + ], + [ + 714, + 283 + ], + [ + 710, + 241 + ] + ] + }, + { + "label": "train", + "polygon": [ + [ + 544, + 276 + ], + [ + 513, + 278 + ], + [ + 492, + 281 + ], + [ + 493, + 296 + ], + [ + 487, + 301 + ], + [ + 484, + 328 + ], + [ + 479, + 330 + ], + [ + 475, + 344 + ], + [ + 474, + 357 + ], + [ + 474, + 369 + ], + [ + 482, + 381 + ], + [ + 483, + 388 + ], + [ + 482, + 475 + ], + [ + 483, + 506 + ], + [ + 490, + 514 + ], + [ + 491, + 532 + ], + [ + 497, + 541 + ], + [ + 512, + 542 + ], + [ + 533, + 541 + ], + [ + 537, + 548 + ], + [ + 566, + 550 + ], + [ + 612, + 546 + ], + [ + 707, + 525 + ], + [ + 792, + 501 + ], + [ + 875, + 472 + ], + [ + 871, + 387 + ], + [ + 855, + 359 + ], + [ + 807, + 339 + ], + [ + 782, + 331 + ], + [ + 744, + 314 + ], + [ + 737, + 289 + ], + [ + 734, + 282 + ], + [ + 768, + 234 + ], + [ + 772, + 228 + ], + [ + 778, + 229 + ], + [ + 783, + 236 + ], + [ + 784, + 231 + ], + [ + 778, + 224 + ], + [ + 770, + 222 + ], + [ + 708, + 225 + ], + [ + 698, + 229 + ], + [ + 691, + 236 + ], + [ + 692, + 240 + ], + [ + 699, + 235 + ], + [ + 704, + 231 + ], + [ + 721, + 280 + ], + [ + 724, + 284 + ], + [ + 731, + 309 + ], + [ + 732, + 311 + ], + [ + 668, + 285 + ], + [ + 657, + 286 + ], + [ + 652, + 286 + ], + [ + 644, + 280 + ], + [ + 642, + 276 + ], + [ + 597, + 274 + ], + [ + 570, + 272 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 824, + 289 + ], + [ + 828, + 367 + ], + [ + 873, 
+ 368 + ], + [ + 867, + 290 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 859, + 402 + ], + [ + 849, + 390 + ], + [ + 838, + 391 + ], + [ + 833, + 403 + ], + [ + 835, + 412 + ], + [ + 824, + 416 + ], + [ + 821, + 430 + ], + [ + 840, + 442 + ], + [ + 857, + 449 + ], + [ + 883, + 444 + ], + [ + 886, + 436 + ], + [ + 884, + 430 + ], + [ + 883, + 413 + ], + [ + 873, + 413 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 928, + 351 + ], + [ + 929, + 381 + ], + [ + 882, + 379 + ], + [ + 882, + 354 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 957, + 478 + ], + [ + 860, + 520 + ], + [ + 852, + 469 + ], + [ + 861, + 441 + ], + [ + 867, + 434 + ], + [ + 869, + 413 + ], + [ + 860, + 405 + ], + [ + 855, + 394 + ], + [ + 851, + 347 + ], + [ + 780, + 354 + ], + [ + 748, + 339 + ], + [ + 748, + 339 + ], + [ + 854, + 332 + ], + [ + 890, + 349 + ], + [ + 888, + 354 + ], + [ + 892, + 436 + ], + [ + 908, + 436 + ], + [ + 905, + 378 + ], + [ + 889, + 378 + ], + [ + 886, + 363 + ], + [ + 902, + 363 + ], + [ + 929, + 371 + ], + [ + 934, + 438 + ], + [ + 951, + 439 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1023, + 450 + ], + [ + 1017, + 450 + ], + [ + 1009, + 444 + ], + [ + 1007, + 434 + ], + [ + 1014, + 431 + ], + [ + 1021, + 434 + ], + [ + 1025, + 445 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 992, + 451 + ], + [ + 991, + 455 + ], + [ + 986, + 455 + ], + [ + 985, + 450 + ], + [ + 986, + 440 + ], + [ + 988, + 435 + ], + [ + 991, + 428 + ], + [ + 1013, + 428 + ], + [ + 1018, + 436 + ], + [ + 1020, + 447 + ], + [ + 1020, + 454 + ], + [ + 1017, + 456 + ], + [ + 1014, + 454 + ], + [ + 1014, + 451 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 864, + 439 + ], + [ + 866, + 470 + ], + [ + 867, + 509 + ], + [ + 867, + 518 + ], + [ + 867, + 520 + ], + [ + 796, + 554 + ], + [ + 791, + 443 + ], + [ + 850, + 436 + ], + [ + 864, + 439 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 891, + 436 + ], + [ + 892, + 456 + ], + [ + 894, + 483 + ], + [ + 894, + 505 + ], + [ + 918, + 494 + ], + [ + 918, + 459 + ], + [ + 917, + 436 + ], + [ + 914, + 436 + ], + [ + 908, + 436 + ], + [ + 892, + 436 + ], + [ + 891, + 436 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 934, + 488 + ], + [ + 932, + 470 + ], + [ + 931, + 444 + ], + [ + 931, + 436 + ], + [ + 933, + 428 + ], + [ + 934, + 438 + ], + [ + 951, + 439 + ], + [ + 957, + 478 + ], + [ + 934, + 488 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000140_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000140_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..17d63e3539058b930deda9a9c2cc972ac204c800 Binary files /dev/null and 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000140_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000140_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000140_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..a3e6cc097d5181cd0cffdae88d149f6fa4c26597 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000140_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000140_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000140_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..452c9d9e2a0d9a08c91f1f5a982899cc840501c3 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000140_000019_gtFine_polygons.json @@ -0,0 +1,4982 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 783, + 53 + ], + [ + 1127, + 386 + ], + [ + 1226, + 398 + ], + [ + 1294, + 223 + ], + [ + 1390, + 0 + ], + [ + 754, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2021, + 741 + ], + [ + 1266, + 451 + ], + [ + 1201, + 455 + ], + [ + 1184, + 447 + ], + [ + 1127, + 445 + ], + [ + 950, + 459 + ], + [ + 172, + 533 + ], + [ + 0, + 580 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 740 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1102, + 105 + ], + [ + 1110, + 332 + ], + [ + 1114, + 335 + ], + [ + 1136, + 336 + ], + [ + 1137, + 363 + ], + [ + 1151, + 364 + ], + [ + 1155, + 369 + ], + [ + 1188, + 372 + ], + [ + 1216, + 410 + ], + [ + 1221, + 437 + ], + [ + 1216, + 453 + ], + [ + 1186, + 454 + ], + [ + 1148, + 456 + ], + [ + 1113, + 456 + ], + [ + 1041, + 468 + ], + [ + 1020, + 471 + ], + [ + 1001, + 474 + ], + [ + 920, + 479 + ], + [ + 754, + 499 + ], + [ + 449, + 551 + ], + [ + 28, + 609 + ], + [ + 0, + 614 + ], + [ + 0, + 0 + ], + [ + 862, + 0 + ], + [ + 862, + 5 + ], + [ + 861, + 9 + ], + [ + 866, + 11 + ], + [ + 879, + 11 + ], + [ + 878, + 18 + ], + [ + 882, + 21 + ], + [ + 893, + 39 + ], + [ + 890, + 43 + ], + [ + 890, + 66 + ], + [ + 919, + 72 + ], + [ + 933, + 93 + ], + [ + 964, + 93 + ], + [ + 1017, + 92 + ], + [ + 1057, + 94 + ], + [ + 1061, + 99 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 2001, + 597 + ], + [ + 1522, + 532 + ], + [ + 1474, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 595 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1185, + 439 + ], + [ + 1179, + 437 + ], + [ + 1169, + 437 + ], + [ + 1154, + 438 + ], + [ + 1143, + 439 + ], + [ + 1139, + 446 + ], + [ + 1140, + 451 + ], + [ + 1148, + 456 + ], + [ + 1160, + 457 + ], + [ + 1170, + 457 + ], + [ + 1182, + 456 + ], + [ + 1193, + 455 + ], + [ + 1195, + 449 + ], + [ + 1193, + 443 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1195, + 463 + ], + [ + 1185, + 459 + ], + [ + 1182, + 449 + ], + [ + 1182, + 437 + ], + [ + 1179, + 431 + ], + [ + 1175, + 426 + ], + [ + 1171, + 419 + ], + [ + 1169, + 415 + ], + [ + 1166, + 411 + ], + [ + 1161, + 408 + ], + [ + 1150, + 407 + ], + [ + 1141, + 403 + ], + [ + 1135, + 394 + ], + [ + 1133, + 385 + ], + [ + 1149, + 384 + ], + [ + 1155, + 382 + ], + [ + 1143, + 373 + ], + [ + 1143, + 364 + ], + [ + 1149, + 360 + ], + [ + 1154, + 363 + ], + [ + 1160, + 366 + ], + [ + 1168, + 353 + ], + [ + 1177, + 348 + ], + [ + 1176, + 345 + ], + [ + 1169, + 342 + ], + [ + 1167, + 331 + ], + [ + 1173, + 323 + ], + [ + 1177, + 
324 + ], + [ + 1177, + 317 + ], + [ + 1179, + 311 + ], + [ + 1183, + 312 + ], + [ + 1183, + 307 + ], + [ + 1188, + 309 + ], + [ + 1197, + 316 + ], + [ + 1207, + 320 + ], + [ + 1208, + 317 + ], + [ + 1210, + 311 + ], + [ + 1214, + 306 + ], + [ + 1213, + 295 + ], + [ + 1219, + 293 + ], + [ + 1228, + 290 + ], + [ + 1226, + 271 + ], + [ + 1229, + 261 + ], + [ + 1238, + 250 + ], + [ + 1259, + 247 + ], + [ + 1311, + 310 + ], + [ + 1326, + 396 + ], + [ + 1328, + 459 + ], + [ + 1318, + 490 + ], + [ + 1294, + 506 + ], + [ + 1281, + 508 + ], + [ + 1269, + 503 + ], + [ + 1264, + 500 + ], + [ + 1243, + 499 + ], + [ + 1222, + 503 + ], + [ + 1223, + 493 + ], + [ + 1222, + 487 + ], + [ + 1219, + 486 + ], + [ + 1208, + 488 + ], + [ + 1206, + 482 + ], + [ + 1212, + 472 + ], + [ + 1209, + 469 + ], + [ + 1209, + 466 + ], + [ + 1209, + 457 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1184, + 409 + ], + [ + 1157, + 406 + ], + [ + 1157, + 404 + ], + [ + 1183, + 406 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1160, + 401 + ], + [ + 1159, + 411 + ], + [ + 1155, + 411 + ], + [ + 1155, + 400 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1177, + 389 + ], + [ + 1177, + 371 + ], + [ + 1185, + 372 + ], + [ + 1185, + 388 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1050, + 118 + ], + [ + 1050, + 126 + ], + [ + 1074, + 127 + ], + [ + 1074, + 118 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1080, + 209 + ], + [ + 1080, + 215 + ], + [ + 1099, + 214 + ], + [ + 1099, + 209 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1094, + 251 + ], + [ + 1093, + 257 + ], + [ + 1106, + 255 + ], + [ + 1106, + 251 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1116, + 305 + ], + [ + 1117, + 309 + ], + [ + 1106, + 309 + ], + [ + 1106, + 304 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1139, + 320 + ], + [ + 1138, + 313 + ], + [ + 1133, + 314 + ], + [ + 1133, + 321 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1145, + 352 + ], + [ + 1149, + 349 + ], + [ + 1152, + 351 + ], + [ + 1150, + 352 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1209, + 432 + ], + [ + 1205, + 430 + ], + [ + 1203, + 437 + ], + [ + 1205, + 443 + ], + [ + 1212, + 442 + ], + [ + 1213, + 436 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1198, + 445 + ], + [ + 1192, + 446 + ], + [ + 1188, + 448 + ], + [ + 1186, + 453 + ], + [ + 1185, + 459 + ], + [ + 1186, + 466 + ], + [ + 1191, + 467 + ], + [ + 1195, + 464 + ], + [ + 1201, + 463 + ], + [ + 1206, + 462 + ], + [ + 1209, + 448 + ], + [ + 1207, + 445 + ], + [ + 1203, + 445 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1216, + 441 + ], + [ + 1208, + 442 + ], + [ + 1203, + 448 + ], + [ + 1201, + 458 + ], + [ + 1201, + 469 + ], + [ + 1203, + 474 + ], + [ + 1212, + 472 + ], + [ + 1211, + 467 + ], + [ + 1212, + 462 + ], + [ + 1214, + 457 + ], + [ + 1214, + 453 + ], + [ + 1214, + 450 + ], + [ + 1216, + 448 + ], + [ + 1218, + 443 + ], + [ + 1219, + 441 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1277, + 377 + ], + [ + 1277, + 425 + ], + [ + 1283, + 425 + ], + [ + 1282, + 374 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1287, + 357 + ], + [ + 1283, + 353 + ], + [ + 1280, + 352 + ], + [ + 1275, + 352 + ], + [ + 1271, + 354 + ], + [ + 1266, + 358 + ], + [ + 1266, + 364 + ], + [ + 1266, + 369 + ], + [ + 1267, + 375 + ], + [ + 1271, + 378 + ], + [ + 1278, + 379 + ], + [ + 1283, + 378 + ], + [ + 1287, + 373 + ], + [ + 1289, + 366 + ], + [ + 1289, 
+ 362 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1149, + 424 + ], + [ + 1149, + 430 + ], + [ + 1145, + 430 + ], + [ + 1145, + 425 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1138, + 468 + ], + [ + 1149, + 467 + ], + [ + 1150, + 469 + ], + [ + 1151, + 469 + ], + [ + 1153, + 468 + ], + [ + 1156, + 465 + ], + [ + 1155, + 461 + ], + [ + 1152, + 457 + ], + [ + 1150, + 457 + ], + [ + 1149, + 455 + ], + [ + 1146, + 454 + ], + [ + 1142, + 451 + ], + [ + 1139, + 450 + ], + [ + 1131, + 448 + ], + [ + 1125, + 450 + ], + [ + 1117, + 452 + ], + [ + 1117, + 464 + ], + [ + 1120, + 468 + ], + [ + 1123, + 469 + ], + [ + 1128, + 469 + ], + [ + 1131, + 469 + ], + [ + 1133, + 470 + ], + [ + 1135, + 470 + ], + [ + 1137, + 470 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1123, + 401 + ], + [ + 1122, + 409 + ], + [ + 1118, + 409 + ], + [ + 1118, + 400 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1058, + 363 + ], + [ + 1058, + 373 + ], + [ + 1068, + 374 + ], + [ + 1067, + 363 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1006, + 406 + ], + [ + 1005, + 464 + ], + [ + 1009, + 463 + ], + [ + 1007, + 405 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1004, + 408 + ], + [ + 1003, + 402 + ], + [ + 1006, + 402 + ], + [ + 1009, + 402 + ], + [ + 1011, + 404 + ], + [ + 1010, + 407 + ], + [ + 1008, + 408 + ], + [ + 1006, + 408 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1011, + 417 + ], + [ + 1013, + 413 + ], + [ + 1012, + 411 + ], + [ + 1008, + 411 + ], + [ + 1003, + 411 + ], + [ + 1001, + 415 + ], + [ + 1003, + 415 + ], + [ + 1007, + 417 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1012, + 431 + ], + [ + 1012, + 426 + ], + [ + 1009, + 423 + ], + [ + 1006, + 424 + ], + [ + 1004, + 426 + ], + [ + 1004, + 431 + ], + [ + 1006, + 432 + ], + [ + 1010, + 432 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 958, + 411 + ], + [ + 957, + 446 + ], + [ + 960, + 446 + ], + [ + 960, + 411 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 982, + 411 + ], + [ + 982, + 408 + ], + [ + 979, + 408 + ], + [ + 979, + 403 + ], + [ + 957, + 404 + ], + [ + 957, + 413 + ], + [ + 957, + 414 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 956, + 422 + ], + [ + 954, + 418 + ], + [ + 955, + 414 + ], + [ + 959, + 413 + ], + [ + 962, + 413 + ], + [ + 965, + 415 + ], + [ + 965, + 419 + ], + [ + 962, + 423 + ], + [ + 960, + 424 + ], + [ + 957, + 424 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 974, + 394 + ], + [ + 974, + 447 + ], + [ + 975, + 447 + ], + [ + 975, + 392 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 977, + 395 + ], + [ + 977, + 392 + ], + [ + 974, + 392 + ], + [ + 973, + 393 + ], + [ + 971, + 393 + ], + [ + 970, + 397 + ], + [ + 973, + 400 + ], + [ + 975, + 400 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 931, + 347 + ], + [ + 950, + 347 + ], + [ + 950, + 371 + ], + [ + 932, + 372 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1164, + 463 + ], + [ + 1163, + 465 + ], + [ + 1162, + 465 + ], + [ + 1161, + 459 + ], + [ + 1161, + 455 + ], + [ + 1163, + 450 + ], + [ + 1167, + 448 + ], + [ + 1172, + 447 + ], + [ + 1176, + 448 + ], + [ + 1179, + 451 + ], + [ + 1180, + 456 + ], + [ + 1180, + 462 + ], + [ + 1180, + 465 + ], + [ + 1179, + 465 + ], + [ + 1178, + 462 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 968, + 365 + ], + [ + 969, + 410 + ], + [ + 931, + 410 + ], + [ + 930, + 364 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 937, + 383 
+ ], + [ + 937, + 441 + ], + [ + 934, + 442 + ], + [ + 933, + 383 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 909, + 391 + ], + [ + 909, + 411 + ], + [ + 931, + 411 + ], + [ + 930, + 389 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 716, + 344 + ], + [ + 717, + 377 + ], + [ + 636, + 376 + ], + [ + 633, + 347 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 911, + 272 + ], + [ + 911, + 290 + ], + [ + 931, + 290 + ], + [ + 931, + 271 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 930, + 295 + ], + [ + 931, + 315 + ], + [ + 912, + 314 + ], + [ + 912, + 295 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 932, + 338 + ], + [ + 932, + 318 + ], + [ + 912, + 320 + ], + [ + 913, + 338 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 877, + 164 + ], + [ + 882, + 306 + ], + [ + 865, + 305 + ], + [ + 862, + 164 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 823, + 264 + ], + [ + 823, + 255 + ], + [ + 815, + 256 + ], + [ + 819, + 250 + ], + [ + 820, + 245 + ], + [ + 818, + 239 + ], + [ + 815, + 235 + ], + [ + 808, + 234 + ], + [ + 802, + 234 + ], + [ + 799, + 235 + ], + [ + 795, + 241 + ], + [ + 794, + 247 + ], + [ + 797, + 254 + ], + [ + 800, + 257 + ], + [ + 789, + 257 + ], + [ + 789, + 266 + ] + ] + }, + { + "label": "train", + "polygon": [ + [ + 1033, + 482 + ], + [ + 1028, + 481 + ], + [ + 1026, + 477 + ], + [ + 1019, + 475 + ], + [ + 1017, + 440 + ], + [ + 1018, + 425 + ], + [ + 1014, + 421 + ], + [ + 1015, + 409 + ], + [ + 1018, + 406 + ], + [ + 1018, + 401 + ], + [ + 1019, + 390 + ], + [ + 1022, + 378 + ], + [ + 1025, + 373 + ], + [ + 1030, + 372 + ], + [ + 1067, + 369 + ], + [ + 1075, + 368 + ], + [ + 1073, + 362 + ], + [ + 1071, + 353 + ], + [ + 1062, + 332 + ], + [ + 1059, + 328 + ], + [ + 1058, + 331 + ], + [ + 1053, + 331 + ], + [ + 1058, + 326 + ], + [ + 1094, + 324 + ], + [ + 1098, + 324 + ], + [ + 1103, + 328 + ], + [ + 1103, + 331 + ], + [ + 1098, + 328 + ], + [ + 1096, + 329 + ], + [ + 1094, + 331 + ], + [ + 1080, + 358 + ], + [ + 1082, + 371 + ], + [ + 1091, + 375 + ], + [ + 1098, + 378 + ], + [ + 1117, + 397 + ], + [ + 1118, + 418 + ], + [ + 1120, + 470 + ], + [ + 1118, + 472 + ], + [ + 1115, + 474 + ], + [ + 1109, + 476 + ], + [ + 1108, + 478 + ], + [ + 1095, + 481 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 963, + 450 + ], + [ + 963, + 447 + ], + [ + 961, + 443 + ], + [ + 971, + 440 + ], + [ + 978, + 440 + ], + [ + 986, + 439 + ], + [ + 992, + 439 + ], + [ + 998, + 442 + ], + [ + 1001, + 446 + ], + [ + 1008, + 454 + ], + [ + 1011, + 458 + ], + [ + 1013, + 464 + ], + [ + 1010, + 471 + ], + [ + 1010, + 478 + ], + [ + 1009, + 480 + ], + [ + 1007, + 479 + ], + [ + 1003, + 477 + ], + [ + 1001, + 479 + ], + [ + 999, + 481 + ], + [ + 994, + 481 + ], + [ + 992, + 476 + ], + [ + 973, + 477 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 913, + 446 + ], + [ + 918, + 443 + ], + [ + 923, + 441 + ], + [ + 945, + 439 + ], + [ + 959, + 439 + ], + [ + 966, + 448 + ], + [ + 970, + 453 + ], + [ + 973, + 451 + ], + [ + 976, + 453 + ], + [ + 975, + 456 + ], + [ + 974, + 460 + ], + [ + 976, + 473 + ], + [ + 974, + 478 + ], + [ + 973, + 483 + ], + [ + 971, + 485 + ], + [ + 966, + 486 + ], + [ + 962, + 486 + ], + [ + 961, + 483 + ], + [ + 945, + 485 + ], + [ + 929, + 485 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 902, + 441 + ], + [ + 907, + 441 + ], + [ + 913, + 439 + ], + [ + 919, + 443 + ], + [ + 924, + 449 + ], + [ + 927, + 454 + ], + [ + 928, + 455 + ], + [ + 934, + 454 + ], + [ + 
937, + 455 + ], + [ + 938, + 460 + ], + [ + 936, + 463 + ], + [ + 938, + 472 + ], + [ + 939, + 480 + ], + [ + 935, + 486 + ], + [ + 930, + 488 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 755, + 435 + ], + [ + 756, + 427 + ], + [ + 759, + 408 + ], + [ + 761, + 389 + ], + [ + 765, + 379 + ], + [ + 768, + 376 + ], + [ + 839, + 371 + ], + [ + 855, + 373 + ], + [ + 876, + 378 + ], + [ + 881, + 382 + ], + [ + 885, + 392 + ], + [ + 894, + 419 + ], + [ + 897, + 431 + ], + [ + 897, + 448 + ], + [ + 883, + 469 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 654, + 444 + ], + [ + 656, + 440 + ], + [ + 660, + 437 + ], + [ + 669, + 434 + ], + [ + 692, + 432 + ], + [ + 695, + 432 + ], + [ + 698, + 430 + ], + [ + 701, + 430 + ], + [ + 709, + 431 + ], + [ + 736, + 430 + ], + [ + 749, + 430 + ], + [ + 762, + 435 + ], + [ + 765, + 441 + ], + [ + 766, + 455 + ], + [ + 760, + 526 + ], + [ + 743, + 527 + ], + [ + 727, + 528 + ], + [ + 711, + 529 + ], + [ + 691, + 519 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 544, + 254 + ], + [ + 549, + 454 + ], + [ + 554, + 454 + ], + [ + 555, + 454 + ], + [ + 547, + 251 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 537, + 255 + ], + [ + 535, + 252 + ], + [ + 524, + 251 + ], + [ + 517, + 250 + ], + [ + 519, + 244 + ], + [ + 537, + 242 + ], + [ + 539, + 242 + ], + [ + 539, + 238 + ], + [ + 545, + 236 + ], + [ + 553, + 236 + ], + [ + 554, + 240 + ], + [ + 554, + 243 + ], + [ + 568, + 244 + ], + [ + 573, + 246 + ], + [ + 573, + 248 + ], + [ + 551, + 248 + ], + [ + 554, + 270 + ], + [ + 550, + 278 + ], + [ + 544, + 284 + ], + [ + 537, + 277 + ], + [ + 536, + 274 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 562, + 302 + ], + [ + 561, + 296 + ], + [ + 560, + 292 + ], + [ + 555, + 287 + ], + [ + 547, + 286 + ], + [ + 543, + 287 + ], + [ + 538, + 291 + ], + [ + 535, + 298 + ], + [ + 534, + 308 + ], + [ + 538, + 315 + ], + [ + 546, + 318 + ], + [ + 559, + 316 + ], + [ + 565, + 311 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 563, + 336 + ], + [ + 564, + 314 + ], + [ + 535, + 317 + ], + [ + 537, + 339 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 565, + 353 + ], + [ + 565, + 348 + ], + [ + 562, + 340 + ], + [ + 559, + 338 + ], + [ + 546, + 339 + ], + [ + 537, + 340 + ], + [ + 535, + 348 + ], + [ + 535, + 357 + ], + [ + 538, + 363 + ], + [ + 546, + 366 + ], + [ + 555, + 366 + ], + [ + 561, + 361 + ], + [ + 562, + 357 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 564, + 381 + ], + [ + 564, + 365 + ], + [ + 538, + 366 + ], + [ + 537, + 381 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1270, + 497 + ], + [ + 1234, + 500 + ], + [ + 1232, + 503 + ], + [ + 1471, + 714 + ], + [ + 1916, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 698 + ], + [ + 1383, + 520 + ], + [ + 1278, + 496 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 506, + 349 + ], + [ + 503, + 315 + ], + [ + 399, + 317 + ], + [ + 401, + 350 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 252, + 327 + ], + [ + 250, + 277 + ], + [ + 138, + 280 + ], + [ + 141, + 329 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 20, + 137 + ], + [ + 24, + 454 + ], + [ + 35, + 452 + ], + [ + 26, + 133 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1270, + 497 + ], + [ + 1234, + 500 + ], + [ + 1232, + 503 + ], + [ + 1471, + 714 + ], + [ + 1916, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 698 + ], + [ + 1383, + 520 + ], + [ + 1278, + 496 + ] + ] + }, + { + 
"label": "static", + "polygon": [ + [ + 38, + 135 + ], + [ + 35, + 92 + ], + [ + 65, + 92 + ], + [ + 74, + 85 + ], + [ + 68, + 80 + ], + [ + 42, + 76 + ], + [ + 37, + 66 + ], + [ + 18, + 63 + ], + [ + 2, + 65 + ], + [ + 0, + 68 + ], + [ + 0, + 89 + ], + [ + 3, + 92 + ], + [ + 7, + 97 + ], + [ + 11, + 140 + ], + [ + 21, + 153 + ], + [ + 27, + 151 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 648, + 180 + ], + [ + 610, + 180 + ], + [ + 604, + 1 + ], + [ + 643, + 2 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 506, + 484 + ], + [ + 511, + 476 + ], + [ + 518, + 467 + ], + [ + 531, + 451 + ], + [ + 534, + 441 + ], + [ + 547, + 436 + ], + [ + 591, + 434 + ], + [ + 633, + 436 + ], + [ + 657, + 440 + ], + [ + 672, + 451 + ], + [ + 683, + 466 + ], + [ + 685, + 464 + ], + [ + 690, + 468 + ], + [ + 692, + 471 + ], + [ + 689, + 477 + ], + [ + 696, + 481 + ], + [ + 706, + 491 + ], + [ + 711, + 504 + ], + [ + 709, + 533 + ], + [ + 709, + 546 + ], + [ + 705, + 549 + ], + [ + 696, + 549 + ], + [ + 689, + 547 + ], + [ + 685, + 541 + ], + [ + 684, + 540 + ], + [ + 669, + 540 + ], + [ + 669, + 549 + ], + [ + 667, + 555 + ], + [ + 661, + 560 + ], + [ + 651, + 560 + ], + [ + 640, + 558 + ], + [ + 640, + 551 + ], + [ + 639, + 546 + ], + [ + 620, + 547 + ], + [ + 611, + 546 + ], + [ + 608, + 545 + ], + [ + 583, + 545 + ], + [ + 583, + 549 + ], + [ + 578, + 558 + ], + [ + 571, + 561 + ], + [ + 562, + 559 + ], + [ + 560, + 549 + ], + [ + 559, + 547 + ], + [ + 541, + 549 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 298, + 453 + ], + [ + 305, + 446 + ], + [ + 318, + 439 + ], + [ + 349, + 434 + ], + [ + 382, + 432 + ], + [ + 423, + 431 + ], + [ + 451, + 434 + ], + [ + 483, + 453 + ], + [ + 499, + 470 + ], + [ + 506, + 466 + ], + [ + 516, + 470 + ], + [ + 515, + 480 + ], + [ + 517, + 486 + ], + [ + 537, + 505 + ], + [ + 546, + 515 + ], + [ + 549, + 555 + ], + [ + 547, + 572 + ], + [ + 543, + 583 + ], + [ + 536, + 584 + ], + [ + 526, + 584 + ], + [ + 519, + 580 + ], + [ + 518, + 573 + ], + [ + 516, + 568 + ], + [ + 496, + 567 + ], + [ + 477, + 568 + ], + [ + 471, + 571 + ], + [ + 470, + 571 + ], + [ + 468, + 582 + ], + [ + 465, + 591 + ], + [ + 464, + 598 + ], + [ + 456, + 602 + ], + [ + 444, + 602 + ], + [ + 436, + 599 + ], + [ + 429, + 588 + ], + [ + 429, + 582 + ], + [ + 429, + 581 + ], + [ + 397, + 581 + ], + [ + 396, + 588 + ], + [ + 394, + 592 + ], + [ + 386, + 593 + ], + [ + 372, + 593 + ], + [ + 368, + 587 + ], + [ + 368, + 585 + ], + [ + 353, + 585 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 46, + 460 + ], + [ + 46, + 450 + ], + [ + 51, + 441 + ], + [ + 55, + 437 + ], + [ + 55, + 424 + ], + [ + 71, + 419 + ], + [ + 116, + 413 + ], + [ + 154, + 411 + ], + [ + 202, + 409 + ], + [ + 248, + 413 + ], + [ + 277, + 422 + ], + [ + 297, + 441 + ], + [ + 316, + 470 + ], + [ + 322, + 464 + ], + [ + 334, + 466 + ], + [ + 341, + 470 + ], + [ + 340, + 478 + ], + [ + 334, + 483 + ], + [ + 345, + 498 + ], + [ + 350, + 519 + ], + [ + 352, + 533 + ], + [ + 362, + 543 + ], + [ + 365, + 562 + ], + [ + 363, + 585 + ], + [ + 362, + 600 + ], + [ + 356, + 610 + ], + [ + 349, + 616 + ], + [ + 337, + 619 + ], + [ + 325, + 616 + ], + [ + 323, + 608 + ], + [ + 279, + 616 + ], + [ + 276, + 628 + ], + [ + 269, + 640 + ], + [ + 251, + 642 + ], + [ + 232, + 642 + ], + [ + 222, + 637 + ], + [ + 219, + 627 + ], + [ + 216, + 616 + ], + [ + 188, + 617 + ], + [ + 159, + 621 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 38, + 659 + ], + [ + 109, + 652 + ], + [ + 111, + 669 + ], + [ + 126, + 
672 + ], + [ + 153, + 668 + ], + [ + 165, + 657 + ], + [ + 174, + 642 + ], + [ + 173, + 609 + ], + [ + 168, + 589 + ], + [ + 168, + 567 + ], + [ + 162, + 544 + ], + [ + 142, + 523 + ], + [ + 118, + 500 + ], + [ + 116, + 493 + ], + [ + 108, + 485 + ], + [ + 95, + 481 + ], + [ + 38, + 444 + ], + [ + 9, + 428 + ], + [ + 0, + 425 + ], + [ + 0, + 662 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 766, + 549 + ], + [ + 765, + 559 + ], + [ + 760, + 564 + ], + [ + 750, + 566 + ], + [ + 740, + 563 + ], + [ + 735, + 551 + ], + [ + 735, + 528 + ], + [ + 735, + 499 + ], + [ + 741, + 481 + ], + [ + 754, + 456 + ], + [ + 763, + 436 + ], + [ + 768, + 430 + ], + [ + 777, + 426 + ], + [ + 817, + 423 + ], + [ + 820, + 420 + ], + [ + 821, + 420 + ], + [ + 823, + 422 + ], + [ + 846, + 421 + ], + [ + 870, + 421 + ], + [ + 891, + 426 + ], + [ + 910, + 443 + ], + [ + 917, + 455 + ], + [ + 919, + 464 + ], + [ + 924, + 459 + ], + [ + 933, + 457 + ], + [ + 942, + 459 + ], + [ + 942, + 467 + ], + [ + 928, + 472 + ], + [ + 936, + 483 + ], + [ + 939, + 494 + ], + [ + 940, + 524 + ], + [ + 939, + 540 + ], + [ + 938, + 548 + ], + [ + 937, + 549 + ], + [ + 924, + 550 + ], + [ + 920, + 550 + ], + [ + 917, + 545 + ], + [ + 919, + 540 + ], + [ + 912, + 540 + ], + [ + 913, + 552 + ], + [ + 908, + 556 + ], + [ + 901, + 560 + ], + [ + 893, + 560 + ], + [ + 882, + 556 + ], + [ + 881, + 547 + ], + [ + 803, + 548 + ], + [ + 803, + 553 + ], + [ + 800, + 556 + ], + [ + 793, + 558 + ], + [ + 783, + 557 + ], + [ + 782, + 547 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 844, + 524 + ], + [ + 843, + 512 + ], + [ + 786, + 513 + ], + [ + 784, + 527 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1717, + 396 + ], + [ + 1719, + 353 + ], + [ + 1683, + 357 + ], + [ + 1683, + 421 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1371, + 549 + ], + [ + 1335, + 553 + ], + [ + 1294, + 553 + ], + [ + 1277, + 550 + ], + [ + 1277, + 537 + ], + [ + 1287, + 526 + ], + [ + 1288, + 516 + ], + [ + 1288, + 501 + ], + [ + 1294, + 317 + ], + [ + 1256, + 277 + ], + [ + 1250, + 254 + ], + [ + 1242, + 245 + ], + [ + 1235, + 239 + ], + [ + 1235, + 226 + ], + [ + 1231, + 223 + ], + [ + 1202, + 223 + ], + [ + 1187, + 221 + ], + [ + 1183, + 213 + ], + [ + 1177, + 213 + ], + [ + 1169, + 210 + ], + [ + 1165, + 199 + ], + [ + 1168, + 190 + ], + [ + 1174, + 190 + ], + [ + 1170, + 187 + ], + [ + 1163, + 186 + ], + [ + 1158, + 178 + ], + [ + 1158, + 161 + ], + [ + 1165, + 150 + ], + [ + 1173, + 149 + ], + [ + 1176, + 142 + ], + [ + 1176, + 138 + ], + [ + 1171, + 138 + ], + [ + 1162, + 141 + ], + [ + 1150, + 137 + ], + [ + 1136, + 127 + ], + [ + 1132, + 118 + ], + [ + 1138, + 104 + ], + [ + 1142, + 104 + ], + [ + 1143, + 92 + ], + [ + 1148, + 90 + ], + [ + 1154, + 90 + ], + [ + 1160, + 88 + ], + [ + 1168, + 83 + ], + [ + 1166, + 78 + ], + [ + 1161, + 76 + ], + [ + 1160, + 72 + ], + [ + 1155, + 68 + ], + [ + 1146, + 67 + ], + [ + 1146, + 59 + ], + [ + 1149, + 43 + ], + [ + 1155, + 38 + ], + [ + 1142, + 39 + ], + [ + 1129, + 39 + ], + [ + 1114, + 36 + ], + [ + 1109, + 29 + ], + [ + 1109, + 20 + ], + [ + 1112, + 13 + ], + [ + 1120, + 14 + ], + [ + 1135, + 19 + ], + [ + 1147, + 16 + ], + [ + 1155, + 17 + ], + [ + 1162, + 17 + ], + [ + 1168, + 13 + ], + [ + 1179, + 12 + ], + [ + 1187, + 8 + ], + [ + 1190, + 5 + ], + [ + 1189, + 0 + ], + [ + 1796, + 0 + ], + [ + 1805, + 19 + ], + [ + 1810, + 48 + ], + [ + 1808, + 77 + ], + [ + 1801, + 93 + ], + [ + 1782, + 109 + ], + [ + 1755, + 117 + ], + [ + 1747, + 116 + ], + [ + 1737, 
+ 130 + ], + [ + 1735, + 146 + ], + [ + 1754, + 148 + ], + [ + 1775, + 144 + ], + [ + 1788, + 136 + ], + [ + 1799, + 139 + ], + [ + 1805, + 153 + ], + [ + 1799, + 162 + ], + [ + 1777, + 170 + ], + [ + 1750, + 180 + ], + [ + 1738, + 187 + ], + [ + 1734, + 196 + ], + [ + 1740, + 203 + ], + [ + 1767, + 201 + ], + [ + 1782, + 198 + ], + [ + 1794, + 198 + ], + [ + 1798, + 209 + ], + [ + 1802, + 219 + ], + [ + 1812, + 228 + ], + [ + 1813, + 242 + ], + [ + 1810, + 255 + ], + [ + 1792, + 260 + ], + [ + 1754, + 273 + ], + [ + 1750, + 288 + ], + [ + 1750, + 305 + ], + [ + 1750, + 322 + ], + [ + 1724, + 338 + ], + [ + 1707, + 357 + ], + [ + 1691, + 372 + ], + [ + 1693, + 396 + ], + [ + 1693, + 424 + ], + [ + 1663, + 467 + ], + [ + 1473, + 494 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1875, + 302 + ], + [ + 1874, + 245 + ], + [ + 1805, + 245 + ], + [ + 1806, + 311 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1377, + 302 + ], + [ + 1365, + 301 + ], + [ + 1357, + 308 + ], + [ + 1352, + 319 + ], + [ + 1357, + 334 + ], + [ + 1376, + 338 + ], + [ + 1383, + 328 + ], + [ + 1386, + 312 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1362, + 354 + ], + [ + 1360, + 339 + ], + [ + 1379, + 336 + ], + [ + 1380, + 353 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1457, + 319 + ], + [ + 1458, + 424 + ], + [ + 1464, + 424 + ], + [ + 1462, + 308 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1471, + 329 + ], + [ + 1442, + 332 + ], + [ + 1439, + 234 + ], + [ + 1468, + 233 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1858, + 379 + ], + [ + 1858, + 367 + ], + [ + 1862, + 360 + ], + [ + 1869, + 354 + ], + [ + 1874, + 350 + ], + [ + 1874, + 344 + ], + [ + 1874, + 333 + ], + [ + 1878, + 325 + ], + [ + 1887, + 320 + ], + [ + 1898, + 319 + ], + [ + 1906, + 323 + ], + [ + 1908, + 334 + ], + [ + 1907, + 345 + ], + [ + 1909, + 353 + ], + [ + 1917, + 356 + ], + [ + 1919, + 370 + ], + [ + 1919, + 381 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1756, + 385 + ], + [ + 1752, + 47 + ], + [ + 1769, + 47 + ], + [ + 1781, + 383 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1737, + 48 + ], + [ + 1734, + 0 + ], + [ + 1775, + 0 + ], + [ + 1779, + 44 + ], + [ + 1778, + 51 + ], + [ + 1744, + 56 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1404, + 605 + ], + [ + 1391, + 606 + ], + [ + 1382, + 602 + ], + [ + 1381, + 593 + ], + [ + 1380, + 576 + ], + [ + 1379, + 552 + ], + [ + 1376, + 532 + ], + [ + 1376, + 519 + ], + [ + 1377, + 505 + ], + [ + 1384, + 496 + ], + [ + 1388, + 491 + ], + [ + 1372, + 490 + ], + [ + 1370, + 482 + ], + [ + 1370, + 475 + ], + [ + 1376, + 470 + ], + [ + 1387, + 470 + ], + [ + 1394, + 470 + ], + [ + 1398, + 471 + ], + [ + 1409, + 445 + ], + [ + 1415, + 432 + ], + [ + 1424, + 420 + ], + [ + 1436, + 412 + ], + [ + 1521, + 403 + ], + [ + 1572, + 402 + ], + [ + 1602, + 404 + ], + [ + 1612, + 405 + ], + [ + 1626, + 419 + ], + [ + 1652, + 448 + ], + [ + 1656, + 536 + ], + [ + 1604, + 591 + ], + [ + 1557, + 593 + ], + [ + 1527, + 596 + ], + [ + 1499, + 599 + ], + [ + 1466, + 599 + ], + [ + 1461, + 600 + ], + [ + 1460, + 613 + ], + [ + 1461, + 626 + ], + [ + 1457, + 637 + ], + [ + 1437, + 638 + ], + [ + 1428, + 634 + ], + [ + 1421, + 626 + ], + [ + 1418, + 606 + ], + [ + 1420, + 586 + ], + [ + 1411, + 585 + ], + [ + 1405, + 586 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1595, + 719 + ], + [ + 1566, + 719 + ], + [ + 1557, + 708 + ], + [ + 1546, + 654 + ], + [ + 1543, + 615 + ], + [ + 
1541, + 575 + ], + [ + 1542, + 562 + ], + [ + 1570, + 533 + ], + [ + 1593, + 518 + ], + [ + 1598, + 511 + ], + [ + 1581, + 505 + ], + [ + 1575, + 496 + ], + [ + 1578, + 491 + ], + [ + 1591, + 488 + ], + [ + 1601, + 488 + ], + [ + 1606, + 489 + ], + [ + 1633, + 453 + ], + [ + 1653, + 425 + ], + [ + 1679, + 404 + ], + [ + 1704, + 390 + ], + [ + 1735, + 381 + ], + [ + 1778, + 375 + ], + [ + 1837, + 368 + ], + [ + 1890, + 364 + ], + [ + 1936, + 363 + ], + [ + 1998, + 363 + ], + [ + 2036, + 363 + ], + [ + 2048, + 365 + ], + [ + 2047, + 759 + ], + [ + 1924, + 768 + ], + [ + 1851, + 768 + ], + [ + 1816, + 771 + ], + [ + 1808, + 775 + ], + [ + 1806, + 803 + ], + [ + 1797, + 823 + ], + [ + 1778, + 826 + ], + [ + 1730, + 820 + ], + [ + 1720, + 802 + ], + [ + 1710, + 781 + ], + [ + 1707, + 756 + ], + [ + 1706, + 742 + ], + [ + 1606, + 702 + ], + [ + 1606, + 710 + ], + [ + 1601, + 718 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 2036, + 988 + ], + [ + 1995, + 973 + ], + [ + 1910, + 931 + ], + [ + 1869, + 887 + ], + [ + 1857, + 865 + ], + [ + 1859, + 836 + ], + [ + 1857, + 804 + ], + [ + 1875, + 785 + ], + [ + 1915, + 784 + ], + [ + 1968, + 788 + ], + [ + 1985, + 771 + ], + [ + 1996, + 738 + ], + [ + 1994, + 664 + ], + [ + 2000, + 602 + ], + [ + 1980, + 582 + ], + [ + 1977, + 528 + ], + [ + 2000, + 479 + ], + [ + 2024, + 444 + ], + [ + 2048, + 411 + ], + [ + 2048, + 991 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000141_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000141_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..bd48125aab48ad39a226683b86022a91d319c0fc Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000141_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000141_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000141_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..53c8d9be0b9ef639c6cf29e2eae2746868a810d1 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000141_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000142_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000142_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..cd40933fd25790094825825afe5e757da77f9c1b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000142_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000142_000019_gtFine_polygons.json 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000142_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..936efacb668c822197430baaa10093d4271058ca --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000142_000019_gtFine_polygons.json @@ -0,0 +1,8016 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "terrain", + "polygon": [ + [ + 730, + 481 + ], + [ + 716, + 482 + ], + [ + 693, + 483 + ], + [ + 686, + 475 + ], + [ + 685, + 462 + ], + [ + 685, + 455 + ], + [ + 686, + 450 + ], + [ + 686, + 444 + ], + [ + 691, + 440 + ], + [ + 700, + 438 + ], + [ + 708, + 437 + ], + [ + 717, + 442 + ], + [ + 725, + 450 + ], + [ + 732, + 458 + ], + [ + 738, + 467 + ], + [ + 740, + 477 + ], + [ + 738, + 480 + ], + [ + 734, + 481 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 692, + 22 + ], + [ + 890, + 288 + ], + [ + 1077, + 380 + ], + [ + 1146, + 364 + ], + [ + 1263, + 330 + ], + [ + 1545, + 76 + ], + [ + 1556, + 0 + ], + [ + 675, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1892, + 684 + ], + [ + 1261, + 467 + ], + [ + 1201, + 450 + ], + [ + 1159, + 447 + ], + [ + 1152, + 441 + ], + [ + 1116, + 437 + ], + [ + 1034, + 442 + ], + [ + 909, + 451 + ], + [ + 104, + 491 + ], + [ + 0, + 509 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 750 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 14, + 551 + ], + [ + 112, + 546 + ], + [ + 167, + 541 + ], + [ + 177, + 539 + ], + [ + 149, + 523 + ], + [ + 149, + 515 + ], + [ + 291, + 512 + ], + [ + 347, + 506 + ], + [ + 442, + 500 + ], + [ + 476, + 497 + ], + [ + 561, + 500 + ], + [ + 582, + 499 + ], + [ + 584, + 494 + ], + [ + 550, + 484 + ], + [ + 594, + 486 + ], + [ + 631, + 486 + ], + [ + 657, + 485 + ], + [ + 697, + 488 + ], + [ + 731, + 488 + ], + [ + 753, + 481 + ], + [ + 744, + 476 + ], + [ + 740, + 468 + ], + [ + 791, + 466 + ], + [ + 810, + 465 + ], + [ + 837, + 470 + ], + [ + 864, + 471 + ], + [ + 886, + 466 + ], + [ + 920, + 459 + ], + [ + 904, + 454 + ], + [ + 887, + 453 + ], + [ + 839, + 453 + ], + [ + 0, + 484 + ], + [ + 0, + 553 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1858, + 1003 + ], + [ + 1283, + 536 + ], + [ + 1280, + 530 + ], + [ + 1305, + 523 + ], + [ + 1355, + 513 + ], + [ + 1353, + 503 + ], + [ + 1323, + 506 + ], + [ + 1280, + 508 + ], + [ + 1252, + 509 + ], + [ + 1238, + 499 + ], + [ + 1225, + 490 + ], + [ + 1223, + 483 + ], + [ + 1224, + 479 + ], + [ + 1239, + 478 + ], + [ + 1237, + 477 + ], + [ + 1228, + 478 + ], + [ + 1214, + 470 + ], + [ + 1211, + 465 + ], + [ + 1219, + 464 + ], + [ + 1232, + 463 + ], + [ + 1228, + 462 + ], + [ + 1208, + 462 + ], + [ + 1195, + 454 + ], + [ + 1196, + 450 + ], + [ + 1199, + 449 + ], + [ + 1210, + 445 + ], + [ + 1238, + 449 + ], + [ + 1265, + 461 + ], + [ + 1391, + 484 + ], + [ + 2048, + 677 + ], + [ + 2048, + 1024 + ], + [ + 1885, + 1024 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1522, + 49 + ], + [ + 1460, + 49 + ], + [ + 1459, + 37 + ], + [ + 1454, + 35 + ], + [ + 1454, + 19 + ], + [ + 1450, + 19 + ], + [ + 1450, + 35 + ], + [ + 1447, + 36 + ], + [ + 1446, + 50 + ], + [ + 1431, + 51 + ], + [ + 1431, + 38 + ], + [ + 1431, + 35 + ], + [ + 1421, + 35 + ], + [ + 1417, + 42 + ], + [ + 1417, + 51 + ], + [ + 1381, + 51 + ], + [ + 1376, + 54 + ], + [ + 1377, + 42 + ], + [ + 1377, + 41 + ], + [ + 1366, + 41 + ], + [ + 1364, + 46 + ], + [ + 1365, + 59 + ], + [ + 1345, + 69 + ], + [ + 1344, + 41 + ], + [ + 1346, + 39 + ], + [ + 1319, + 39 + ], + [ 
+ 1317, + 41 + ], + [ + 1320, + 45 + ], + [ + 1318, + 83 + ], + [ + 1280, + 102 + ], + [ + 1268, + 131 + ], + [ + 1240, + 136 + ], + [ + 1232, + 184 + ], + [ + 1233, + 192 + ], + [ + 1238, + 191 + ], + [ + 1238, + 197 + ], + [ + 1234, + 196 + ], + [ + 1235, + 200 + ], + [ + 1238, + 201 + ], + [ + 1238, + 204 + ], + [ + 1233, + 203 + ], + [ + 1235, + 205 + ], + [ + 1238, + 207 + ], + [ + 1238, + 210 + ], + [ + 1234, + 209 + ], + [ + 1233, + 209 + ], + [ + 1231, + 211 + ], + [ + 1236, + 214 + ], + [ + 1236, + 216 + ], + [ + 1228, + 223 + ], + [ + 1226, + 234 + ], + [ + 1223, + 235 + ], + [ + 1212, + 250 + ], + [ + 1214, + 258 + ], + [ + 1210, + 263 + ], + [ + 1207, + 268 + ], + [ + 1201, + 268 + ], + [ + 1200, + 276 + ], + [ + 1200, + 284 + ], + [ + 1196, + 289 + ], + [ + 1153, + 334 + ], + [ + 1114, + 356 + ], + [ + 1103, + 356 + ], + [ + 1095, + 357 + ], + [ + 1087, + 360 + ], + [ + 1084, + 359 + ], + [ + 1073, + 345 + ], + [ + 1067, + 339 + ], + [ + 1062, + 339 + ], + [ + 1049, + 339 + ], + [ + 1046, + 335 + ], + [ + 1043, + 329 + ], + [ + 1041, + 324 + ], + [ + 1041, + 319 + ], + [ + 1035, + 315 + ], + [ + 1033, + 307 + ], + [ + 1028, + 306 + ], + [ + 1027, + 302 + ], + [ + 1022, + 302 + ], + [ + 1013, + 295 + ], + [ + 1009, + 296 + ], + [ + 1005, + 298 + ], + [ + 1005, + 282 + ], + [ + 1008, + 279 + ], + [ + 1011, + 278 + ], + [ + 1008, + 275 + ], + [ + 989, + 275 + ], + [ + 988, + 273 + ], + [ + 986, + 272 + ], + [ + 983, + 268 + ], + [ + 989, + 265 + ], + [ + 989, + 262 + ], + [ + 984, + 256 + ], + [ + 965, + 256 + ], + [ + 965, + 252 + ], + [ + 953, + 242 + ], + [ + 949, + 225 + ], + [ + 941, + 213 + ], + [ + 936, + 209 + ], + [ + 925, + 209 + ], + [ + 912, + 198 + ], + [ + 907, + 190 + ], + [ + 800, + 101 + ], + [ + 733, + 31 + ], + [ + 703, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 498 + ], + [ + 275, + 493 + ], + [ + 323, + 493 + ], + [ + 473, + 480 + ], + [ + 590, + 468 + ], + [ + 654, + 472 + ], + [ + 693, + 470 + ], + [ + 751, + 465 + ], + [ + 806, + 464 + ], + [ + 839, + 460 + ], + [ + 879, + 459 + ], + [ + 900, + 456 + ], + [ + 929, + 455 + ], + [ + 956, + 455 + ], + [ + 973, + 452 + ], + [ + 1008, + 450 + ], + [ + 1042, + 449 + ], + [ + 1056, + 448 + ], + [ + 1086, + 448 + ], + [ + 1101, + 445 + ], + [ + 1123, + 445 + ], + [ + 1137, + 447 + ], + [ + 1158, + 447 + ], + [ + 1186, + 450 + ], + [ + 1206, + 450 + ], + [ + 1217, + 451 + ], + [ + 1230, + 453 + ], + [ + 1242, + 458 + ], + [ + 1322, + 464 + ], + [ + 1492, + 458 + ], + [ + 1557, + 92 + ], + [ + 1548, + 42 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1138, + 437 + ], + [ + 1126, + 436 + ], + [ + 1112, + 437 + ], + [ + 1106, + 439 + ], + [ + 1101, + 443 + ], + [ + 1107, + 447 + ], + [ + 1120, + 447 + ], + [ + 1134, + 448 + ], + [ + 1143, + 448 + ], + [ + 1148, + 445 + ], + [ + 1147, + 442 + ], + [ + 1142, + 438 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1086, + 408 + ], + [ + 1090, + 407 + ], + [ + 1093, + 410 + ], + [ + 1094, + 416 + ], + [ + 1098, + 425 + ], + [ + 1101, + 434 + ], + [ + 1102, + 441 + ], + [ + 1104, + 442 + ], + [ + 1110, + 441 + ], + [ + 1119, + 437 + ], + [ + 1121, + 434 + ], + [ + 1125, + 432 + ], + [ + 1131, + 431 + ], + [ + 1132, + 429 + ], + [ + 1128, + 421 + ], + [ + 1128, + 415 + ], + [ + 1131, + 415 + ], + [ + 1133, + 418 + ], + [ + 1136, + 413 + ], + [ + 1142, + 409 + ], + [ + 1145, + 413 + ], + [ + 1146, + 422 + ], + [ + 1150, + 428 + ], + [ + 1154, + 429 + ], + [ + 1160, + 422 + ], + [ + 1162, + 422 + ], + [ + 1163, + 429 + ], + [ + 1164, + 437 + ], + [ + 1169, 
+ 439 + ], + [ + 1177, + 439 + ], + [ + 1179, + 434 + ], + [ + 1178, + 429 + ], + [ + 1179, + 426 + ], + [ + 1181, + 425 + ], + [ + 1185, + 427 + ], + [ + 1185, + 435 + ], + [ + 1189, + 431 + ], + [ + 1187, + 424 + ], + [ + 1188, + 416 + ], + [ + 1194, + 415 + ], + [ + 1200, + 408 + ], + [ + 1202, + 400 + ], + [ + 1203, + 373 + ], + [ + 1204, + 358 + ], + [ + 1202, + 347 + ], + [ + 1203, + 328 + ], + [ + 1200, + 320 + ], + [ + 1205, + 314 + ], + [ + 1205, + 302 + ], + [ + 1206, + 296 + ], + [ + 1202, + 292 + ], + [ + 1196, + 268 + ], + [ + 1203, + 256 + ], + [ + 1204, + 248 + ], + [ + 1201, + 244 + ], + [ + 1202, + 234 + ], + [ + 1201, + 219 + ], + [ + 1190, + 217 + ], + [ + 1179, + 212 + ], + [ + 1172, + 216 + ], + [ + 1164, + 223 + ], + [ + 1162, + 234 + ], + [ + 1163, + 246 + ], + [ + 1156, + 240 + ], + [ + 1148, + 241 + ], + [ + 1140, + 248 + ], + [ + 1145, + 256 + ], + [ + 1145, + 260 + ], + [ + 1137, + 260 + ], + [ + 1131, + 265 + ], + [ + 1124, + 267 + ], + [ + 1119, + 266 + ], + [ + 1109, + 275 + ], + [ + 1107, + 284 + ], + [ + 1109, + 290 + ], + [ + 1107, + 292 + ], + [ + 1099, + 292 + ], + [ + 1095, + 310 + ], + [ + 1099, + 315 + ], + [ + 1106, + 315 + ], + [ + 1111, + 312 + ], + [ + 1113, + 315 + ], + [ + 1107, + 318 + ], + [ + 1102, + 331 + ], + [ + 1108, + 331 + ], + [ + 1108, + 337 + ], + [ + 1106, + 349 + ], + [ + 1106, + 358 + ], + [ + 1103, + 360 + ], + [ + 1099, + 362 + ], + [ + 1095, + 366 + ], + [ + 1093, + 364 + ], + [ + 1088, + 361 + ], + [ + 1079, + 355 + ], + [ + 1073, + 355 + ], + [ + 1066, + 355 + ], + [ + 1058, + 351 + ], + [ + 1055, + 348 + ], + [ + 1048, + 348 + ], + [ + 1046, + 355 + ], + [ + 1043, + 355 + ], + [ + 1036, + 353 + ], + [ + 1031, + 357 + ], + [ + 1029, + 366 + ], + [ + 1029, + 370 + ], + [ + 1022, + 366 + ], + [ + 1014, + 370 + ], + [ + 1009, + 378 + ], + [ + 1005, + 386 + ], + [ + 1006, + 395 + ], + [ + 1006, + 404 + ], + [ + 1003, + 411 + ], + [ + 1006, + 417 + ], + [ + 1014, + 418 + ], + [ + 1018, + 421 + ], + [ + 1020, + 427 + ], + [ + 1019, + 438 + ], + [ + 1022, + 439 + ], + [ + 1021, + 433 + ], + [ + 1022, + 424 + ], + [ + 1024, + 421 + ], + [ + 1029, + 419 + ], + [ + 1031, + 426 + ], + [ + 1031, + 439 + ], + [ + 1031, + 439 + ], + [ + 1032, + 439 + ], + [ + 1032, + 433 + ], + [ + 1035, + 427 + ], + [ + 1039, + 427 + ], + [ + 1038, + 435 + ], + [ + 1038, + 440 + ], + [ + 1040, + 440 + ], + [ + 1041, + 434 + ], + [ + 1042, + 429 + ], + [ + 1045, + 429 + ], + [ + 1045, + 438 + ], + [ + 1050, + 442 + ], + [ + 1053, + 443 + ], + [ + 1054, + 439 + ], + [ + 1058, + 425 + ], + [ + 1061, + 423 + ], + [ + 1062, + 429 + ], + [ + 1062, + 440 + ], + [ + 1065, + 443 + ], + [ + 1065, + 441 + ], + [ + 1064, + 432 + ], + [ + 1064, + 425 + ], + [ + 1070, + 420 + ], + [ + 1072, + 421 + ], + [ + 1070, + 435 + ], + [ + 1072, + 438 + ], + [ + 1075, + 433 + ], + [ + 1075, + 424 + ], + [ + 1083, + 421 + ], + [ + 1088, + 421 + ], + [ + 1086, + 413 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1132, + 394 + ], + [ + 1133, + 402 + ], + [ + 1136, + 402 + ], + [ + 1138, + 393 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1199, + 428 + ], + [ + 1180, + 428 + ], + [ + 1176, + 432 + ], + [ + 1176, + 442 + ], + [ + 1180, + 451 + ], + [ + 1188, + 457 + ], + [ + 1198, + 458 + ], + [ + 1206, + 452 + ], + [ + 1214, + 450 + ], + [ + 1218, + 450 + ], + [ + 1216, + 423 + ], + [ + 1206, + 424 + ], + [ + 1205, + 430 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1206, + 382 + ], + [ + 1207, + 425 + ], + [ + 1204, + 425 + ], + [ + 1203, + 383 + 
] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1218, + 415 + ], + [ + 1208, + 415 + ], + [ + 1207, + 384 + ], + [ + 1218, + 384 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1226, + 403 + ], + [ + 1226, + 452 + ], + [ + 1229, + 451 + ], + [ + 1230, + 402 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1220, + 284 + ], + [ + 1221, + 336 + ], + [ + 1233, + 337 + ], + [ + 1230, + 282 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1206, + 339 + ], + [ + 1224, + 323 + ], + [ + 1239, + 312 + ], + [ + 1245, + 306 + ], + [ + 1252, + 306 + ], + [ + 1262, + 311 + ], + [ + 1279, + 304 + ], + [ + 1295, + 293 + ], + [ + 1308, + 296 + ], + [ + 1308, + 307 + ], + [ + 1306, + 316 + ], + [ + 1313, + 323 + ], + [ + 1326, + 324 + ], + [ + 1331, + 322 + ], + [ + 1339, + 327 + ], + [ + 1341, + 346 + ], + [ + 1356, + 355 + ], + [ + 1361, + 367 + ], + [ + 1361, + 404 + ], + [ + 1363, + 432 + ], + [ + 1360, + 443 + ], + [ + 1335, + 451 + ], + [ + 1284, + 455 + ], + [ + 1240, + 461 + ], + [ + 1228, + 460 + ], + [ + 1235, + 447 + ], + [ + 1246, + 442 + ], + [ + 1252, + 427 + ], + [ + 1249, + 417 + ], + [ + 1247, + 415 + ], + [ + 1230, + 411 + ], + [ + 1218, + 405 + ], + [ + 1212, + 393 + ], + [ + 1201, + 380 + ], + [ + 1203, + 375 + ], + [ + 1199, + 349 + ], + [ + 1199, + 341 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1357, + 342 + ], + [ + 1357, + 452 + ], + [ + 1352, + 453 + ], + [ + 1349, + 339 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1480, + 431 + ], + [ + 1452, + 425 + ], + [ + 1437, + 411 + ], + [ + 1432, + 383 + ], + [ + 1438, + 371 + ], + [ + 1430, + 367 + ], + [ + 1416, + 364 + ], + [ + 1421, + 349 + ], + [ + 1435, + 343 + ], + [ + 1436, + 333 + ], + [ + 1434, + 322 + ], + [ + 1412, + 321 + ], + [ + 1412, + 334 + ], + [ + 1418, + 341 + ], + [ + 1411, + 352 + ], + [ + 1395, + 338 + ], + [ + 1388, + 334 + ], + [ + 1376, + 334 + ], + [ + 1376, + 325 + ], + [ + 1384, + 321 + ], + [ + 1381, + 307 + ], + [ + 1377, + 295 + ], + [ + 1388, + 299 + ], + [ + 1387, + 285 + ], + [ + 1383, + 275 + ], + [ + 1394, + 265 + ], + [ + 1401, + 253 + ], + [ + 1412, + 255 + ], + [ + 1418, + 237 + ], + [ + 1418, + 220 + ], + [ + 1417, + 216 + ], + [ + 1408, + 207 + ], + [ + 1416, + 192 + ], + [ + 1419, + 185 + ], + [ + 1417, + 180 + ], + [ + 1403, + 174 + ], + [ + 1420, + 156 + ], + [ + 1448, + 143 + ], + [ + 1421, + 130 + ], + [ + 1425, + 118 + ], + [ + 1437, + 118 + ], + [ + 1454, + 123 + ], + [ + 1471, + 117 + ], + [ + 1484, + 121 + ], + [ + 1494, + 139 + ], + [ + 1514, + 400 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1471, + 422 + ], + [ + 1472, + 377 + ], + [ + 1475, + 371 + ], + [ + 1473, + 355 + ], + [ + 1469, + 352 + ], + [ + 1469, + 274 + ], + [ + 1474, + 267 + ], + [ + 1475, + 252 + ], + [ + 1469, + 250 + ], + [ + 1470, + 171 + ], + [ + 1474, + 163 + ], + [ + 1474, + 138 + ], + [ + 1525, + 94 + ], + [ + 1525, + 48 + ], + [ + 1513, + 50 + ], + [ + 1503, + 43 + ], + [ + 1503, + 2 + ], + [ + 1503, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 438 + ], + [ + 1469, + 442 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1440, + 309 + ], + [ + 1446, + 310 + ], + [ + 1446, + 430 + ], + [ + 1441, + 430 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1538, + 263 + ], + [ + 1528, + 262 + ], + [ + 1529, + 427 + ], + [ + 1537, + 429 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1641, + 208 + ], + [ + 1654, + 210 + ], + [ + 1654, + 428 + ], + [ + 1642, + 427 + ] + ] + }, + { + "label": "vegetation", + 
"polygon": [ + [ + 1686, + 202 + ], + [ + 1677, + 202 + ], + [ + 1665, + 193 + ], + [ + 1652, + 193 + ], + [ + 1648, + 181 + ], + [ + 1654, + 171 + ], + [ + 1650, + 162 + ], + [ + 1639, + 163 + ], + [ + 1630, + 177 + ], + [ + 1619, + 185 + ], + [ + 1603, + 192 + ], + [ + 1585, + 195 + ], + [ + 1577, + 195 + ], + [ + 1577, + 187 + ], + [ + 1570, + 182 + ], + [ + 1564, + 196 + ], + [ + 1577, + 220 + ], + [ + 1593, + 234 + ], + [ + 1613, + 236 + ], + [ + 1615, + 239 + ], + [ + 1611, + 247 + ], + [ + 1613, + 261 + ], + [ + 1629, + 262 + ], + [ + 1638, + 265 + ], + [ + 1638, + 281 + ], + [ + 1650, + 300 + ], + [ + 1663, + 303 + ], + [ + 1679, + 300 + ], + [ + 1703, + 306 + ], + [ + 1704, + 312 + ], + [ + 1707, + 330 + ], + [ + 1702, + 344 + ], + [ + 1704, + 356 + ], + [ + 1703, + 368 + ], + [ + 1686, + 362 + ], + [ + 1668, + 378 + ], + [ + 1678, + 389 + ], + [ + 1691, + 400 + ], + [ + 1693, + 404 + ], + [ + 1693, + 417 + ], + [ + 1686, + 412 + ], + [ + 1681, + 401 + ], + [ + 1667, + 395 + ], + [ + 1666, + 416 + ], + [ + 1723, + 427 + ], + [ + 2048, + 422 + ], + [ + 2048, + 0 + ], + [ + 1847, + 0 + ], + [ + 1855, + 35 + ], + [ + 1861, + 72 + ], + [ + 1841, + 86 + ], + [ + 1826, + 74 + ], + [ + 1827, + 66 + ], + [ + 1844, + 58 + ], + [ + 1839, + 31 + ], + [ + 1827, + 16 + ], + [ + 1814, + 16 + ], + [ + 1788, + 20 + ], + [ + 1781, + 1 + ], + [ + 1781, + 0 + ], + [ + 1657, + 0 + ], + [ + 1648, + 20 + ], + [ + 1652, + 49 + ], + [ + 1660, + 68 + ], + [ + 1685, + 67 + ], + [ + 1698, + 72 + ], + [ + 1696, + 81 + ], + [ + 1675, + 81 + ], + [ + 1640, + 79 + ], + [ + 1626, + 93 + ], + [ + 1643, + 121 + ], + [ + 1684, + 135 + ], + [ + 1692, + 145 + ], + [ + 1679, + 149 + ], + [ + 1667, + 158 + ], + [ + 1672, + 186 + ], + [ + 1686, + 194 + ], + [ + 1694, + 199 + ], + [ + 1695, + 204 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1325, + 418 + ], + [ + 1332, + 417 + ], + [ + 1332, + 472 + ], + [ + 1325, + 472 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1368, + 356 + ], + [ + 1368, + 427 + ], + [ + 1289, + 431 + ], + [ + 1290, + 359 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 2035, + 401 + ], + [ + 1338, + 419 + ], + [ + 1332, + 420 + ], + [ + 1332, + 428 + ], + [ + 1363, + 428 + ], + [ + 1363, + 450 + ], + [ + 1365, + 479 + ], + [ + 2048, + 533 + ], + [ + 2048, + 402 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1403, + 425 + ], + [ + 1381, + 429 + ], + [ + 1370, + 448 + ], + [ + 1372, + 467 + ], + [ + 1417, + 467 + ], + [ + 1418, + 447 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 2026, + 873 + ], + [ + 1464, + 580 + ], + [ + 1345, + 524 + ], + [ + 1321, + 513 + ], + [ + 1322, + 495 + ], + [ + 1322, + 494 + ], + [ + 1280, + 495 + ], + [ + 1237, + 478 + ], + [ + 1225, + 464 + ], + [ + 1221, + 463 + ], + [ + 1226, + 448 + ], + [ + 1246, + 450 + ], + [ + 1263, + 449 + ], + [ + 1300, + 448 + ], + [ + 1328, + 449 + ], + [ + 1363, + 453 + ], + [ + 1560, + 460 + ], + [ + 2023, + 473 + ], + [ + 2048, + 474 + ], + [ + 2048, + 885 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1274, + 414 + ], + [ + 1276, + 435 + ], + [ + 1247, + 434 + ], + [ + 1245, + 414 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1255, + 358 + ], + [ + 1261, + 358 + ], + [ + 1262, + 492 + ], + [ + 1258, + 491 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1253, + 381 + ], + [ + 1250, + 372 + ], + [ + 1250, + 365 + ], + [ + 1252, + 360 + ], + [ + 1259, + 359 + ], + [ + 1261, + 367 + ], + [ + 1261, + 373 + ], + [ + 1258, + 380 + ] + ] + }, + { + 
"label": "static", + "polygon": [ + [ + 1080, + 43 + ], + [ + 1113, + 43 + ], + [ + 1113, + 52 + ], + [ + 1080, + 53 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1068, + 199 + ], + [ + 1089, + 198 + ], + [ + 1088, + 202 + ], + [ + 1086, + 206 + ], + [ + 1071, + 204 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1098, + 262 + ], + [ + 1111, + 263 + ], + [ + 1111, + 267 + ], + [ + 1096, + 268 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1052, + 288 + ], + [ + 1064, + 288 + ], + [ + 1064, + 293 + ], + [ + 1050, + 293 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1070, + 323 + ], + [ + 1078, + 323 + ], + [ + 1078, + 327 + ], + [ + 1069, + 328 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1085, + 339 + ], + [ + 1092, + 339 + ], + [ + 1090, + 342 + ], + [ + 1084, + 343 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1101, + 343 + ], + [ + 1095, + 344 + ], + [ + 1093, + 348 + ], + [ + 1097, + 348 + ], + [ + 1102, + 348 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1116, + 322 + ], + [ + 1216, + 338 + ], + [ + 1213, + 340 + ], + [ + 1116, + 327 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1122, + 317 + ], + [ + 1121, + 343 + ], + [ + 1108, + 340 + ], + [ + 1109, + 314 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 997, + 440 + ], + [ + 996, + 426 + ], + [ + 1001, + 416 + ], + [ + 1006, + 409 + ], + [ + 1007, + 393 + ], + [ + 1004, + 387 + ], + [ + 999, + 384 + ], + [ + 992, + 381 + ], + [ + 989, + 375 + ], + [ + 987, + 365 + ], + [ + 983, + 363 + ], + [ + 977, + 364 + ], + [ + 978, + 366 + ], + [ + 974, + 366 + ], + [ + 973, + 367 + ], + [ + 968, + 373 + ], + [ + 968, + 380 + ], + [ + 964, + 389 + ], + [ + 962, + 394 + ], + [ + 962, + 411 + ], + [ + 962, + 416 + ], + [ + 966, + 419 + ], + [ + 974, + 419 + ], + [ + 976, + 420 + ], + [ + 977, + 428 + ], + [ + 977, + 440 + ], + [ + 980, + 443 + ], + [ + 981, + 442 + ], + [ + 982, + 435 + ], + [ + 982, + 429 + ], + [ + 986, + 425 + ], + [ + 989, + 422 + ], + [ + 992, + 423 + ], + [ + 993, + 435 + ], + [ + 993, + 443 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 969, + 439 + ], + [ + 961, + 440 + ], + [ + 956, + 441 + ], + [ + 952, + 441 + ], + [ + 952, + 447 + ], + [ + 957, + 455 + ], + [ + 962, + 459 + ], + [ + 967, + 459 + ], + [ + 970, + 462 + ], + [ + 979, + 460 + ], + [ + 987, + 460 + ], + [ + 993, + 460 + ], + [ + 997, + 460 + ], + [ + 999, + 463 + ], + [ + 1003, + 463 + ], + [ + 1006, + 460 + ], + [ + 1014, + 459 + ], + [ + 1016, + 459 + ], + [ + 1017, + 461 + ], + [ + 1021, + 460 + ], + [ + 1023, + 460 + ], + [ + 1025, + 462 + ], + [ + 1029, + 460 + ], + [ + 1034, + 459 + ], + [ + 1043, + 459 + ], + [ + 1043, + 461 + ], + [ + 1047, + 461 + ], + [ + 1048, + 458 + ], + [ + 1050, + 458 + ], + [ + 1054, + 455 + ], + [ + 1054, + 449 + ], + [ + 1050, + 445 + ], + [ + 1046, + 442 + ], + [ + 1044, + 439 + ], + [ + 1042, + 436 + ], + [ + 1035, + 435 + ], + [ + 1024, + 437 + ], + [ + 1021, + 437 + ], + [ + 1019, + 434 + ], + [ + 1001, + 434 + ], + [ + 997, + 435 + ], + [ + 995, + 438 + ], + [ + 992, + 441 + ], + [ + 986, + 440 + ], + [ + 981, + 439 + ], + [ + 976, + 439 + ], + [ + 974, + 439 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 963, + 462 + ], + [ + 953, + 463 + ], + [ + 943, + 462 + ], + [ + 940, + 457 + ], + [ + 944, + 454 + ], + [ + 953, + 452 + ], + [ + 962, + 452 + ], + [ + 966, + 453 + ], + [ + 967, + 458 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 962, + 427 + ], + [ + 
961, + 422 + ], + [ + 954, + 422 + ], + [ + 954, + 426 + ], + [ + 954, + 428 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 952, + 422 + ], + [ + 953, + 408 + ], + [ + 939, + 409 + ], + [ + 939, + 422 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 931, + 422 + ], + [ + 926, + 420 + ], + [ + 923, + 414 + ], + [ + 926, + 411 + ], + [ + 930, + 412 + ], + [ + 934, + 415 + ], + [ + 934, + 418 + ], + [ + 933, + 420 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 934, + 444 + ], + [ + 934, + 433 + ], + [ + 934, + 422 + ], + [ + 934, + 420 + ], + [ + 931, + 417 + ], + [ + 927, + 416 + ], + [ + 922, + 412 + ], + [ + 922, + 408 + ], + [ + 917, + 405 + ], + [ + 914, + 402 + ], + [ + 916, + 400 + ], + [ + 920, + 396 + ], + [ + 918, + 390 + ], + [ + 912, + 389 + ], + [ + 910, + 385 + ], + [ + 909, + 379 + ], + [ + 904, + 374 + ], + [ + 900, + 366 + ], + [ + 900, + 358 + ], + [ + 906, + 357 + ], + [ + 909, + 351 + ], + [ + 908, + 346 + ], + [ + 915, + 339 + ], + [ + 919, + 334 + ], + [ + 922, + 337 + ], + [ + 926, + 341 + ], + [ + 929, + 343 + ], + [ + 933, + 345 + ], + [ + 941, + 347 + ], + [ + 948, + 353 + ], + [ + 951, + 361 + ], + [ + 949, + 369 + ], + [ + 948, + 372 + ], + [ + 954, + 371 + ], + [ + 955, + 375 + ], + [ + 952, + 381 + ], + [ + 948, + 387 + ], + [ + 950, + 388 + ], + [ + 956, + 386 + ], + [ + 958, + 394 + ], + [ + 955, + 403 + ], + [ + 956, + 407 + ], + [ + 951, + 410 + ], + [ + 946, + 414 + ], + [ + 942, + 421 + ], + [ + 939, + 426 + ], + [ + 939, + 446 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 921, + 422 + ], + [ + 920, + 404 + ], + [ + 903, + 405 + ], + [ + 904, + 423 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 940, + 463 + ], + [ + 938, + 463 + ], + [ + 935, + 463 + ], + [ + 929, + 463 + ], + [ + 922, + 461 + ], + [ + 918, + 451 + ], + [ + 918, + 445 + ], + [ + 922, + 442 + ], + [ + 926, + 441 + ], + [ + 933, + 440 + ], + [ + 937, + 440 + ], + [ + 940, + 440 + ], + [ + 944, + 442 + ], + [ + 947, + 446 + ], + [ + 948, + 450 + ], + [ + 949, + 454 + ], + [ + 948, + 457 + ], + [ + 943, + 459 + ], + [ + 942, + 461 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 898, + 438 + ], + [ + 903, + 437 + ], + [ + 910, + 441 + ], + [ + 915, + 445 + ], + [ + 919, + 456 + ], + [ + 917, + 461 + ], + [ + 908, + 461 + ], + [ + 900, + 460 + ], + [ + 894, + 459 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 880, + 388 + ], + [ + 881, + 412 + ], + [ + 874, + 412 + ], + [ + 874, + 389 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 880, + 427 + ], + [ + 880, + 464 + ], + [ + 877, + 463 + ], + [ + 877, + 390 + ], + [ + 878, + 390 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 872, + 466 + ], + [ + 870, + 375 + ], + [ + 871, + 369 + ], + [ + 876, + 359 + ], + [ + 880, + 356 + ], + [ + 927, + 346 + ], + [ + 940, + 346 + ], + [ + 955, + 346 + ], + [ + 960, + 346 + ], + [ + 960, + 348 + ], + [ + 928, + 348 + ], + [ + 882, + 358 + ], + [ + 877, + 360 + ], + [ + 874, + 363 + ], + [ + 872, + 371 + ], + [ + 872, + 378 + ], + [ + 875, + 466 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 865, + 389 + ], + [ + 865, + 400 + ], + [ + 877, + 400 + ], + [ + 877, + 388 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 870, + 423 + ], + [ + 870, + 407 + ], + [ + 876, + 407 + ], + [ + 876, + 423 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 965, + 362 + ], + [ + 966, + 343 + ], + [ + 955, + 343 + ], + [ + 955, + 362 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ 
+ 840, + 411 + ], + [ + 841, + 471 + ], + [ + 844, + 471 + ], + [ + 842, + 407 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 841, + 398 + ], + [ + 845, + 397 + ], + [ + 847, + 397 + ], + [ + 847, + 399 + ], + [ + 844, + 400 + ], + [ + 843, + 402 + ], + [ + 845, + 402 + ], + [ + 847, + 402 + ], + [ + 847, + 403 + ], + [ + 846, + 404 + ], + [ + 844, + 405 + ], + [ + 844, + 408 + ], + [ + 847, + 408 + ], + [ + 848, + 408 + ], + [ + 847, + 409 + ], + [ + 844, + 411 + ], + [ + 841, + 413 + ], + [ + 840, + 411 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 837, + 396 + ], + [ + 843, + 396 + ], + [ + 843, + 417 + ], + [ + 837, + 418 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 21, + 713 + ], + [ + 927, + 500 + ], + [ + 969, + 492 + ], + [ + 983, + 486 + ], + [ + 982, + 480 + ], + [ + 966, + 477 + ], + [ + 942, + 474 + ], + [ + 920, + 473 + ], + [ + 873, + 479 + ], + [ + 653, + 508 + ], + [ + 324, + 555 + ], + [ + 60, + 587 + ], + [ + 0, + 596 + ], + [ + 0, + 721 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 921, + 435 + ], + [ + 915, + 433 + ], + [ + 910, + 436 + ], + [ + 909, + 444 + ], + [ + 912, + 450 + ], + [ + 920, + 451 + ], + [ + 924, + 446 + ], + [ + 926, + 441 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 932, + 467 + ], + [ + 934, + 476 + ], + [ + 918, + 477 + ], + [ + 909, + 452 + ], + [ + 925, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 925, + 479 + ], + [ + 913, + 436 + ], + [ + 916, + 436 + ], + [ + 926, + 476 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 802, + 442 + ], + [ + 802, + 424 + ], + [ + 799, + 399 + ], + [ + 794, + 387 + ], + [ + 775, + 381 + ], + [ + 770, + 375 + ], + [ + 776, + 371 + ], + [ + 763, + 365 + ], + [ + 767, + 359 + ], + [ + 773, + 353 + ], + [ + 770, + 345 + ], + [ + 752, + 339 + ], + [ + 736, + 316 + ], + [ + 727, + 278 + ], + [ + 714, + 256 + ], + [ + 716, + 235 + ], + [ + 725, + 221 + ], + [ + 736, + 228 + ], + [ + 749, + 228 + ], + [ + 765, + 228 + ], + [ + 779, + 239 + ], + [ + 792, + 245 + ], + [ + 782, + 258 + ], + [ + 782, + 268 + ], + [ + 796, + 261 + ], + [ + 810, + 266 + ], + [ + 827, + 256 + ], + [ + 843, + 249 + ], + [ + 851, + 272 + ], + [ + 856, + 284 + ], + [ + 860, + 267 + ], + [ + 872, + 252 + ], + [ + 889, + 251 + ], + [ + 911, + 275 + ], + [ + 905, + 297 + ], + [ + 909, + 301 + ], + [ + 907, + 313 + ], + [ + 901, + 321 + ], + [ + 899, + 328 + ], + [ + 902, + 332 + ], + [ + 894, + 341 + ], + [ + 879, + 342 + ], + [ + 866, + 345 + ], + [ + 871, + 350 + ], + [ + 871, + 356 + ], + [ + 851, + 362 + ], + [ + 840, + 377 + ], + [ + 829, + 388 + ], + [ + 821, + 398 + ], + [ + 812, + 415 + ], + [ + 809, + 434 + ], + [ + 809, + 447 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 825, + 419 + ], + [ + 833, + 417 + ], + [ + 835, + 411 + ], + [ + 831, + 406 + ], + [ + 827, + 405 + ], + [ + 820, + 409 + ], + [ + 819, + 418 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 786, + 440 + ], + [ + 790, + 436 + ], + [ + 803, + 436 + ], + [ + 813, + 436 + ], + [ + 818, + 442 + ], + [ + 828, + 455 + ], + [ + 829, + 461 + ], + [ + 827, + 467 + ], + [ + 827, + 472 + ], + [ + 823, + 472 + ], + [ + 813, + 470 + ], + [ + 803, + 469 + ], + [ + 792, + 463 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 767, + 443 + ], + [ + 774, + 438 + ], + [ + 779, + 437 + ], + [ + 794, + 439 + ], + [ + 799, + 441 + ], + [ + 802, + 448 + ], + [ + 803, + 450 + ], + [ + 805, + 459 + ], + [ + 805, + 470 + ], + [ + 805, + 475 + ], + [ + 803, + 
477 + ], + [ + 801, + 474 + ], + [ + 801, + 473 + ], + [ + 795, + 473 + ], + [ + 795, + 476 + ], + [ + 791, + 477 + ], + [ + 788, + 477 + ], + [ + 788, + 475 + ], + [ + 779, + 473 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 717, + 449 + ], + [ + 719, + 425 + ], + [ + 719, + 377 + ], + [ + 720, + 372 + ], + [ + 725, + 369 + ], + [ + 724, + 366 + ], + [ + 730, + 365 + ], + [ + 735, + 366 + ], + [ + 738, + 360 + ], + [ + 746, + 356 + ], + [ + 760, + 357 + ], + [ + 773, + 346 + ], + [ + 760, + 268 + ], + [ + 745, + 244 + ], + [ + 734, + 253 + ], + [ + 724, + 258 + ], + [ + 714, + 262 + ], + [ + 705, + 259 + ], + [ + 698, + 253 + ], + [ + 691, + 264 + ], + [ + 682, + 279 + ], + [ + 676, + 299 + ], + [ + 678, + 311 + ], + [ + 674, + 320 + ], + [ + 657, + 318 + ], + [ + 662, + 331 + ], + [ + 682, + 329 + ], + [ + 685, + 333 + ], + [ + 691, + 338 + ], + [ + 693, + 345 + ], + [ + 694, + 361 + ], + [ + 698, + 371 + ], + [ + 693, + 369 + ], + [ + 693, + 375 + ], + [ + 698, + 380 + ], + [ + 700, + 384 + ], + [ + 705, + 386 + ], + [ + 705, + 378 + ], + [ + 711, + 383 + ], + [ + 713, + 387 + ], + [ + 713, + 448 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 741, + 432 + ], + [ + 741, + 403 + ], + [ + 730, + 404 + ], + [ + 731, + 435 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 734, + 438 + ], + [ + 734, + 404 + ], + [ + 736, + 404 + ], + [ + 736, + 440 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 775, + 430 + ], + [ + 746, + 429 + ], + [ + 738, + 429 + ], + [ + 731, + 429 + ], + [ + 726, + 431 + ], + [ + 720, + 439 + ], + [ + 718, + 445 + ], + [ + 716, + 463 + ], + [ + 724, + 475 + ], + [ + 736, + 477 + ], + [ + 746, + 477 + ], + [ + 755, + 475 + ], + [ + 759, + 476 + ], + [ + 759, + 480 + ], + [ + 768, + 481 + ], + [ + 770, + 480 + ], + [ + 771, + 475 + ], + [ + 775, + 475 + ], + [ + 775, + 478 + ], + [ + 779, + 480 + ], + [ + 782, + 480 + ], + [ + 783, + 476 + ], + [ + 783, + 467 + ], + [ + 782, + 456 + ], + [ + 782, + 452 + ], + [ + 781, + 444 + ], + [ + 778, + 438 + ], + [ + 778, + 435 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 629, + 458 + ], + [ + 634, + 456 + ], + [ + 639, + 453 + ], + [ + 640, + 451 + ], + [ + 638, + 449 + ], + [ + 637, + 445 + ], + [ + 639, + 444 + ], + [ + 645, + 444 + ], + [ + 650, + 446 + ], + [ + 650, + 448 + ], + [ + 648, + 452 + ], + [ + 643, + 456 + ], + [ + 642, + 458 + ], + [ + 644, + 464 + ], + [ + 657, + 470 + ], + [ + 659, + 465 + ], + [ + 657, + 462 + ], + [ + 654, + 457 + ], + [ + 660, + 455 + ], + [ + 668, + 455 + ], + [ + 677, + 453 + ], + [ + 681, + 460 + ], + [ + 682, + 470 + ], + [ + 684, + 474 + ], + [ + 684, + 479 + ], + [ + 682, + 484 + ], + [ + 672, + 482 + ], + [ + 669, + 477 + ], + [ + 665, + 477 + ], + [ + 662, + 485 + ], + [ + 661, + 485 + ], + [ + 664, + 478 + ], + [ + 663, + 476 + ], + [ + 656, + 475 + ], + [ + 648, + 476 + ], + [ + 644, + 475 + ], + [ + 639, + 468 + ], + [ + 635, + 466 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 657, + 408 + ], + [ + 656, + 392 + ], + [ + 641, + 393 + ], + [ + 642, + 408 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 545, + 459 + ], + [ + 549, + 452 + ], + [ + 558, + 442 + ], + [ + 561, + 439 + ], + [ + 565, + 437 + ], + [ + 569, + 436 + ], + [ + 574, + 433 + ], + [ + 580, + 433 + ], + [ + 584, + 435 + ], + [ + 589, + 436 + ], + [ + 606, + 437 + ], + [ + 613, + 434 + ], + [ + 617, + 435 + ], + [ + 621, + 436 + ], + [ + 623, + 438 + ], + [ + 631, + 451 + ], + [ + 636, + 461 + ], + [ + 636, + 466 + ], + [ + 638, + 476 + 
], + [ + 636, + 481 + ], + [ + 633, + 485 + ], + [ + 632, + 488 + ], + [ + 632, + 493 + ], + [ + 624, + 494 + ], + [ + 621, + 492 + ], + [ + 618, + 485 + ], + [ + 605, + 488 + ], + [ + 602, + 492 + ], + [ + 598, + 495 + ], + [ + 593, + 496 + ], + [ + 591, + 492 + ], + [ + 591, + 489 + ], + [ + 581, + 488 + ], + [ + 569, + 486 + ], + [ + 555, + 482 + ], + [ + 543, + 477 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 530, + 483 + ], + [ + 529, + 410 + ], + [ + 528, + 363 + ], + [ + 528, + 346 + ], + [ + 528, + 333 + ], + [ + 528, + 330 + ], + [ + 519, + 328 + ], + [ + 514, + 328 + ], + [ + 508, + 330 + ], + [ + 506, + 338 + ], + [ + 500, + 342 + ], + [ + 492, + 337 + ], + [ + 488, + 330 + ], + [ + 484, + 322 + ], + [ + 483, + 314 + ], + [ + 493, + 320 + ], + [ + 497, + 320 + ], + [ + 500, + 316 + ], + [ + 499, + 312 + ], + [ + 491, + 311 + ], + [ + 484, + 309 + ], + [ + 476, + 306 + ], + [ + 470, + 301 + ], + [ + 467, + 301 + ], + [ + 460, + 304 + ], + [ + 447, + 303 + ], + [ + 451, + 295 + ], + [ + 457, + 291 + ], + [ + 456, + 287 + ], + [ + 456, + 281 + ], + [ + 450, + 274 + ], + [ + 445, + 273 + ], + [ + 444, + 268 + ], + [ + 449, + 266 + ], + [ + 460, + 272 + ], + [ + 466, + 278 + ], + [ + 473, + 280 + ], + [ + 474, + 281 + ], + [ + 472, + 277 + ], + [ + 468, + 273 + ], + [ + 463, + 270 + ], + [ + 461, + 263 + ], + [ + 461, + 256 + ], + [ + 442, + 250 + ], + [ + 440, + 245 + ], + [ + 441, + 238 + ], + [ + 445, + 235 + ], + [ + 451, + 239 + ], + [ + 454, + 240 + ], + [ + 454, + 234 + ], + [ + 451, + 229 + ], + [ + 447, + 230 + ], + [ + 441, + 231 + ], + [ + 438, + 219 + ], + [ + 438, + 214 + ], + [ + 446, + 213 + ], + [ + 450, + 211 + ], + [ + 451, + 204 + ], + [ + 441, + 199 + ], + [ + 436, + 198 + ], + [ + 434, + 190 + ], + [ + 437, + 187 + ], + [ + 444, + 189 + ], + [ + 453, + 190 + ], + [ + 458, + 193 + ], + [ + 454, + 184 + ], + [ + 450, + 176 + ], + [ + 446, + 173 + ], + [ + 447, + 166 + ], + [ + 455, + 163 + ], + [ + 459, + 168 + ], + [ + 466, + 178 + ], + [ + 470, + 188 + ], + [ + 473, + 187 + ], + [ + 471, + 180 + ], + [ + 467, + 173 + ], + [ + 465, + 148 + ], + [ + 466, + 138 + ], + [ + 473, + 139 + ], + [ + 482, + 140 + ], + [ + 493, + 140 + ], + [ + 504, + 144 + ], + [ + 516, + 133 + ], + [ + 510, + 116 + ], + [ + 511, + 108 + ], + [ + 523, + 101 + ], + [ + 548, + 96 + ], + [ + 694, + 95 + ], + [ + 725, + 95 + ], + [ + 740, + 122 + ], + [ + 730, + 160 + ], + [ + 721, + 169 + ], + [ + 713, + 172 + ], + [ + 718, + 180 + ], + [ + 716, + 186 + ], + [ + 704, + 184 + ], + [ + 698, + 191 + ], + [ + 698, + 205 + ], + [ + 698, + 212 + ], + [ + 704, + 220 + ], + [ + 703, + 228 + ], + [ + 687, + 233 + ], + [ + 659, + 240 + ], + [ + 657, + 250 + ], + [ + 656, + 263 + ], + [ + 650, + 285 + ], + [ + 659, + 300 + ], + [ + 653, + 313 + ], + [ + 643, + 318 + ], + [ + 624, + 319 + ], + [ + 615, + 316 + ], + [ + 613, + 301 + ], + [ + 609, + 294 + ], + [ + 601, + 298 + ], + [ + 599, + 309 + ], + [ + 601, + 323 + ], + [ + 597, + 329 + ], + [ + 580, + 330 + ], + [ + 571, + 325 + ], + [ + 569, + 320 + ], + [ + 558, + 326 + ], + [ + 547, + 327 + ], + [ + 540, + 342 + ], + [ + 536, + 352 + ], + [ + 533, + 360 + ], + [ + 538, + 481 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 500, + 462 + ], + [ + 492, + 463 + ], + [ + 485, + 467 + ], + [ + 478, + 475 + ], + [ + 475, + 483 + ], + [ + 477, + 492 + ], + [ + 482, + 497 + ], + [ + 492, + 497 + ], + [ + 504, + 496 + ], + [ + 509, + 494 + ], + [ + 517, + 494 + ], + [ + 522, + 490 + ], + [ + 527, + 487 + ], + [ + 531, + 490 
+ ], + [ + 535, + 494 + ], + [ + 545, + 494 + ], + [ + 556, + 490 + ], + [ + 559, + 487 + ], + [ + 561, + 480 + ], + [ + 559, + 470 + ], + [ + 553, + 462 + ], + [ + 552, + 456 + ], + [ + 556, + 455 + ], + [ + 561, + 451 + ], + [ + 559, + 446 + ], + [ + 551, + 448 + ], + [ + 545, + 450 + ], + [ + 541, + 451 + ], + [ + 536, + 446 + ], + [ + 533, + 450 + ], + [ + 532, + 455 + ], + [ + 529, + 456 + ], + [ + 521, + 456 + ], + [ + 513, + 453 + ], + [ + 508, + 451 + ], + [ + 505, + 447 + ], + [ + 501, + 447 + ], + [ + 500, + 453 + ], + [ + 503, + 459 + ], + [ + 503, + 462 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 264, + 315 + ], + [ + 268, + 456 + ], + [ + 275, + 457 + ], + [ + 270, + 312 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 225, + 367 + ], + [ + 224, + 327 + ], + [ + 179, + 329 + ], + [ + 179, + 372 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 11, + 259 + ], + [ + 15, + 455 + ], + [ + 21, + 455 + ], + [ + 17, + 259 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 41, + 333 + ], + [ + 13, + 331 + ], + [ + 0, + 333 + ], + [ + 0, + 273 + ], + [ + 14, + 275 + ], + [ + 35, + 278 + ], + [ + 38, + 278 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 35, + 439 + ], + [ + 38, + 392 + ], + [ + 36, + 345 + ], + [ + 33, + 303 + ], + [ + 33, + 282 + ], + [ + 21, + 278 + ], + [ + 9, + 267 + ], + [ + 15, + 239 + ], + [ + 25, + 240 + ], + [ + 31, + 257 + ], + [ + 33, + 239 + ], + [ + 34, + 216 + ], + [ + 24, + 201 + ], + [ + 18, + 177 + ], + [ + 0, + 144 + ], + [ + 0, + 0 + ], + [ + 173, + 0 + ], + [ + 173, + 7 + ], + [ + 186, + 13 + ], + [ + 197, + 11 + ], + [ + 210, + 16 + ], + [ + 230, + 28 + ], + [ + 247, + 43 + ], + [ + 231, + 58 + ], + [ + 211, + 66 + ], + [ + 182, + 82 + ], + [ + 171, + 98 + ], + [ + 169, + 112 + ], + [ + 187, + 121 + ], + [ + 188, + 138 + ], + [ + 177, + 150 + ], + [ + 160, + 145 + ], + [ + 145, + 145 + ], + [ + 127, + 138 + ], + [ + 113, + 140 + ], + [ + 125, + 147 + ], + [ + 117, + 159 + ], + [ + 99, + 157 + ], + [ + 97, + 165 + ], + [ + 103, + 174 + ], + [ + 85, + 182 + ], + [ + 79, + 180 + ], + [ + 74, + 173 + ], + [ + 62, + 182 + ], + [ + 56, + 192 + ], + [ + 57, + 198 + ], + [ + 48, + 208 + ], + [ + 46, + 219 + ], + [ + 50, + 454 + ], + [ + 50, + 454 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 342, + 439 + ], + [ + 352, + 428 + ], + [ + 363, + 422 + ], + [ + 379, + 423 + ], + [ + 389, + 424 + ], + [ + 401, + 425 + ], + [ + 420, + 425 + ], + [ + 431, + 424 + ], + [ + 442, + 425 + ], + [ + 450, + 427 + ], + [ + 453, + 429 + ], + [ + 460, + 434 + ], + [ + 467, + 453 + ], + [ + 471, + 467 + ], + [ + 472, + 487 + ], + [ + 472, + 496 + ], + [ + 468, + 510 + ], + [ + 464, + 514 + ], + [ + 454, + 513 + ], + [ + 447, + 506 + ], + [ + 445, + 500 + ], + [ + 408, + 505 + ], + [ + 407, + 517 + ], + [ + 402, + 517 + ], + [ + 395, + 511 + ], + [ + 394, + 508 + ], + [ + 388, + 508 + ], + [ + 385, + 517 + ], + [ + 379, + 518 + ], + [ + 372, + 516 + ], + [ + 367, + 508 + ], + [ + 331, + 505 + ], + [ + 330, + 515 + ], + [ + 323, + 519 + ], + [ + 313, + 519 + ], + [ + 309, + 510 + ], + [ + 308, + 503 + ], + [ + 297, + 501 + ], + [ + 297, + 492 + ], + [ + 301, + 480 + ], + [ + 305, + 472 + ], + [ + 311, + 466 + ], + [ + 332, + 458 + ], + [ + 326, + 457 + ], + [ + 325, + 453 + ], + [ + 330, + 448 + ], + [ + 334, + 449 + ], + [ + 336, + 449 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 135, + 464 + ], + [ + 146, + 455 + ], + [ + 155, + 447 + ], + [ + 173, + 440 + ], + [ + 195, + 436 + ], + [ + 222, + 436 + ], 
+ [ + 243, + 437 + ], + [ + 267, + 443 + ], + [ + 281, + 453 + ], + [ + 290, + 464 + ], + [ + 298, + 477 + ], + [ + 304, + 485 + ], + [ + 305, + 503 + ], + [ + 303, + 522 + ], + [ + 299, + 527 + ], + [ + 288, + 530 + ], + [ + 280, + 527 + ], + [ + 276, + 518 + ], + [ + 238, + 519 + ], + [ + 235, + 526 + ], + [ + 232, + 532 + ], + [ + 220, + 536 + ], + [ + 210, + 533 + ], + [ + 203, + 526 + ], + [ + 201, + 521 + ], + [ + 161, + 527 + ], + [ + 140, + 523 + ], + [ + 125, + 502 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 127, + 534 + ], + [ + 0, + 548 + ], + [ + 0, + 443 + ], + [ + 7, + 443 + ], + [ + 35, + 436 + ], + [ + 57, + 429 + ], + [ + 82, + 429 + ], + [ + 110, + 436 + ], + [ + 135, + 445 + ], + [ + 152, + 463 + ], + [ + 159, + 488 + ], + [ + 155, + 507 + ], + [ + 151, + 524 + ], + [ + 148, + 534 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 893, + 484 + ], + [ + 889, + 422 + ], + [ + 889, + 324 + ], + [ + 888, + 258 + ], + [ + 875, + 261 + ], + [ + 864, + 269 + ], + [ + 843, + 263 + ], + [ + 837, + 263 + ], + [ + 814, + 252 + ], + [ + 800, + 256 + ], + [ + 790, + 243 + ], + [ + 769, + 225 + ], + [ + 744, + 239 + ], + [ + 739, + 226 + ], + [ + 737, + 220 + ], + [ + 726, + 226 + ], + [ + 730, + 205 + ], + [ + 740, + 186 + ], + [ + 740, + 175 + ], + [ + 726, + 173 + ], + [ + 720, + 159 + ], + [ + 706, + 0 + ], + [ + 726, + 0 + ], + [ + 732, + 3 + ], + [ + 740, + 13 + ], + [ + 746, + 4 + ], + [ + 755, + 6 + ], + [ + 759, + 22 + ], + [ + 772, + 36 + ], + [ + 772, + 48 + ], + [ + 776, + 51 + ], + [ + 779, + 35 + ], + [ + 791, + 21 + ], + [ + 803, + 24 + ], + [ + 814, + 29 + ], + [ + 829, + 29 + ], + [ + 843, + 29 + ], + [ + 856, + 31 + ], + [ + 872, + 28 + ], + [ + 881, + 25 + ], + [ + 879, + 13 + ], + [ + 890, + 7 + ], + [ + 895, + 20 + ], + [ + 902, + 23 + ], + [ + 918, + 21 + ], + [ + 932, + 7 + ], + [ + 936, + 17 + ], + [ + 941, + 24 + ], + [ + 946, + 35 + ], + [ + 952, + 34 + ], + [ + 958, + 15 + ], + [ + 971, + 7 + ], + [ + 980, + 12 + ], + [ + 987, + 28 + ], + [ + 987, + 48 + ], + [ + 991, + 73 + ], + [ + 993, + 94 + ], + [ + 1002, + 111 + ], + [ + 1005, + 129 + ], + [ + 1007, + 147 + ], + [ + 1010, + 150 + ], + [ + 1010, + 158 + ], + [ + 1006, + 162 + ], + [ + 1008, + 169 + ], + [ + 1019, + 175 + ], + [ + 1019, + 185 + ], + [ + 1009, + 187 + ], + [ + 1016, + 194 + ], + [ + 1023, + 194 + ], + [ + 1035, + 201 + ], + [ + 1033, + 210 + ], + [ + 1024, + 219 + ], + [ + 1025, + 232 + ], + [ + 1018, + 240 + ], + [ + 1012, + 236 + ], + [ + 999, + 244 + ], + [ + 988, + 253 + ], + [ + 976, + 250 + ], + [ + 962, + 246 + ], + [ + 960, + 253 + ], + [ + 960, + 259 + ], + [ + 951, + 263 + ], + [ + 936, + 265 + ], + [ + 928, + 265 + ], + [ + 905, + 258 + ], + [ + 906, + 267 + ], + [ + 905, + 284 + ], + [ + 899, + 351 + ], + [ + 903, + 486 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 829, + 495 + ], + [ + 821, + 495 + ], + [ + 818, + 460 + ], + [ + 818, + 408 + ], + [ + 816, + 299 + ], + [ + 810, + 270 + ], + [ + 814, + 239 + ], + [ + 749, + 155 + ], + [ + 730, + 141 + ], + [ + 746, + 112 + ], + [ + 773, + 64 + ], + [ + 803, + 36 + ], + [ + 844, + 28 + ], + [ + 882, + 23 + ], + [ + 903, + 22 + ], + [ + 985, + 48 + ], + [ + 975, + 109 + ], + [ + 995, + 200 + ], + [ + 903, + 198 + ], + [ + 815, + 268 + ], + [ + 824, + 313 + ], + [ + 823, + 381 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 687, + 511 + ], + [ + 671, + 511 + ], + [ + 674, + 451 + ], + [ + 675, + 343 + ], + [ + 674, + 260 + ], + [ + 672, + 179 + ], + [ + 602, + 143 + ], 
+ [ + 543, + 121 + ], + [ + 551, + 99 + ], + [ + 550, + 86 + ], + [ + 542, + 83 + ], + [ + 521, + 74 + ], + [ + 524, + 67 + ], + [ + 540, + 70 + ], + [ + 543, + 56 + ], + [ + 544, + 39 + ], + [ + 560, + 39 + ], + [ + 567, + 32 + ], + [ + 563, + 21 + ], + [ + 576, + 5 + ], + [ + 585, + 0 + ], + [ + 716, + 0 + ], + [ + 721, + 11 + ], + [ + 736, + 114 + ], + [ + 715, + 137 + ], + [ + 694, + 168 + ], + [ + 682, + 284 + ], + [ + 680, + 309 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 597, + 103 + ], + [ + 610, + 103 + ], + [ + 621, + 530 + ], + [ + 602, + 531 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 692, + 105 + ], + [ + 692, + 140 + ], + [ + 506, + 142 + ], + [ + 505, + 110 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 692, + 145 + ], + [ + 692, + 195 + ], + [ + 506, + 195 + ], + [ + 504, + 149 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 506, + 248 + ], + [ + 507, + 201 + ], + [ + 695, + 197 + ], + [ + 694, + 246 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 508, + 303 + ], + [ + 508, + 254 + ], + [ + 699, + 251 + ], + [ + 698, + 299 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 426, + 555 + ], + [ + 397, + 557 + ], + [ + 402, + 478 + ], + [ + 406, + 364 + ], + [ + 408, + 266 + ], + [ + 408, + 235 + ], + [ + 408, + 192 + ], + [ + 410, + 180 + ], + [ + 415, + 166 + ], + [ + 410, + 132 + ], + [ + 407, + 114 + ], + [ + 411, + 102 + ], + [ + 411, + 58 + ], + [ + 405, + 43 + ], + [ + 389, + 14 + ], + [ + 380, + 0 + ], + [ + 489, + 0 + ], + [ + 489, + 6 + ], + [ + 479, + 20 + ], + [ + 465, + 39 + ], + [ + 452, + 56 + ], + [ + 439, + 58 + ], + [ + 430, + 65 + ], + [ + 429, + 84 + ], + [ + 432, + 103 + ], + [ + 436, + 106 + ], + [ + 435, + 111 + ], + [ + 433, + 124 + ], + [ + 435, + 132 + ], + [ + 436, + 137 + ], + [ + 432, + 147 + ], + [ + 432, + 159 + ], + [ + 435, + 199 + ], + [ + 431, + 220 + ], + [ + 427, + 293 + ], + [ + 425, + 346 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1109, + 470 + ], + [ + 1100, + 467 + ], + [ + 1087, + 457 + ], + [ + 1086, + 440 + ], + [ + 1090, + 435 + ], + [ + 1101, + 436 + ], + [ + 1105, + 442 + ], + [ + 1110, + 442 + ], + [ + 1112, + 444 + ], + [ + 1111, + 447 + ], + [ + 1109, + 449 + ], + [ + 1109, + 450 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1073, + 468 + ], + [ + 1065, + 470 + ], + [ + 1063, + 472 + ], + [ + 1057, + 472 + ], + [ + 1056, + 465 + ], + [ + 1056, + 453 + ], + [ + 1059, + 443 + ], + [ + 1062, + 438 + ], + [ + 1066, + 431 + ], + [ + 1083, + 430 + ], + [ + 1095, + 432 + ], + [ + 1095, + 436 + ], + [ + 1101, + 444 + ], + [ + 1104, + 458 + ], + [ + 1102, + 471 + ], + [ + 1099, + 472 + ], + [ + 1095, + 471 + ], + [ + 1094, + 466 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1147, + 465 + ], + [ + 1147, + 471 + ], + [ + 1142, + 470 + ], + [ + 1141, + 453 + ], + [ + 1141, + 444 + ], + [ + 1145, + 440 + ], + [ + 1148, + 432 + ], + [ + 1165, + 432 + ], + [ + 1173, + 431 + ], + [ + 1178, + 440 + ], + [ + 1181, + 443 + ], + [ + 1181, + 453 + ], + [ + 1181, + 468 + ], + [ + 1178, + 470 + ], + [ + 1174, + 468 + ], + [ + 1174, + 465 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 730, + 481 + ], + [ + 716, + 482 + ], + [ + 693, + 483 + ], + [ + 688, + 477 + ], + [ + 683, + 472 + ], + [ + 686, + 458 + ], + [ + 686, + 449 + ], + [ + 686, + 444 + ], + [ + 691, + 440 + ], + [ + 700, + 438 + ], + [ + 708, + 437 + ], + [ + 717, + 442 + ], + [ + 725, + 450 + ], + [ + 732, + 458 + ], + [ + 738, + 467 + ], + [ 
+ 740, + 477 + ], + [ + 738, + 480 + ], + [ + 734, + 481 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000143_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000143_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..b5ac5c1edfbe0112e1446e04996b93661491f9a5 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000143_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000143_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000143_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..018e4d22aca54afcab4007d01cd8864a693d7c04 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000143_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000143_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000143_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d9fbc71606bb35517afb54a6f8ec65bbc5dea912 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000143_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000144_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000144_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..5c76abaffb709c7b290f83c0aac017c57656b7ea Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000144_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000144_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000144_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..153f8d20882cdb59c34603354d7c09b59bc9b4f9 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000144_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000144_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000144_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..14fd6295956d4798e4dbe3ca0ed516a52709b7fe --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000144_000019_gtFine_polygons.json @@ -0,0 +1,8131 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 649, + 18 + ], + [ + 871, + 346 + ], + [ + 1114, + 357 + ], + [ + 1222, + 339 + ], + [ + 1317, + 0 + ], + [ + 634, + 0 + ] + ] + }, + { + "label": 
"road", + "polygon": [ + [ + 2040, + 597 + ], + [ + 1379, + 464 + ], + [ + 1223, + 439 + ], + [ + 1125, + 436 + ], + [ + 895, + 442 + ], + [ + 813, + 445 + ], + [ + 172, + 454 + ], + [ + 0, + 511 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 601 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2025, + 673 + ], + [ + 1758, + 604 + ], + [ + 1709, + 582 + ], + [ + 1363, + 480 + ], + [ + 1335, + 480 + ], + [ + 1315, + 481 + ], + [ + 1294, + 484 + ], + [ + 1271, + 477 + ], + [ + 1276, + 470 + ], + [ + 1296, + 466 + ], + [ + 1311, + 451 + ], + [ + 1314, + 436 + ], + [ + 1384, + 431 + ], + [ + 1939, + 477 + ], + [ + 2048, + 491 + ], + [ + 2048, + 681 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 21, + 616 + ], + [ + 494, + 534 + ], + [ + 514, + 524 + ], + [ + 505, + 513 + ], + [ + 432, + 508 + ], + [ + 363, + 506 + ], + [ + 280, + 505 + ], + [ + 241, + 504 + ], + [ + 0, + 516 + ], + [ + 0, + 624 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 255, + 494 + ], + [ + 508, + 494 + ], + [ + 589, + 492 + ], + [ + 689, + 489 + ], + [ + 752, + 485 + ], + [ + 792, + 479 + ], + [ + 814, + 473 + ], + [ + 810, + 467 + ], + [ + 797, + 464 + ], + [ + 801, + 460 + ], + [ + 824, + 444 + ], + [ + 759, + 435 + ], + [ + 200, + 454 + ], + [ + 230, + 498 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1044, + 333 + ], + [ + 1057, + 321 + ], + [ + 1068, + 321 + ], + [ + 1073, + 320 + ], + [ + 1066, + 313 + ], + [ + 1068, + 307 + ], + [ + 1076, + 308 + ], + [ + 1073, + 299 + ], + [ + 1075, + 298 + ], + [ + 1084, + 298 + ], + [ + 1083, + 275 + ], + [ + 1085, + 274 + ], + [ + 1086, + 299 + ], + [ + 1094, + 293 + ], + [ + 1100, + 294 + ], + [ + 1097, + 298 + ], + [ + 1096, + 306 + ], + [ + 1097, + 310 + ], + [ + 1100, + 310 + ], + [ + 1101, + 314 + ], + [ + 1099, + 319 + ], + [ + 1104, + 321 + ], + [ + 1122, + 316 + ], + [ + 1266, + 88 + ], + [ + 1349, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 526 + ], + [ + 1931, + 510 + ], + [ + 1852, + 502 + ], + [ + 1383, + 466 + ], + [ + 1321, + 452 + ], + [ + 1208, + 436 + ], + [ + 1188, + 442 + ], + [ + 1169, + 443 + ], + [ + 1141, + 443 + ], + [ + 988, + 441 + ], + [ + 901, + 448 + ], + [ + 848, + 447 + ], + [ + 798, + 446 + ], + [ + 760, + 451 + ], + [ + 704, + 459 + ], + [ + 685, + 463 + ], + [ + 652, + 463 + ], + [ + 636, + 466 + ], + [ + 605, + 468 + ], + [ + 583, + 475 + ], + [ + 572, + 475 + ], + [ + 478, + 481 + ], + [ + 344, + 480 + ], + [ + 247, + 478 + ], + [ + 244, + 478 + ], + [ + 246, + 511 + ], + [ + 48, + 528 + ], + [ + 0, + 534 + ], + [ + 0, + 0 + ], + [ + 748, + 0 + ], + [ + 752, + 2 + ], + [ + 754, + 9 + ], + [ + 777, + 8 + ], + [ + 781, + 8 + ], + [ + 782, + 11 + ], + [ + 778, + 17 + ], + [ + 778, + 18 + ], + [ + 812, + 66 + ], + [ + 812, + 72 + ], + [ + 808, + 81 + ], + [ + 814, + 107 + ], + [ + 975, + 289 + ], + [ + 988, + 290 + ], + [ + 1001, + 311 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1032, + 300 + ], + [ + 1029, + 298 + ], + [ + 1032, + 294 + ], + [ + 1026, + 291 + ], + [ + 1017, + 285 + ], + [ + 1003, + 284 + ], + [ + 993, + 288 + ], + [ + 986, + 296 + ], + [ + 980, + 300 + ], + [ + 977, + 316 + ], + [ + 978, + 327 + ], + [ + 983, + 336 + ], + [ + 983, + 340 + ], + [ + 982, + 351 + ], + [ + 983, + 357 + ], + [ + 988, + 366 + ], + [ + 986, + 367 + ], + [ + 984, + 376 + ], + [ + 987, + 385 + ], + [ + 989, + 395 + ], + [ + 990, + 405 + ], + [ + 997, + 406 + ], + [ + 1001, + 410 + ], + [ + 1006, + 415 + ], + [ + 1032, + 415 + ], + [ + 1040, + 406 + ], + [ + 1042, 
+ 401 + ], + [ + 1047, + 401 + ], + [ + 1054, + 400 + ], + [ + 1055, + 388 + ], + [ + 1056, + 381 + ], + [ + 1059, + 375 + ], + [ + 1064, + 373 + ], + [ + 1072, + 368 + ], + [ + 1073, + 359 + ], + [ + 1072, + 351 + ], + [ + 1069, + 345 + ], + [ + 1073, + 335 + ], + [ + 1071, + 326 + ], + [ + 1059, + 320 + ], + [ + 1057, + 312 + ], + [ + 1048, + 308 + ], + [ + 1048, + 303 + ], + [ + 1038, + 299 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1087, + 340 + ], + [ + 1079, + 342 + ], + [ + 1073, + 349 + ], + [ + 1072, + 355 + ], + [ + 1072, + 365 + ], + [ + 1074, + 368 + ], + [ + 1073, + 373 + ], + [ + 1072, + 381 + ], + [ + 1073, + 388 + ], + [ + 1076, + 396 + ], + [ + 1079, + 401 + ], + [ + 1086, + 399 + ], + [ + 1090, + 401 + ], + [ + 1104, + 400 + ], + [ + 1105, + 402 + ], + [ + 1107, + 408 + ], + [ + 1115, + 412 + ], + [ + 1126, + 415 + ], + [ + 1136, + 415 + ], + [ + 1140, + 408 + ], + [ + 1143, + 402 + ], + [ + 1150, + 402 + ], + [ + 1157, + 406 + ], + [ + 1162, + 401 + ], + [ + 1163, + 388 + ], + [ + 1150, + 388 + ], + [ + 1141, + 384 + ], + [ + 1141, + 379 + ], + [ + 1153, + 372 + ], + [ + 1154, + 356 + ], + [ + 1149, + 352 + ], + [ + 1149, + 344 + ], + [ + 1151, + 336 + ], + [ + 1148, + 327 + ], + [ + 1142, + 319 + ], + [ + 1121, + 319 + ], + [ + 1105, + 322 + ], + [ + 1095, + 322 + ], + [ + 1086, + 333 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1048, + 379 + ], + [ + 1058, + 377 + ], + [ + 1053, + 387 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1059, + 405 + ], + [ + 1058, + 388 + ], + [ + 1050, + 389 + ], + [ + 1048, + 408 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1037, + 379 + ], + [ + 1037, + 368 + ], + [ + 1032, + 369 + ], + [ + 1032, + 381 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1079, + 418 + ], + [ + 1079, + 406 + ], + [ + 1077, + 392 + ], + [ + 1077, + 381 + ], + [ + 1066, + 378 + ], + [ + 1045, + 372 + ], + [ + 1036, + 372 + ], + [ + 1035, + 374 + ], + [ + 1045, + 376 + ], + [ + 1071, + 382 + ], + [ + 1075, + 394 + ], + [ + 1075, + 413 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1050, + 360 + ], + [ + 1051, + 379 + ], + [ + 1059, + 378 + ], + [ + 1059, + 361 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1091, + 413 + ], + [ + 1091, + 397 + ], + [ + 1098, + 398 + ], + [ + 1100, + 398 + ], + [ + 1100, + 413 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 925, + 382 + ], + [ + 925, + 436 + ], + [ + 928, + 436 + ], + [ + 926, + 379 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 932, + 384 + ], + [ + 932, + 395 + ], + [ + 928, + 395 + ], + [ + 927, + 383 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 918, + 385 + ], + [ + 918, + 431 + ], + [ + 920, + 431 + ], + [ + 921, + 386 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 906, + 390 + ], + [ + 906, + 385 + ], + [ + 923, + 386 + ], + [ + 920, + 392 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 934, + 400 + ], + [ + 936, + 443 + ], + [ + 932, + 442 + ], + [ + 932, + 397 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 947, + 425 + ], + [ + 942, + 416 + ], + [ + 945, + 408 + ], + [ + 953, + 400 + ], + [ + 962, + 401 + ], + [ + 969, + 407 + ], + [ + 972, + 410 + ], + [ + 981, + 408 + ], + [ + 986, + 415 + ], + [ + 980, + 425 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 885, + 400 + ], + [ + 886, + 414 + ], + [ + 879, + 417 + ], + [ + 879, + 400 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 898, + 
430 + ], + [ + 900, + 417 + ], + [ + 900, + 398 + ], + [ + 903, + 375 + ], + [ + 911, + 369 + ], + [ + 923, + 368 + ], + [ + 940, + 361 + ], + [ + 948, + 353 + ], + [ + 944, + 340 + ], + [ + 948, + 335 + ], + [ + 953, + 331 + ], + [ + 954, + 317 + ], + [ + 951, + 311 + ], + [ + 962, + 311 + ], + [ + 972, + 311 + ], + [ + 981, + 313 + ], + [ + 987, + 303 + ], + [ + 992, + 290 + ], + [ + 995, + 280 + ], + [ + 1000, + 284 + ], + [ + 1003, + 291 + ], + [ + 1013, + 292 + ], + [ + 1014, + 282 + ], + [ + 1008, + 270 + ], + [ + 1007, + 261 + ], + [ + 1019, + 258 + ], + [ + 1020, + 251 + ], + [ + 1011, + 244 + ], + [ + 1010, + 241 + ], + [ + 1020, + 238 + ], + [ + 1017, + 228 + ], + [ + 1008, + 219 + ], + [ + 1017, + 217 + ], + [ + 1024, + 223 + ], + [ + 1039, + 224 + ], + [ + 1038, + 205 + ], + [ + 1026, + 204 + ], + [ + 1014, + 204 + ], + [ + 1007, + 205 + ], + [ + 1005, + 208 + ], + [ + 1000, + 206 + ], + [ + 1001, + 200 + ], + [ + 998, + 187 + ], + [ + 992, + 188 + ], + [ + 992, + 198 + ], + [ + 986, + 201 + ], + [ + 980, + 197 + ], + [ + 989, + 193 + ], + [ + 989, + 184 + ], + [ + 981, + 171 + ], + [ + 974, + 172 + ], + [ + 976, + 160 + ], + [ + 972, + 154 + ], + [ + 966, + 154 + ], + [ + 962, + 160 + ], + [ + 962, + 155 + ], + [ + 955, + 146 + ], + [ + 949, + 145 + ], + [ + 947, + 148 + ], + [ + 946, + 153 + ], + [ + 934, + 146 + ], + [ + 928, + 139 + ], + [ + 931, + 135 + ], + [ + 930, + 130 + ], + [ + 920, + 124 + ], + [ + 911, + 123 + ], + [ + 907, + 128 + ], + [ + 905, + 122 + ], + [ + 895, + 123 + ], + [ + 892, + 98 + ], + [ + 885, + 97 + ], + [ + 884, + 103 + ], + [ + 883, + 118 + ], + [ + 879, + 118 + ], + [ + 872, + 110 + ], + [ + 866, + 108 + ], + [ + 859, + 109 + ], + [ + 855, + 102 + ], + [ + 851, + 101 + ], + [ + 836, + 88 + ], + [ + 836, + 79 + ], + [ + 827, + 79 + ], + [ + 816, + 76 + ], + [ + 811, + 76 + ], + [ + 804, + 81 + ], + [ + 796, + 88 + ], + [ + 787, + 90 + ], + [ + 786, + 97 + ], + [ + 779, + 100 + ], + [ + 780, + 93 + ], + [ + 772, + 90 + ], + [ + 769, + 90 + ], + [ + 753, + 97 + ], + [ + 742, + 99 + ], + [ + 736, + 102 + ], + [ + 726, + 105 + ], + [ + 714, + 109 + ], + [ + 708, + 119 + ], + [ + 691, + 125 + ], + [ + 681, + 131 + ], + [ + 671, + 133 + ], + [ + 664, + 139 + ], + [ + 657, + 147 + ], + [ + 652, + 145 + ], + [ + 645, + 149 + ], + [ + 638, + 160 + ], + [ + 634, + 181 + ], + [ + 633, + 193 + ], + [ + 634, + 203 + ], + [ + 627, + 208 + ], + [ + 624, + 225 + ], + [ + 626, + 237 + ], + [ + 633, + 247 + ], + [ + 646, + 252 + ], + [ + 658, + 248 + ], + [ + 660, + 257 + ], + [ + 649, + 267 + ], + [ + 635, + 272 + ], + [ + 627, + 283 + ], + [ + 630, + 310 + ], + [ + 635, + 327 + ], + [ + 652, + 336 + ], + [ + 666, + 325 + ], + [ + 682, + 317 + ], + [ + 694, + 317 + ], + [ + 684, + 325 + ], + [ + 676, + 332 + ], + [ + 668, + 337 + ], + [ + 676, + 342 + ], + [ + 690, + 337 + ], + [ + 701, + 329 + ], + [ + 715, + 329 + ], + [ + 739, + 329 + ], + [ + 762, + 311 + ], + [ + 800, + 289 + ], + [ + 833, + 289 + ], + [ + 860, + 292 + ], + [ + 865, + 311 + ], + [ + 865, + 325 + ], + [ + 869, + 339 + ], + [ + 874, + 346 + ], + [ + 875, + 350 + ], + [ + 879, + 362 + ], + [ + 884, + 367 + ], + [ + 890, + 373 + ], + [ + 892, + 379 + ], + [ + 892, + 393 + ], + [ + 890, + 412 + ], + [ + 888, + 428 + ], + [ + 886, + 431 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 875, + 430 + ], + [ + 880, + 425 + ], + [ + 888, + 423 + ], + [ + 895, + 424 + ], + [ + 906, + 428 + ], + [ + 906, + 438 + ], + [ + 894, + 449 + ], + [ + 880, + 446 + ] + ] + }, + { + "label": "pole", + 
"polygon": [ + [ + 869, + 428 + ], + [ + 868, + 404 + ], + [ + 868, + 388 + ], + [ + 867, + 373 + ], + [ + 872, + 362 + ], + [ + 878, + 357 + ], + [ + 879, + 357 + ], + [ + 876, + 362 + ], + [ + 872, + 369 + ], + [ + 871, + 383 + ], + [ + 873, + 395 + ], + [ + 873, + 423 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 829, + 425 + ], + [ + 839, + 418 + ], + [ + 843, + 414 + ], + [ + 853, + 414 + ], + [ + 879, + 415 + ], + [ + 883, + 421 + ], + [ + 885, + 432 + ], + [ + 885, + 436 + ], + [ + 885, + 450 + ], + [ + 885, + 458 + ], + [ + 879, + 460 + ], + [ + 874, + 460 + ], + [ + 870, + 456 + ], + [ + 858, + 456 + ], + [ + 840, + 449 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 837, + 461 + ], + [ + 840, + 439 + ], + [ + 845, + 427 + ], + [ + 850, + 427 + ], + [ + 859, + 434 + ], + [ + 864, + 439 + ], + [ + 865, + 440 + ], + [ + 863, + 447 + ], + [ + 865, + 456 + ], + [ + 866, + 462 + ], + [ + 855, + 466 + ], + [ + 842, + 460 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 832, + 416 + ], + [ + 835, + 389 + ], + [ + 831, + 350 + ], + [ + 829, + 336 + ], + [ + 824, + 331 + ], + [ + 816, + 330 + ], + [ + 816, + 346 + ], + [ + 800, + 348 + ], + [ + 805, + 360 + ], + [ + 808, + 367 + ], + [ + 805, + 372 + ], + [ + 788, + 375 + ], + [ + 774, + 375 + ], + [ + 764, + 373 + ], + [ + 764, + 373 + ], + [ + 765, + 461 + ], + [ + 754, + 461 + ], + [ + 754, + 427 + ], + [ + 754, + 403 + ], + [ + 752, + 389 + ], + [ + 746, + 385 + ], + [ + 751, + 464 + ], + [ + 754, + 474 + ], + [ + 733, + 471 + ], + [ + 740, + 398 + ], + [ + 737, + 374 + ], + [ + 726, + 363 + ], + [ + 712, + 348 + ], + [ + 704, + 323 + ], + [ + 716, + 242 + ], + [ + 842, + 215 + ], + [ + 868, + 247 + ], + [ + 876, + 272 + ], + [ + 876, + 297 + ], + [ + 869, + 314 + ], + [ + 858, + 328 + ], + [ + 844, + 341 + ], + [ + 846, + 412 + ], + [ + 840, + 422 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 805, + 425 + ], + [ + 789, + 425 + ], + [ + 779, + 426 + ], + [ + 777, + 433 + ], + [ + 782, + 444 + ], + [ + 785, + 454 + ], + [ + 795, + 456 + ], + [ + 806, + 451 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 795, + 403 + ], + [ + 847, + 406 + ], + [ + 850, + 460 + ], + [ + 847, + 468 + ], + [ + 841, + 466 + ], + [ + 837, + 464 + ], + [ + 822, + 463 + ], + [ + 817, + 464 + ], + [ + 814, + 468 + ], + [ + 810, + 465 + ], + [ + 805, + 464 + ], + [ + 793, + 460 + ], + [ + 793, + 460 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 714, + 470 + ], + [ + 706, + 457 + ], + [ + 709, + 445 + ], + [ + 713, + 439 + ], + [ + 714, + 431 + ], + [ + 720, + 421 + ], + [ + 732, + 420 + ], + [ + 740, + 429 + ], + [ + 754, + 436 + ], + [ + 759, + 436 + ], + [ + 761, + 452 + ], + [ + 761, + 464 + ], + [ + 751, + 471 + ], + [ + 738, + 473 + ], + [ + 731, + 472 + ], + [ + 719, + 473 + ] + ] + }, + { + "label": "polegroup", + "polygon": [ + [ + 782, + 475 + ], + [ + 754, + 477 + ], + [ + 754, + 444 + ], + [ + 782, + 443 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 899, + 263 + ], + [ + 874, + 263 + ], + [ + 876, + 215 + ], + [ + 899, + 215 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 743, + 482 + ], + [ + 736, + 481 + ], + [ + 735, + 392 + ], + [ + 732, + 298 + ], + [ + 735, + 271 + ], + [ + 751, + 252 + ], + [ + 782, + 242 + ], + [ + 858, + 227 + ], + [ + 883, + 228 + ], + [ + 883, + 231 + ], + [ + 857, + 230 + ], + [ + 771, + 250 + ], + [ + 753, + 258 + ], + [ + 742, + 270 + ], + [ + 738, + 281 + ], + [ + 737, + 295 + ] + ] + }, + { + "label": "static", + "polygon": [ 
+ [ + 719, + 378 + ], + [ + 704, + 367 + ], + [ + 717, + 356 + ], + [ + 729, + 366 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 710, + 354 + ], + [ + 706, + 347 + ], + [ + 706, + 338 + ], + [ + 709, + 334 + ], + [ + 717, + 331 + ], + [ + 726, + 333 + ], + [ + 729, + 338 + ], + [ + 729, + 349 + ], + [ + 726, + 355 + ], + [ + 715, + 357 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 748, + 352 + ], + [ + 758, + 354 + ], + [ + 759, + 358 + ], + [ + 755, + 360 + ], + [ + 750, + 360 + ], + [ + 750, + 367 + ], + [ + 757, + 367 + ], + [ + 760, + 367 + ], + [ + 757, + 370 + ], + [ + 755, + 372 + ], + [ + 749, + 373 + ], + [ + 749, + 379 + ], + [ + 756, + 380 + ], + [ + 758, + 380 + ], + [ + 755, + 383 + ], + [ + 748, + 385 + ], + [ + 745, + 388 + ], + [ + 738, + 388 + ], + [ + 739, + 352 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 732, + 490 + ], + [ + 726, + 490 + ], + [ + 727, + 441 + ], + [ + 733, + 442 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 721, + 488 + ], + [ + 722, + 442 + ], + [ + 726, + 442 + ], + [ + 725, + 488 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 680, + 477 + ], + [ + 679, + 488 + ], + [ + 674, + 487 + ], + [ + 676, + 436 + ], + [ + 682, + 435 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 605, + 477 + ], + [ + 605, + 486 + ], + [ + 602, + 486 + ], + [ + 599, + 441 + ], + [ + 604, + 441 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 524, + 245 + ], + [ + 531, + 495 + ], + [ + 522, + 495 + ], + [ + 519, + 247 + ], + [ + 518, + 235 + ], + [ + 513, + 228 + ], + [ + 488, + 192 + ], + [ + 481, + 187 + ], + [ + 472, + 187 + ], + [ + 472, + 182 + ], + [ + 484, + 185 + ], + [ + 507, + 211 + ], + [ + 520, + 231 + ], + [ + 522, + 237 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 477, + 220 + ], + [ + 475, + 173 + ], + [ + 465, + 174 + ], + [ + 456, + 169 + ], + [ + 458, + 178 + ], + [ + 450, + 179 + ], + [ + 447, + 179 + ], + [ + 451, + 182 + ], + [ + 459, + 187 + ], + [ + 458, + 192 + ], + [ + 449, + 195 + ], + [ + 450, + 198 + ], + [ + 459, + 201 + ], + [ + 460, + 206 + ], + [ + 450, + 208 + ], + [ + 450, + 211 + ], + [ + 454, + 214 + ], + [ + 461, + 217 + ], + [ + 461, + 224 + ], + [ + 464, + 225 + ], + [ + 466, + 225 + ], + [ + 465, + 219 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 533, + 290 + ], + [ + 532, + 299 + ], + [ + 523, + 296 + ], + [ + 523, + 289 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 486, + 323 + ], + [ + 486, + 310 + ], + [ + 520, + 311 + ], + [ + 520, + 326 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 547, + 384 + ], + [ + 544, + 372 + ], + [ + 545, + 363 + ], + [ + 550, + 356 + ], + [ + 554, + 356 + ], + [ + 554, + 362 + ], + [ + 552, + 389 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 541, + 386 + ], + [ + 538, + 345 + ], + [ + 523, + 346 + ], + [ + 524, + 389 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 510, + 342 + ], + [ + 498, + 347 + ], + [ + 498, + 351 + ], + [ + 502, + 353 + ], + [ + 509, + 355 + ], + [ + 508, + 359 + ], + [ + 498, + 360 + ], + [ + 499, + 363 + ], + [ + 501, + 366 + ], + [ + 508, + 370 + ], + [ + 507, + 374 + ], + [ + 497, + 375 + ], + [ + 498, + 377 + ], + [ + 503, + 382 + ], + [ + 509, + 382 + ], + [ + 510, + 384 + ], + [ + 511, + 387 + ], + [ + 522, + 388 + ], + [ + 522, + 340 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 434, + 464 + ], + [ + 433, + 487 + ], + [ + 429, + 487 + ], + [ + 428, + 442 + ], + [ + 433, 
+ 441 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 448, + 333 + ], + [ + 443, + 336 + ], + [ + 438, + 339 + ], + [ + 437, + 346 + ], + [ + 437, + 353 + ], + [ + 439, + 357 + ], + [ + 447, + 360 + ], + [ + 454, + 360 + ], + [ + 461, + 357 + ], + [ + 465, + 353 + ], + [ + 466, + 346 + ], + [ + 462, + 338 + ], + [ + 458, + 334 + ], + [ + 453, + 333 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 454, + 369 + ], + [ + 453, + 481 + ], + [ + 449, + 481 + ], + [ + 449, + 334 + ], + [ + 454, + 334 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 435, + 439 + ], + [ + 447, + 440 + ], + [ + 453, + 440 + ], + [ + 464, + 442 + ], + [ + 458, + 447 + ], + [ + 450, + 450 + ], + [ + 450, + 462 + ], + [ + 452, + 469 + ], + [ + 464, + 470 + ], + [ + 465, + 483 + ], + [ + 465, + 494 + ], + [ + 460, + 508 + ], + [ + 453, + 511 + ], + [ + 450, + 498 + ], + [ + 449, + 483 + ], + [ + 446, + 479 + ], + [ + 444, + 496 + ], + [ + 443, + 509 + ], + [ + 436, + 511 + ], + [ + 435, + 492 + ], + [ + 435, + 477 + ], + [ + 436, + 464 + ], + [ + 441, + 460 + ], + [ + 440, + 446 + ], + [ + 430, + 446 + ], + [ + 423, + 441 + ], + [ + 428, + 439 + ] + ] + }, + { + "label": "polegroup", + "polygon": [ + [ + 477, + 452 + ], + [ + 478, + 524 + ], + [ + 472, + 523 + ], + [ + 469, + 515 + ], + [ + 464, + 515 + ], + [ + 465, + 453 + ], + [ + 473, + 451 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 456, + 527 + ], + [ + 448, + 527 + ], + [ + 449, + 452 + ], + [ + 454, + 453 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 406, + 434 + ], + [ + 407, + 489 + ], + [ + 373, + 486 + ], + [ + 372, + 434 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 347, + 445 + ], + [ + 346, + 486 + ], + [ + 342, + 485 + ], + [ + 342, + 445 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 293, + 266 + ], + [ + 299, + 491 + ], + [ + 291, + 490 + ], + [ + 289, + 252 + ], + [ + 283, + 222 + ], + [ + 276, + 195 + ], + [ + 264, + 181 + ], + [ + 255, + 172 + ], + [ + 259, + 170 + ], + [ + 267, + 177 + ], + [ + 281, + 199 + ], + [ + 287, + 220 + ], + [ + 292, + 249 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 252, + 153 + ], + [ + 257, + 155 + ], + [ + 257, + 159 + ], + [ + 258, + 161 + ], + [ + 264, + 173 + ], + [ + 263, + 176 + ], + [ + 256, + 177 + ], + [ + 242, + 175 + ], + [ + 242, + 161 + ], + [ + 248, + 159 + ], + [ + 248, + 156 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 351, + 509 + ], + [ + 361, + 509 + ], + [ + 359, + 385 + ], + [ + 357, + 312 + ], + [ + 349, + 314 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 379, + 340 + ], + [ + 376, + 325 + ], + [ + 367, + 317 + ], + [ + 356, + 317 + ], + [ + 353, + 334 + ], + [ + 355, + 348 + ], + [ + 367, + 349 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 383, + 358 + ], + [ + 381, + 348 + ], + [ + 375, + 342 + ], + [ + 367, + 342 + ], + [ + 358, + 347 + ], + [ + 352, + 355 + ], + [ + 353, + 367 + ], + [ + 357, + 375 + ], + [ + 367, + 377 + ], + [ + 377, + 372 + ], + [ + 381, + 366 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 420, + 524 + ], + [ + 419, + 534 + ], + [ + 408, + 533 + ], + [ + 416, + 450 + ], + [ + 420, + 450 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 244, + 411 + ], + [ + 245, + 510 + ], + [ + 251, + 510 + ], + [ + 253, + 411 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 271, + 455 + ], + [ + 267, + 456 + ], + [ + 268, + 511 + ], + [ + 272, + 511 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 335, + 543 + ], + [ + 
317, + 542 + ], + [ + 317, + 468 + ], + [ + 321, + 463 + ], + [ + 313, + 243 + ], + [ + 323, + 244 + ], + [ + 327, + 461 + ], + [ + 334, + 464 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 331, + 260 + ], + [ + 330, + 346 + ], + [ + 353, + 344 + ], + [ + 353, + 334 + ], + [ + 367, + 332 + ], + [ + 372, + 325 + ], + [ + 367, + 322 + ], + [ + 358, + 318 + ], + [ + 357, + 313 + ], + [ + 356, + 308 + ], + [ + 373, + 305 + ], + [ + 372, + 296 + ], + [ + 363, + 294 + ], + [ + 353, + 291 + ], + [ + 353, + 283 + ], + [ + 368, + 280 + ], + [ + 370, + 272 + ], + [ + 368, + 270 + ], + [ + 345, + 266 + ], + [ + 341, + 261 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 305, + 260 + ], + [ + 307, + 347 + ], + [ + 331, + 347 + ], + [ + 330, + 259 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 225, + 138 + ], + [ + 229, + 522 + ], + [ + 255, + 551 + ], + [ + 250, + 553 + ], + [ + 221, + 560 + ], + [ + 192, + 558 + ], + [ + 83, + 548 + ], + [ + 92, + 505 + ], + [ + 87, + 136 + ], + [ + 83, + 131 + ], + [ + 59, + 107 + ], + [ + 63, + 100 + ], + [ + 103, + 95 + ], + [ + 212, + 98 + ], + [ + 238, + 104 + ], + [ + 249, + 111 + ], + [ + 253, + 116 + ], + [ + 243, + 118 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 10, + 217 + ], + [ + 35, + 193 + ], + [ + 47, + 170 + ], + [ + 59, + 157 + ], + [ + 75, + 154 + ], + [ + 79, + 162 + ], + [ + 83, + 178 + ], + [ + 90, + 179 + ], + [ + 88, + 156 + ], + [ + 85, + 137 + ], + [ + 85, + 137 + ], + [ + 68, + 130 + ], + [ + 45, + 128 + ], + [ + 35, + 118 + ], + [ + 44, + 109 + ], + [ + 59, + 88 + ], + [ + 69, + 73 + ], + [ + 57, + 56 + ], + [ + 74, + 48 + ], + [ + 82, + 54 + ], + [ + 76, + 72 + ], + [ + 76, + 93 + ], + [ + 101, + 116 + ], + [ + 114, + 126 + ], + [ + 118, + 136 + ], + [ + 115, + 145 + ], + [ + 107, + 156 + ], + [ + 110, + 187 + ], + [ + 118, + 207 + ], + [ + 141, + 217 + ], + [ + 153, + 219 + ], + [ + 177, + 214 + ], + [ + 180, + 191 + ], + [ + 161, + 183 + ], + [ + 150, + 173 + ], + [ + 151, + 169 + ], + [ + 173, + 168 + ], + [ + 184, + 155 + ], + [ + 178, + 140 + ], + [ + 170, + 118 + ], + [ + 178, + 106 + ], + [ + 197, + 109 + ], + [ + 219, + 114 + ], + [ + 228, + 108 + ], + [ + 228, + 94 + ], + [ + 240, + 96 + ], + [ + 256, + 92 + ], + [ + 261, + 78 + ], + [ + 265, + 66 + ], + [ + 278, + 56 + ], + [ + 296, + 54 + ], + [ + 321, + 48 + ], + [ + 327, + 45 + ], + [ + 340, + 51 + ], + [ + 363, + 47 + ], + [ + 379, + 37 + ], + [ + 363, + 22 + ], + [ + 350, + 19 + ], + [ + 342, + 21 + ], + [ + 336, + 12 + ], + [ + 316, + 11 + ], + [ + 305, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 211 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 215, + 572 + ], + [ + 0, + 607 + ], + [ + 0, + 504 + ], + [ + 4, + 498 + ], + [ + 13, + 496 + ], + [ + 32, + 505 + ], + [ + 45, + 508 + ], + [ + 61, + 504 + ], + [ + 75, + 497 + ], + [ + 93, + 499 + ], + [ + 103, + 501 + ], + [ + 117, + 493 + ], + [ + 132, + 493 + ], + [ + 144, + 497 + ], + [ + 153, + 499 + ], + [ + 163, + 491 + ], + [ + 173, + 491 + ], + [ + 185, + 502 + ], + [ + 194, + 514 + ], + [ + 204, + 518 + ], + [ + 221, + 524 + ], + [ + 231, + 544 + ], + [ + 225, + 566 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 892, + 464 + ], + [ + 892, + 470 + ], + [ + 887, + 471 + ], + [ + 880, + 469 + ], + [ + 878, + 462 + ], + [ + 879, + 450 + ], + [ + 882, + 442 + ], + [ + 886, + 436 + ], + [ + 889, + 425 + ], + [ + 895, + 422 + ], + [ + 928, + 423 + ], + [ + 934, + 427 + ], + [ + 937, + 434 + ], + [ + 940, + 444 + ], + [ + 942, + 464 + ], + [ + 
937, + 469 + ], + [ + 932, + 469 + ], + [ + 929, + 467 + ], + [ + 929, + 464 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 950, + 470 + ], + [ + 950, + 476 + ], + [ + 935, + 476 + ], + [ + 933, + 475 + ], + [ + 930, + 460 + ], + [ + 933, + 449 + ], + [ + 937, + 440 + ], + [ + 934, + 439 + ], + [ + 932, + 437 + ], + [ + 932, + 435 + ], + [ + 935, + 432 + ], + [ + 940, + 432 + ], + [ + 944, + 422 + ], + [ + 949, + 414 + ], + [ + 953, + 411 + ], + [ + 968, + 410 + ], + [ + 982, + 411 + ], + [ + 988, + 412 + ], + [ + 996, + 413 + ], + [ + 996, + 420 + ], + [ + 998, + 434 + ], + [ + 998, + 461 + ], + [ + 987, + 471 + ], + [ + 979, + 470 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1170, + 386 + ], + [ + 1170, + 441 + ], + [ + 1165, + 442 + ], + [ + 1166, + 378 + ], + [ + 1169, + 379 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1175, + 394 + ], + [ + 1179, + 395 + ], + [ + 1181, + 451 + ], + [ + 1177, + 451 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1182, + 410 + ], + [ + 1183, + 414 + ], + [ + 1179, + 417 + ], + [ + 1176, + 414 + ], + [ + 1176, + 410 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1209, + 420 + ], + [ + 1201, + 420 + ], + [ + 1185, + 422 + ], + [ + 1180, + 422 + ], + [ + 1177, + 426 + ], + [ + 1175, + 430 + ], + [ + 1173, + 434 + ], + [ + 1173, + 443 + ], + [ + 1173, + 448 + ], + [ + 1179, + 450 + ], + [ + 1188, + 450 + ], + [ + 1195, + 449 + ], + [ + 1205, + 450 + ], + [ + 1207, + 450 + ], + [ + 1211, + 440 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1204, + 457 + ], + [ + 1197, + 453 + ], + [ + 1191, + 449 + ], + [ + 1180, + 447 + ], + [ + 1180, + 437 + ], + [ + 1188, + 433 + ], + [ + 1193, + 426 + ], + [ + 1195, + 421 + ], + [ + 1202, + 433 + ], + [ + 1207, + 421 + ], + [ + 1199, + 420 + ], + [ + 1188, + 416 + ], + [ + 1189, + 411 + ], + [ + 1197, + 409 + ], + [ + 1197, + 398 + ], + [ + 1194, + 393 + ], + [ + 1193, + 382 + ], + [ + 1192, + 370 + ], + [ + 1183, + 373 + ], + [ + 1184, + 379 + ], + [ + 1183, + 388 + ], + [ + 1180, + 389 + ], + [ + 1178, + 395 + ], + [ + 1172, + 400 + ], + [ + 1169, + 394 + ], + [ + 1163, + 389 + ], + [ + 1158, + 392 + ], + [ + 1158, + 383 + ], + [ + 1155, + 381 + ], + [ + 1155, + 381 + ], + [ + 1146, + 381 + ], + [ + 1139, + 374 + ], + [ + 1147, + 342 + ], + [ + 1150, + 336 + ], + [ + 1159, + 330 + ], + [ + 1157, + 325 + ], + [ + 1152, + 327 + ], + [ + 1133, + 330 + ], + [ + 1117, + 322 + ], + [ + 1115, + 317 + ], + [ + 1108, + 313 + ], + [ + 1106, + 309 + ], + [ + 1114, + 308 + ], + [ + 1116, + 301 + ], + [ + 1108, + 301 + ], + [ + 1108, + 291 + ], + [ + 1101, + 291 + ], + [ + 1095, + 288 + ], + [ + 1089, + 284 + ], + [ + 1082, + 281 + ], + [ + 1075, + 279 + ], + [ + 1072, + 279 + ], + [ + 1066, + 284 + ], + [ + 1061, + 291 + ], + [ + 1053, + 294 + ], + [ + 1042, + 294 + ], + [ + 1041, + 288 + ], + [ + 1043, + 283 + ], + [ + 1043, + 280 + ], + [ + 1033, + 277 + ], + [ + 1032, + 270 + ], + [ + 1036, + 261 + ], + [ + 1039, + 253 + ], + [ + 1044, + 250 + ], + [ + 1033, + 246 + ], + [ + 1028, + 237 + ], + [ + 1039, + 236 + ], + [ + 1045, + 232 + ], + [ + 1040, + 229 + ], + [ + 1034, + 229 + ], + [ + 1029, + 218 + ], + [ + 1028, + 205 + ], + [ + 1030, + 201 + ], + [ + 1034, + 196 + ], + [ + 1037, + 187 + ], + [ + 1038, + 173 + ], + [ + 1038, + 168 + ], + [ + 1043, + 167 + ], + [ + 1041, + 163 + ], + [ + 1041, + 160 + ], + [ + 1035, + 159 + ], + [ + 1031, + 160 + ], + [ + 1028, + 152 + ], + [ + 1028, + 145 + ], + [ + 1034, + 145 + ], + [ + 1041, + 141 + ], + [ + 1040, + 137 
+ ], + [ + 1032, + 134 + ], + [ + 1028, + 129 + ], + [ + 1029, + 122 + ], + [ + 1029, + 116 + ], + [ + 1031, + 115 + ], + [ + 1037, + 118 + ], + [ + 1045, + 120 + ], + [ + 1045, + 112 + ], + [ + 1045, + 108 + ], + [ + 1046, + 108 + ], + [ + 1050, + 110 + ], + [ + 1050, + 102 + ], + [ + 1053, + 99 + ], + [ + 1054, + 97 + ], + [ + 1054, + 92 + ], + [ + 1054, + 90 + ], + [ + 1061, + 89 + ], + [ + 1068, + 91 + ], + [ + 1069, + 90 + ], + [ + 1066, + 82 + ], + [ + 1072, + 76 + ], + [ + 1080, + 76 + ], + [ + 1086, + 80 + ], + [ + 1091, + 89 + ], + [ + 1168, + 123 + ], + [ + 1216, + 161 + ], + [ + 1226, + 202 + ], + [ + 1236, + 243 + ], + [ + 1248, + 274 + ], + [ + 1257, + 296 + ], + [ + 1269, + 296 + ], + [ + 1279, + 294 + ], + [ + 1289, + 293 + ], + [ + 1298, + 278 + ], + [ + 1308, + 276 + ], + [ + 1317, + 278 + ], + [ + 1311, + 287 + ], + [ + 1322, + 289 + ], + [ + 1331, + 295 + ], + [ + 1334, + 312 + ], + [ + 1327, + 337 + ], + [ + 1309, + 350 + ], + [ + 1295, + 373 + ], + [ + 1298, + 389 + ], + [ + 1309, + 398 + ], + [ + 1323, + 408 + ], + [ + 1314, + 413 + ], + [ + 1284, + 417 + ], + [ + 1270, + 410 + ], + [ + 1262, + 402 + ], + [ + 1261, + 392 + ], + [ + 1258, + 378 + ], + [ + 1253, + 369 + ], + [ + 1244, + 355 + ], + [ + 1243, + 347 + ], + [ + 1240, + 342 + ], + [ + 1234, + 342 + ], + [ + 1230, + 359 + ], + [ + 1232, + 374 + ], + [ + 1242, + 380 + ], + [ + 1251, + 389 + ], + [ + 1255, + 394 + ], + [ + 1255, + 404 + ], + [ + 1248, + 416 + ], + [ + 1241, + 424 + ], + [ + 1225, + 438 + ], + [ + 1221, + 453 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1265, + 424 + ], + [ + 1263, + 393 + ], + [ + 1249, + 286 + ], + [ + 1225, + 250 + ], + [ + 1081, + 92 + ], + [ + 1079, + 76 + ], + [ + 1081, + 71 + ], + [ + 1075, + 66 + ], + [ + 1073, + 58 + ], + [ + 1079, + 46 + ], + [ + 1091, + 46 + ], + [ + 1098, + 37 + ], + [ + 1102, + 31 + ], + [ + 1093, + 26 + ], + [ + 1092, + 20 + ], + [ + 1101, + 14 + ], + [ + 1103, + 9 + ], + [ + 1097, + 2 + ], + [ + 1095, + 0 + ], + [ + 1385, + 0 + ], + [ + 1395, + 0 + ], + [ + 1410, + 6 + ], + [ + 1419, + 11 + ], + [ + 1435, + 20 + ], + [ + 1445, + 32 + ], + [ + 1453, + 38 + ], + [ + 1454, + 49 + ], + [ + 1432, + 63 + ], + [ + 1437, + 76 + ], + [ + 1449, + 80 + ], + [ + 1445, + 86 + ], + [ + 1433, + 88 + ], + [ + 1415, + 82 + ], + [ + 1402, + 72 + ], + [ + 1393, + 76 + ], + [ + 1392, + 88 + ], + [ + 1398, + 92 + ], + [ + 1411, + 90 + ], + [ + 1422, + 93 + ], + [ + 1413, + 103 + ], + [ + 1395, + 104 + ], + [ + 1384, + 113 + ], + [ + 1385, + 119 + ], + [ + 1394, + 119 + ], + [ + 1392, + 130 + ], + [ + 1387, + 135 + ], + [ + 1391, + 135 + ], + [ + 1391, + 143 + ], + [ + 1384, + 149 + ], + [ + 1376, + 150 + ], + [ + 1372, + 158 + ], + [ + 1368, + 166 + ], + [ + 1359, + 172 + ], + [ + 1342, + 175 + ], + [ + 1354, + 181 + ], + [ + 1367, + 181 + ], + [ + 1371, + 194 + ], + [ + 1382, + 205 + ], + [ + 1386, + 211 + ], + [ + 1395, + 213 + ], + [ + 1393, + 220 + ], + [ + 1386, + 227 + ], + [ + 1381, + 228 + ], + [ + 1390, + 237 + ], + [ + 1390, + 244 + ], + [ + 1377, + 244 + ], + [ + 1374, + 247 + ], + [ + 1373, + 254 + ], + [ + 1381, + 260 + ], + [ + 1389, + 260 + ], + [ + 1390, + 265 + ], + [ + 1385, + 268 + ], + [ + 1367, + 267 + ], + [ + 1357, + 263 + ], + [ + 1347, + 256 + ], + [ + 1348, + 260 + ], + [ + 1351, + 268 + ], + [ + 1347, + 275 + ], + [ + 1337, + 276 + ], + [ + 1329, + 271 + ], + [ + 1318, + 271 + ], + [ + 1317, + 278 + ], + [ + 1302, + 273 + ], + [ + 1288, + 279 + ], + [ + 1274, + 287 + ], + [ + 1265, + 298 + ], + [ + 1266, + 311 + ], + [ + 
1281, + 430 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1231, + 416 + ], + [ + 1221, + 418 + ], + [ + 1214, + 423 + ], + [ + 1210, + 430 + ], + [ + 1210, + 437 + ], + [ + 1209, + 450 + ], + [ + 1210, + 457 + ], + [ + 1213, + 460 + ], + [ + 1219, + 461 + ], + [ + 1224, + 453 + ], + [ + 1234, + 430 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1238, + 418 + ], + [ + 1230, + 418 + ], + [ + 1224, + 426 + ], + [ + 1221, + 433 + ], + [ + 1219, + 442 + ], + [ + 1219, + 455 + ], + [ + 1220, + 463 + ], + [ + 1224, + 464 + ], + [ + 1234, + 459 + ], + [ + 1241, + 436 + ], + [ + 1246, + 422 + ], + [ + 1248, + 418 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1249, + 425 + ], + [ + 1249, + 373 + ], + [ + 1252, + 373 + ], + [ + 1254, + 426 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1233, + 386 + ], + [ + 1232, + 340 + ], + [ + 1234, + 339 + ], + [ + 1239, + 339 + ], + [ + 1249, + 348 + ], + [ + 1254, + 353 + ], + [ + 1256, + 361 + ], + [ + 1256, + 373 + ], + [ + 1260, + 383 + ], + [ + 1261, + 386 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1249, + 464 + ], + [ + 1245, + 466 + ], + [ + 1241, + 467 + ], + [ + 1235, + 468 + ], + [ + 1231, + 457 + ], + [ + 1225, + 439 + ], + [ + 1229, + 433 + ], + [ + 1231, + 423 + ], + [ + 1238, + 419 + ], + [ + 1250, + 418 + ], + [ + 1257, + 418 + ], + [ + 1258, + 419 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1322, + 432 + ], + [ + 1321, + 422 + ], + [ + 1317, + 410 + ], + [ + 1309, + 402 + ], + [ + 1299, + 401 + ], + [ + 1263, + 403 + ], + [ + 1253, + 407 + ], + [ + 1250, + 417 + ], + [ + 1246, + 426 + ], + [ + 1244, + 425 + ], + [ + 1240, + 425 + ], + [ + 1239, + 432 + ], + [ + 1242, + 436 + ], + [ + 1239, + 456 + ], + [ + 1239, + 467 + ], + [ + 1242, + 473 + ], + [ + 1249, + 473 + ], + [ + 1251, + 466 + ], + [ + 1255, + 464 + ], + [ + 1254, + 471 + ], + [ + 1265, + 477 + ], + [ + 1269, + 472 + ], + [ + 1272, + 467 + ], + [ + 1273, + 464 + ], + [ + 1285, + 464 + ], + [ + 1308, + 464 + ], + [ + 1310, + 466 + ], + [ + 1310, + 470 + ], + [ + 1320, + 469 + ], + [ + 1322, + 467 + ], + [ + 1326, + 466 + ], + [ + 1327, + 463 + ], + [ + 1327, + 455 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1290, + 436 + ], + [ + 1291, + 474 + ], + [ + 1288, + 474 + ], + [ + 1286, + 436 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1308, + 371 + ], + [ + 1308, + 477 + ], + [ + 1313, + 478 + ], + [ + 1314, + 479 + ], + [ + 1314, + 369 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1309, + 344 + ], + [ + 1314, + 347 + ], + [ + 1312, + 384 + ], + [ + 1299, + 383 + ], + [ + 1298, + 379 + ], + [ + 1289, + 378 + ], + [ + 1289, + 375 + ], + [ + 1295, + 375 + ], + [ + 1297, + 367 + ], + [ + 1292, + 366 + ], + [ + 1291, + 363 + ], + [ + 1299, + 362 + ], + [ + 1299, + 354 + ], + [ + 1290, + 354 + ], + [ + 1287, + 349 + ], + [ + 1298, + 349 + ], + [ + 1300, + 347 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1317, + 367 + ], + [ + 1321, + 373 + ], + [ + 1321, + 376 + ], + [ + 1317, + 380 + ], + [ + 1310, + 382 + ], + [ + 1304, + 381 + ], + [ + 1301, + 377 + ], + [ + 1300, + 371 + ], + [ + 1302, + 367 + ], + [ + 1306, + 365 + ], + [ + 1311, + 365 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1857, + 383 + ], + [ + 1932, + 385 + ], + [ + 1931, + 514 + ], + [ + 1900, + 516 + ], + [ + 1854, + 508 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1726, + 339 + ], + [ + 1723, + 331 + ], + [ + 1715, + 332 + ], + [ + 1713, + 344 + ], + [ + 1712, + 
367 + ], + [ + 1720, + 368 + ], + [ + 1728, + 365 + ], + [ + 1726, + 348 + ], + [ + 1723, + 346 + ], + [ + 1726, + 344 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1485, + 394 + ], + [ + 1479, + 379 + ], + [ + 1480, + 351 + ], + [ + 1485, + 315 + ], + [ + 1485, + 298 + ], + [ + 1476, + 290 + ], + [ + 1478, + 281 + ], + [ + 1467, + 273 + ], + [ + 1465, + 265 + ], + [ + 1473, + 256 + ], + [ + 1475, + 250 + ], + [ + 1465, + 239 + ], + [ + 1454, + 236 + ], + [ + 1459, + 216 + ], + [ + 1460, + 210 + ], + [ + 1445, + 202 + ], + [ + 1450, + 190 + ], + [ + 1465, + 190 + ], + [ + 1463, + 184 + ], + [ + 1456, + 181 + ], + [ + 1456, + 169 + ], + [ + 1461, + 163 + ], + [ + 1456, + 150 + ], + [ + 1468, + 141 + ], + [ + 1470, + 131 + ], + [ + 1468, + 109 + ], + [ + 1422, + 74 + ], + [ + 1390, + 0 + ], + [ + 1440, + 0 + ], + [ + 1549, + 0 + ], + [ + 1547, + 7 + ], + [ + 1544, + 22 + ], + [ + 1546, + 31 + ], + [ + 1554, + 17 + ], + [ + 1563, + 16 + ], + [ + 1571, + 19 + ], + [ + 1573, + 7 + ], + [ + 1571, + 0 + ], + [ + 1754, + 0 + ], + [ + 1755, + 7 + ], + [ + 1754, + 14 + ], + [ + 1764, + 15 + ], + [ + 1769, + 27 + ], + [ + 1769, + 39 + ], + [ + 1754, + 46 + ], + [ + 1750, + 260 + ], + [ + 1750, + 276 + ], + [ + 1750, + 288 + ], + [ + 1741, + 289 + ], + [ + 1740, + 297 + ], + [ + 1745, + 298 + ], + [ + 1748, + 298 + ], + [ + 1747, + 311 + ], + [ + 1737, + 317 + ], + [ + 1727, + 332 + ], + [ + 1720, + 339 + ], + [ + 1715, + 352 + ], + [ + 1715, + 365 + ], + [ + 1722, + 375 + ], + [ + 1727, + 379 + ], + [ + 1732, + 366 + ], + [ + 1741, + 358 + ], + [ + 1743, + 352 + ], + [ + 1736, + 345 + ], + [ + 1749, + 334 + ], + [ + 1764, + 333 + ], + [ + 1778, + 353 + ], + [ + 1775, + 392 + ], + [ + 1765, + 437 + ], + [ + 1755, + 450 + ], + [ + 1757, + 464 + ], + [ + 1762, + 467 + ], + [ + 1785, + 459 + ], + [ + 1808, + 456 + ], + [ + 1849, + 464 + ], + [ + 1869, + 480 + ], + [ + 1871, + 507 + ], + [ + 1871, + 519 + ], + [ + 1874, + 525 + ], + [ + 1882, + 533 + ], + [ + 1874, + 544 + ], + [ + 1888, + 554 + ], + [ + 1888, + 564 + ], + [ + 1858, + 566 + ], + [ + 1819, + 566 + ], + [ + 1777, + 564 + ], + [ + 1759, + 563 + ], + [ + 1726, + 549 + ], + [ + 1668, + 397 + ], + [ + 1664, + 383 + ], + [ + 1663, + 367 + ], + [ + 1671, + 357 + ], + [ + 1681, + 356 + ], + [ + 1688, + 348 + ], + [ + 1693, + 341 + ], + [ + 1692, + 325 + ], + [ + 1680, + 301 + ], + [ + 1667, + 287 + ], + [ + 1651, + 277 + ], + [ + 1641, + 263 + ], + [ + 1560, + 286 + ], + [ + 1525, + 290 + ], + [ + 1516, + 297 + ], + [ + 1509, + 307 + ], + [ + 1505, + 340 + ], + [ + 1506, + 384 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1686, + 187 + ], + [ + 1687, + 0 + ], + [ + 1750, + 0 + ], + [ + 1748, + 187 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1598, + 383 + ], + [ + 1598, + 251 + ], + [ + 1607, + 208 + ], + [ + 1622, + 163 + ], + [ + 1630, + 154 + ], + [ + 1639, + 151 + ], + [ + 1639, + 155 + ], + [ + 1627, + 163 + ], + [ + 1617, + 186 + ], + [ + 1608, + 219 + ], + [ + 1606, + 254 + ], + [ + 1605, + 402 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1633, + 191 + ], + [ + 1634, + 145 + ], + [ + 1646, + 143 + ], + [ + 1649, + 143 + ], + [ + 1650, + 145 + ], + [ + 1661, + 146 + ], + [ + 1661, + 150 + ], + [ + 1652, + 150 + ], + [ + 1652, + 161 + ], + [ + 1659, + 162 + ], + [ + 1659, + 166 + ], + [ + 1650, + 167 + ], + [ + 1650, + 175 + ], + [ + 1662, + 177 + ], + [ + 1662, + 181 + ], + [ + 1651, + 185 + ], + [ + 1649, + 193 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 
1647, + 251 + ], + [ + 1647, + 265 + ], + [ + 1607, + 265 + ], + [ + 1607, + 251 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1609, + 366 + ], + [ + 1633, + 367 + ], + [ + 1641, + 365 + ], + [ + 1635, + 358 + ], + [ + 1633, + 355 + ], + [ + 1639, + 351 + ], + [ + 1640, + 346 + ], + [ + 1633, + 341 + ], + [ + 1637, + 338 + ], + [ + 1640, + 333 + ], + [ + 1629, + 329 + ], + [ + 1623, + 328 + ], + [ + 1624, + 322 + ], + [ + 1630, + 322 + ], + [ + 1634, + 313 + ], + [ + 1617, + 312 + ], + [ + 1602, + 313 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1593, + 363 + ], + [ + 1567, + 362 + ], + [ + 1568, + 307 + ], + [ + 1598, + 309 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1618, + 356 + ], + [ + 1605, + 309 + ], + [ + 1633, + 306 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1639, + 203 + ], + [ + 1649, + 212 + ], + [ + 1651, + 222 + ], + [ + 1651, + 238 + ], + [ + 1646, + 247 + ], + [ + 1636, + 254 + ], + [ + 1625, + 257 + ], + [ + 1609, + 257 + ], + [ + 1597, + 248 + ], + [ + 1592, + 235 + ], + [ + 1592, + 219 + ], + [ + 1599, + 208 + ], + [ + 1614, + 201 + ], + [ + 1625, + 201 + ], + [ + 1635, + 202 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1623, + 304 + ], + [ + 1597, + 278 + ], + [ + 1624, + 255 + ], + [ + 1651, + 279 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1571, + 381 + ], + [ + 1571, + 97 + ], + [ + 1570, + 78 + ], + [ + 1560, + 62 + ], + [ + 1542, + 41 + ], + [ + 1528, + 34 + ], + [ + 1521, + 29 + ], + [ + 1416, + 4 + ], + [ + 1393, + 0 + ], + [ + 1414, + 0 + ], + [ + 1426, + 0 + ], + [ + 1433, + 1 + ], + [ + 1527, + 25 + ], + [ + 1545, + 35 + ], + [ + 1564, + 53 + ], + [ + 1578, + 80 + ], + [ + 1579, + 110 + ], + [ + 1577, + 390 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1560, + 253 + ], + [ + 1562, + 338 + ], + [ + 1546, + 336 + ], + [ + 1543, + 332 + ], + [ + 1543, + 328 + ], + [ + 1526, + 320 + ], + [ + 1523, + 313 + ], + [ + 1523, + 312 + ], + [ + 1543, + 310 + ], + [ + 1543, + 298 + ], + [ + 1527, + 296 + ], + [ + 1524, + 292 + ], + [ + 1524, + 288 + ], + [ + 1526, + 285 + ], + [ + 1542, + 284 + ], + [ + 1543, + 274 + ], + [ + 1529, + 272 + ], + [ + 1524, + 268 + ], + [ + 1524, + 261 + ], + [ + 1525, + 260 + ], + [ + 1544, + 257 + ], + [ + 1544, + 256 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1555, + 246 + ], + [ + 1585, + 245 + ], + [ + 1589, + 335 + ], + [ + 1558, + 337 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1235, + 45 + ], + [ + 1233, + 0 + ], + [ + 1278, + 0 + ], + [ + 1280, + 13 + ], + [ + 1280, + 45 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 2012, + 574 + ], + [ + 1949, + 567 + ], + [ + 1950, + 506 + ], + [ + 1948, + 411 + ], + [ + 1925, + 254 + ], + [ + 1905, + 214 + ], + [ + 1880, + 157 + ], + [ + 1878, + 126 + ], + [ + 1881, + 96 + ], + [ + 1874, + 58 + ], + [ + 1866, + 27 + ], + [ + 1857, + 1 + ], + [ + 1855, + 0 + ], + [ + 1888, + 0 + ], + [ + 1887, + 2 + ], + [ + 1889, + 19 + ], + [ + 1894, + 17 + ], + [ + 1895, + 0 + ], + [ + 1922, + 0 + ], + [ + 1920, + 6 + ], + [ + 1913, + 64 + ], + [ + 1916, + 120 + ], + [ + 1923, + 154 + ], + [ + 1942, + 206 + ], + [ + 1951, + 235 + ], + [ + 1970, + 331 + ], + [ + 1980, + 413 + ], + [ + 1987, + 461 + ], + [ + 1987, + 505 + ], + [ + 1993, + 543 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1341, + 560 + ], + [ + 1332, + 559 + ], + [ + 1327, + 527 + ], + [ + 1326, + 501 + ], + [ + 1331, + 484 + ], + [ + 1333, + 462 + ], + [ 
+ 1343, + 450 + ], + [ + 1325, + 452 + ], + [ + 1320, + 447 + ], + [ + 1324, + 437 + ], + [ + 1334, + 433 + ], + [ + 1339, + 433 + ], + [ + 1346, + 437 + ], + [ + 1346, + 438 + ], + [ + 1355, + 410 + ], + [ + 1360, + 400 + ], + [ + 1365, + 394 + ], + [ + 1371, + 390 + ], + [ + 1377, + 387 + ], + [ + 1386, + 388 + ], + [ + 1391, + 391 + ], + [ + 1449, + 386 + ], + [ + 1470, + 386 + ], + [ + 1481, + 393 + ], + [ + 1460, + 546 + ], + [ + 1410, + 555 + ], + [ + 1390, + 556 + ], + [ + 1377, + 556 + ], + [ + 1376, + 563 + ], + [ + 1376, + 572 + ], + [ + 1372, + 574 + ], + [ + 1353, + 574 + ], + [ + 1350, + 570 + ], + [ + 1345, + 561 + ], + [ + 1348, + 558 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1424, + 606 + ], + [ + 1389, + 602 + ], + [ + 1388, + 576 + ], + [ + 1387, + 543 + ], + [ + 1391, + 519 + ], + [ + 1392, + 513 + ], + [ + 1392, + 501 + ], + [ + 1409, + 477 + ], + [ + 1419, + 464 + ], + [ + 1411, + 460 + ], + [ + 1395, + 460 + ], + [ + 1386, + 452 + ], + [ + 1387, + 443 + ], + [ + 1393, + 441 + ], + [ + 1405, + 440 + ], + [ + 1410, + 443 + ], + [ + 1415, + 449 + ], + [ + 1424, + 450 + ], + [ + 1431, + 432 + ], + [ + 1446, + 409 + ], + [ + 1470, + 381 + ], + [ + 1485, + 377 + ], + [ + 1523, + 372 + ], + [ + 1591, + 373 + ], + [ + 1644, + 377 + ], + [ + 1682, + 389 + ], + [ + 1709, + 427 + ], + [ + 1723, + 473 + ], + [ + 1730, + 484 + ], + [ + 1742, + 491 + ], + [ + 1747, + 505 + ], + [ + 1754, + 611 + ], + [ + 1744, + 635 + ], + [ + 1736, + 643 + ], + [ + 1722, + 643 + ], + [ + 1706, + 638 + ], + [ + 1695, + 630 + ], + [ + 1693, + 612 + ], + [ + 1665, + 612 + ], + [ + 1555, + 608 + ], + [ + 1553, + 612 + ], + [ + 1527, + 615 + ], + [ + 1520, + 606 + ], + [ + 1487, + 606 + ], + [ + 1486, + 633 + ], + [ + 1483, + 640 + ], + [ + 1459, + 640 + ], + [ + 1442, + 631 + ], + [ + 1438, + 619 + ], + [ + 1438, + 602 + ], + [ + 1439, + 587 + ], + [ + 1426, + 584 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1854, + 572 + ], + [ + 1845, + 497 + ], + [ + 1843, + 486 + ], + [ + 1836, + 486 + ], + [ + 1827, + 493 + ], + [ + 1821, + 569 + ], + [ + 1820, + 573 + ], + [ + 1826, + 571 + ], + [ + 1829, + 524 + ], + [ + 1832, + 507 + ], + [ + 1834, + 503 + ], + [ + 1838, + 512 + ], + [ + 1844, + 574 + ], + [ + 1844, + 574 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 965, + 571 + ], + [ + 962, + 548 + ], + [ + 962, + 527 + ], + [ + 963, + 518 + ], + [ + 962, + 499 + ], + [ + 962, + 481 + ], + [ + 968, + 468 + ], + [ + 970, + 463 + ], + [ + 974, + 459 + ], + [ + 958, + 458 + ], + [ + 958, + 449 + ], + [ + 962, + 443 + ], + [ + 971, + 443 + ], + [ + 980, + 448 + ], + [ + 981, + 450 + ], + [ + 982, + 450 + ], + [ + 1004, + 409 + ], + [ + 1009, + 408 + ], + [ + 1019, + 406 + ], + [ + 1045, + 402 + ], + [ + 1071, + 401 + ], + [ + 1100, + 402 + ], + [ + 1122, + 404 + ], + [ + 1134, + 406 + ], + [ + 1138, + 409 + ], + [ + 1157, + 438 + ], + [ + 1160, + 441 + ], + [ + 1168, + 441 + ], + [ + 1175, + 442 + ], + [ + 1180, + 444 + ], + [ + 1181, + 453 + ], + [ + 1172, + 458 + ], + [ + 1169, + 460 + ], + [ + 1171, + 464 + ], + [ + 1177, + 477 + ], + [ + 1185, + 493 + ], + [ + 1183, + 508 + ], + [ + 1183, + 541 + ], + [ + 1183, + 565 + ], + [ + 1181, + 572 + ], + [ + 1169, + 576 + ], + [ + 1158, + 576 + ], + [ + 1149, + 574 + ], + [ + 1147, + 563 + ], + [ + 1147, + 553 + ], + [ + 1030, + 552 + ], + [ + 1020, + 554 + ], + [ + 1003, + 558 + ], + [ + 1001, + 558 + ], + [ + 998, + 567 + ], + [ + 997, + 574 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1039, + 490 + ], + 
[ + 1039, + 472 + ], + [ + 1115, + 471 + ], + [ + 1112, + 488 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1661, + 533 + ], + [ + 1566, + 533 + ], + [ + 1565, + 505 + ], + [ + 1660, + 509 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 215, + 572 + ], + [ + 0, + 607 + ], + [ + 0, + 504 + ], + [ + 4, + 498 + ], + [ + 13, + 496 + ], + [ + 32, + 505 + ], + [ + 45, + 508 + ], + [ + 61, + 504 + ], + [ + 75, + 497 + ], + [ + 93, + 499 + ], + [ + 103, + 501 + ], + [ + 117, + 493 + ], + [ + 132, + 493 + ], + [ + 144, + 497 + ], + [ + 153, + 499 + ], + [ + 163, + 491 + ], + [ + 173, + 491 + ], + [ + 185, + 502 + ], + [ + 194, + 514 + ], + [ + 204, + 518 + ], + [ + 221, + 524 + ], + [ + 231, + 544 + ], + [ + 225, + 566 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000145_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000145_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..5adb487e4ed8552e21451c5a15c0ed57a9372fa3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000145_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000146_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000146_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..119c6c1a5a249e70e9cf7e070a28411af6326646 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000146_000019_gtFine_polygons.json @@ -0,0 +1,7365 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 605, + 46 + ], + [ + 986, + 280 + ], + [ + 1072, + 297 + ], + [ + 1301, + 260 + ], + [ + 1424, + 0 + ], + [ + 574, + 0 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 2009, + 560 + ], + [ + 1357, + 503 + ], + [ + 1243, + 477 + ], + [ + 1177, + 465 + ], + [ + 1170, + 452 + ], + [ + 1117, + 443 + ], + [ + 1042, + 443 + ], + [ + 935, + 458 + ], + [ + 795, + 482 + ], + [ + 156, + 556 + ], + [ + 0, + 580 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 563 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2021, + 604 + ], + [ + 1790, + 596 + ], + [ + 1598, + 569 + ], + [ + 1391, + 528 + ], + [ + 1261, + 506 + ], + [ + 1315, + 499 + ], + [ + 1272, + 480 + ], + [ + 1332, + 467 + ], + [ + 2048, + 491 + ], + [ + 2048, + 604 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 277, + 577 + ], + [ + 439, + 553 + ], + [ + 586, + 529 + ], + [ + 740, + 514 + ], + [ + 771, + 511 + ], + [ + 842, + 505 + ], + [ + 893, + 503 + ], + [ + 927, + 498 + ], + [ + 923, + 493 + ], + [ + 877, + 490 + ], + [ + 831, + 482 + ], + [ + 764, + 474 + ], + [ + 
133, + 498 + ], + [ + 153, + 589 + ], + [ + 222, + 588 + ], + [ + 245, + 585 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1135, + 596 + ], + [ + 1139, + 596 + ], + [ + 1137, + 583 + ], + [ + 1136, + 579 + ], + [ + 1056, + 532 + ], + [ + 1041, + 519 + ], + [ + 1029, + 518 + ], + [ + 1017, + 519 + ], + [ + 977, + 545 + ], + [ + 914, + 587 + ], + [ + 915, + 592 + ], + [ + 921, + 594 + ], + [ + 952, + 599 + ], + [ + 1063, + 599 + ], + [ + 1106, + 599 + ], + [ + 1125, + 598 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1333, + 24 + ], + [ + 1320, + 25 + ], + [ + 1310, + 42 + ], + [ + 1313, + 43 + ], + [ + 1316, + 46 + ], + [ + 1316, + 50 + ], + [ + 1316, + 54 + ], + [ + 1304, + 55 + ], + [ + 1300, + 62 + ], + [ + 1287, + 61 + ], + [ + 1257, + 84 + ], + [ + 1250, + 102 + ], + [ + 1253, + 104 + ], + [ + 1252, + 114 + ], + [ + 1256, + 117 + ], + [ + 1256, + 126 + ], + [ + 1254, + 132 + ], + [ + 1250, + 132 + ], + [ + 1244, + 147 + ], + [ + 1197, + 232 + ], + [ + 1137, + 233 + ], + [ + 1109, + 229 + ], + [ + 1107, + 110 + ], + [ + 1106, + 110 + ], + [ + 1105, + 231 + ], + [ + 1090, + 231 + ], + [ + 1091, + 239 + ], + [ + 1088, + 242 + ], + [ + 1090, + 249 + ], + [ + 1090, + 253 + ], + [ + 1082, + 254 + ], + [ + 1080, + 259 + ], + [ + 1069, + 259 + ], + [ + 1069, + 262 + ], + [ + 1072, + 265 + ], + [ + 1071, + 271 + ], + [ + 1069, + 272 + ], + [ + 1066, + 270 + ], + [ + 825, + 111 + ], + [ + 822, + 103 + ], + [ + 819, + 100 + ], + [ + 808, + 99 + ], + [ + 808, + 91 + ], + [ + 807, + 84 + ], + [ + 810, + 83 + ], + [ + 805, + 79 + ], + [ + 803, + 76 + ], + [ + 803, + 72 + ], + [ + 803, + 65 + ], + [ + 800, + 63 + ], + [ + 800, + 58 + ], + [ + 788, + 58 + ], + [ + 786, + 53 + ], + [ + 780, + 50 + ], + [ + 768, + 49 + ], + [ + 758, + 35 + ], + [ + 759, + 31 + ], + [ + 763, + 30 + ], + [ + 764, + 30 + ], + [ + 768, + 25 + ], + [ + 756, + 18 + ], + [ + 744, + 14 + ], + [ + 740, + 15 + ], + [ + 740, + 6 + ], + [ + 737, + 2 + ], + [ + 729, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 495 + ], + [ + 13, + 517 + ], + [ + 146, + 525 + ], + [ + 229, + 525 + ], + [ + 309, + 527 + ], + [ + 345, + 527 + ], + [ + 394, + 526 + ], + [ + 491, + 516 + ], + [ + 541, + 507 + ], + [ + 586, + 505 + ], + [ + 699, + 494 + ], + [ + 751, + 488 + ], + [ + 791, + 486 + ], + [ + 815, + 481 + ], + [ + 937, + 474 + ], + [ + 992, + 469 + ], + [ + 1036, + 469 + ], + [ + 1047, + 469 + ], + [ + 1067, + 469 + ], + [ + 1100, + 467 + ], + [ + 1114, + 470 + ], + [ + 1142, + 477 + ], + [ + 1195, + 474 + ], + [ + 1262, + 480 + ], + [ + 1317, + 484 + ], + [ + 1347, + 486 + ], + [ + 1368, + 490 + ], + [ + 1403, + 496 + ], + [ + 1458, + 504 + ], + [ + 1511, + 507 + ], + [ + 1542, + 517 + ], + [ + 1568, + 519 + ], + [ + 1631, + 519 + ], + [ + 1758, + 517 + ], + [ + 1844, + 517 + ], + [ + 1974, + 517 + ], + [ + 2048, + 518 + ], + [ + 2048, + 0 + ], + [ + 2048, + 0 + ], + [ + 1334, + 0 + ], + [ + 1327, + 8 + ], + [ + 1327, + 10 + ], + [ + 1334, + 10 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1161, + 466 + ], + [ + 1163, + 453 + ], + [ + 1164, + 444 + ], + [ + 1165, + 431 + ], + [ + 1165, + 414 + ], + [ + 1161, + 408 + ], + [ + 1151, + 403 + ], + [ + 1149, + 399 + ], + [ + 1145, + 395 + ], + [ + 1139, + 391 + ], + [ + 1133, + 387 + ], + [ + 1127, + 389 + ], + [ + 1113, + 392 + ], + [ + 1111, + 388 + ], + [ + 1117, + 382 + ], + [ + 1120, + 378 + ], + [ + 1115, + 376 + ], + [ + 1109, + 380 + ], + [ + 1102, + 381 + ], + [ + 1100, + 385 + ], + [ + 1089, + 387 + ], + [ + 1087, + 381 + ], + [ + 1089, + 378 + ], 
+ [ + 1089, + 367 + ], + [ + 1086, + 362 + ], + [ + 1079, + 359 + ], + [ + 1076, + 352 + ], + [ + 1073, + 347 + ], + [ + 1072, + 339 + ], + [ + 1078, + 327 + ], + [ + 1082, + 313 + ], + [ + 1083, + 312 + ], + [ + 1084, + 305 + ], + [ + 1084, + 295 + ], + [ + 1084, + 283 + ], + [ + 1089, + 270 + ], + [ + 1097, + 267 + ], + [ + 1107, + 261 + ], + [ + 1112, + 258 + ], + [ + 1109, + 253 + ], + [ + 1106, + 249 + ], + [ + 1107, + 237 + ], + [ + 1108, + 224 + ], + [ + 1113, + 216 + ], + [ + 1115, + 214 + ], + [ + 1115, + 211 + ], + [ + 1114, + 200 + ], + [ + 1123, + 197 + ], + [ + 1131, + 197 + ], + [ + 1140, + 198 + ], + [ + 1140, + 203 + ], + [ + 1144, + 205 + ], + [ + 1153, + 201 + ], + [ + 1155, + 198 + ], + [ + 1149, + 196 + ], + [ + 1140, + 191 + ], + [ + 1137, + 186 + ], + [ + 1139, + 183 + ], + [ + 1145, + 181 + ], + [ + 1144, + 177 + ], + [ + 1142, + 171 + ], + [ + 1150, + 168 + ], + [ + 1157, + 169 + ], + [ + 1161, + 173 + ], + [ + 1164, + 174 + ], + [ + 1173, + 174 + ], + [ + 1179, + 174 + ], + [ + 1184, + 177 + ], + [ + 1191, + 180 + ], + [ + 1196, + 184 + ], + [ + 1199, + 187 + ], + [ + 1197, + 182 + ], + [ + 1192, + 178 + ], + [ + 1184, + 168 + ], + [ + 1181, + 153 + ], + [ + 1181, + 148 + ], + [ + 1185, + 138 + ], + [ + 1194, + 134 + ], + [ + 1199, + 134 + ], + [ + 1203, + 142 + ], + [ + 1204, + 148 + ], + [ + 1208, + 145 + ], + [ + 1210, + 136 + ], + [ + 1209, + 130 + ], + [ + 1211, + 121 + ], + [ + 1219, + 120 + ], + [ + 1222, + 117 + ], + [ + 1225, + 111 + ], + [ + 1231, + 111 + ], + [ + 1235, + 118 + ], + [ + 1243, + 119 + ], + [ + 1248, + 121 + ], + [ + 1242, + 132 + ], + [ + 1243, + 137 + ], + [ + 1252, + 139 + ], + [ + 1252, + 134 + ], + [ + 1255, + 121 + ], + [ + 1257, + 110 + ], + [ + 1266, + 104 + ], + [ + 1275, + 105 + ], + [ + 1277, + 112 + ], + [ + 1281, + 125 + ], + [ + 1284, + 134 + ], + [ + 1286, + 143 + ], + [ + 1287, + 150 + ], + [ + 1293, + 155 + ], + [ + 1308, + 157 + ], + [ + 1313, + 158 + ], + [ + 1313, + 162 + ], + [ + 1305, + 169 + ], + [ + 1307, + 176 + ], + [ + 1308, + 176 + ], + [ + 1320, + 172 + ], + [ + 1323, + 173 + ], + [ + 1326, + 177 + ], + [ + 1326, + 187 + ], + [ + 1325, + 196 + ], + [ + 1325, + 202 + ], + [ + 1325, + 216 + ], + [ + 1323, + 222 + ], + [ + 1314, + 228 + ], + [ + 1307, + 234 + ], + [ + 1306, + 240 + ], + [ + 1307, + 247 + ], + [ + 1318, + 251 + ], + [ + 1323, + 255 + ], + [ + 1316, + 256 + ], + [ + 1307, + 260 + ], + [ + 1306, + 263 + ], + [ + 1312, + 270 + ], + [ + 1316, + 276 + ], + [ + 1308, + 284 + ], + [ + 1297, + 288 + ], + [ + 1302, + 293 + ], + [ + 1320, + 293 + ], + [ + 1330, + 297 + ], + [ + 1335, + 300 + ], + [ + 1324, + 304 + ], + [ + 1309, + 304 + ], + [ + 1300, + 306 + ], + [ + 1291, + 312 + ], + [ + 1282, + 319 + ], + [ + 1272, + 323 + ], + [ + 1268, + 329 + ], + [ + 1268, + 337 + ], + [ + 1280, + 337 + ], + [ + 1291, + 339 + ], + [ + 1286, + 346 + ], + [ + 1280, + 350 + ], + [ + 1294, + 353 + ], + [ + 1297, + 353 + ], + [ + 1292, + 360 + ], + [ + 1285, + 366 + ], + [ + 1273, + 374 + ], + [ + 1272, + 380 + ], + [ + 1272, + 383 + ], + [ + 1266, + 384 + ], + [ + 1254, + 385 + ], + [ + 1243, + 385 + ], + [ + 1240, + 393 + ], + [ + 1243, + 398 + ], + [ + 1240, + 401 + ], + [ + 1242, + 405 + ], + [ + 1249, + 409 + ], + [ + 1249, + 414 + ], + [ + 1242, + 417 + ], + [ + 1236, + 418 + ], + [ + 1238, + 469 + ], + [ + 1229, + 469 + ], + [ + 1230, + 439 + ], + [ + 1230, + 436 + ], + [ + 1220, + 434 + ], + [ + 1214, + 429 + ], + [ + 1211, + 427 + ], + [ + 1211, + 450 + ], + [ + 1206, + 448 + ], + [ + 1202, + 438 + ], + [ + 1201, 
+ 430 + ], + [ + 1193, + 426 + ], + [ + 1185, + 425 + ], + [ + 1180, + 427 + ], + [ + 1180, + 467 + ], + [ + 1180, + 471 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1104, + 467 + ], + [ + 1104, + 464 + ], + [ + 1102, + 455 + ], + [ + 1101, + 450 + ], + [ + 1089, + 449 + ], + [ + 1083, + 450 + ], + [ + 1077, + 461 + ], + [ + 1082, + 472 + ], + [ + 1087, + 473 + ], + [ + 1088, + 471 + ], + [ + 1099, + 471 + ], + [ + 1099, + 473 + ], + [ + 1104, + 473 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1120, + 411 + ], + [ + 1120, + 399 + ], + [ + 1128, + 399 + ], + [ + 1128, + 411 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1137, + 408 + ], + [ + 1137, + 400 + ], + [ + 1130, + 400 + ], + [ + 1130, + 407 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1188, + 449 + ], + [ + 1186, + 428 + ], + [ + 1196, + 429 + ], + [ + 1196, + 447 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1142, + 433 + ], + [ + 1137, + 433 + ], + [ + 1139, + 440 + ], + [ + 1143, + 444 + ], + [ + 1144, + 450 + ], + [ + 1145, + 459 + ], + [ + 1151, + 460 + ], + [ + 1157, + 461 + ], + [ + 1156, + 437 + ], + [ + 1156, + 434 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1139, + 460 + ], + [ + 1131, + 457 + ], + [ + 1122, + 457 + ], + [ + 1115, + 459 + ], + [ + 1110, + 464 + ], + [ + 1110, + 472 + ], + [ + 1112, + 476 + ], + [ + 1112, + 479 + ], + [ + 1114, + 479 + ], + [ + 1115, + 478 + ], + [ + 1121, + 477 + ], + [ + 1127, + 477 + ], + [ + 1130, + 477 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1179, + 482 + ], + [ + 1172, + 480 + ], + [ + 1163, + 477 + ], + [ + 1154, + 465 + ], + [ + 1153, + 458 + ], + [ + 1157, + 454 + ], + [ + 1163, + 450 + ], + [ + 1167, + 450 + ], + [ + 1170, + 451 + ], + [ + 1175, + 453 + ], + [ + 1179, + 455 + ], + [ + 1185, + 450 + ], + [ + 1188, + 448 + ], + [ + 1192, + 442 + ], + [ + 1198, + 441 + ], + [ + 1215, + 440 + ], + [ + 1225, + 440 + ], + [ + 1229, + 445 + ], + [ + 1230, + 453 + ], + [ + 1227, + 467 + ], + [ + 1214, + 483 + ], + [ + 1205, + 487 + ], + [ + 1197, + 487 + ], + [ + 1187, + 485 + ], + [ + 1183, + 484 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1218, + 488 + ], + [ + 1211, + 490 + ], + [ + 1199, + 490 + ], + [ + 1197, + 484 + ], + [ + 1195, + 474 + ], + [ + 1195, + 465 + ], + [ + 1197, + 458 + ], + [ + 1199, + 451 + ], + [ + 1205, + 450 + ], + [ + 1217, + 450 + ], + [ + 1225, + 450 + ], + [ + 1230, + 455 + ], + [ + 1232, + 467 + ], + [ + 1230, + 474 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1270, + 435 + ], + [ + 1242, + 436 + ], + [ + 1243, + 422 + ], + [ + 1247, + 411 + ], + [ + 1252, + 410 + ], + [ + 1262, + 410 + ], + [ + 1271, + 411 + ], + [ + 1272, + 413 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1279, + 463 + ], + [ + 1279, + 383 + ], + [ + 1281, + 383 + ], + [ + 1280, + 470 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1270, + 389 + ], + [ + 1267, + 383 + ], + [ + 1270, + 378 + ], + [ + 1272, + 377 + ], + [ + 1277, + 377 + ], + [ + 1280, + 378 + ], + [ + 1280, + 381 + ], + [ + 1280, + 386 + ], + [ + 1280, + 391 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1311, + 417 + ], + [ + 1311, + 382 + ], + [ + 1349, + 383 + ], + [ + 1348, + 418 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1257, + 459 + ], + [ + 1253, + 455 + ], + [ + 1243, + 454 + ], + [ + 1231, + 455 + ], + [ + 1224, + 457 + ], + [ + 1222, + 464 + ], + [ + 1216, + 468 + ], + [ + 1214, + 474 + ], + [ + 1214, + 481 + ], + [ + 1214, + 491 + 
], + [ + 1219, + 496 + ], + [ + 1229, + 495 + ], + [ + 1238, + 490 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1271, + 464 + ], + [ + 1270, + 457 + ], + [ + 1264, + 454 + ], + [ + 1253, + 454 + ], + [ + 1243, + 456 + ], + [ + 1237, + 460 + ], + [ + 1231, + 465 + ], + [ + 1231, + 469 + ], + [ + 1227, + 473 + ], + [ + 1225, + 480 + ], + [ + 1228, + 493 + ], + [ + 1228, + 500 + ], + [ + 1231, + 504 + ], + [ + 1238, + 504 + ], + [ + 1245, + 502 + ], + [ + 1257, + 496 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1345, + 479 + ], + [ + 1349, + 483 + ], + [ + 1349, + 494 + ], + [ + 1347, + 503 + ], + [ + 1339, + 504 + ], + [ + 1333, + 500 + ], + [ + 1332, + 487 + ], + [ + 1322, + 487 + ], + [ + 1316, + 480 + ], + [ + 1321, + 480 + ], + [ + 1327, + 481 + ], + [ + 1332, + 478 + ], + [ + 1323, + 477 + ], + [ + 1323, + 473 + ], + [ + 1331, + 472 + ], + [ + 1336, + 472 + ], + [ + 1340, + 477 + ], + [ + 1338, + 481 + ], + [ + 1338, + 481 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1251, + 507 + ], + [ + 1244, + 509 + ], + [ + 1240, + 506 + ], + [ + 1240, + 496 + ], + [ + 1243, + 485 + ], + [ + 1247, + 477 + ], + [ + 1249, + 476 + ], + [ + 1244, + 476 + ], + [ + 1245, + 472 + ], + [ + 1251, + 472 + ], + [ + 1252, + 466 + ], + [ + 1257, + 460 + ], + [ + 1263, + 459 + ], + [ + 1281, + 458 + ], + [ + 1294, + 457 + ], + [ + 1297, + 460 + ], + [ + 1305, + 464 + ], + [ + 1312, + 468 + ], + [ + 1319, + 475 + ], + [ + 1321, + 487 + ], + [ + 1322, + 501 + ], + [ + 1321, + 505 + ], + [ + 1313, + 502 + ], + [ + 1313, + 499 + ], + [ + 1286, + 500 + ], + [ + 1268, + 502 + ], + [ + 1260, + 504 + ], + [ + 1259, + 506 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1290, + 403 + ], + [ + 1289, + 510 + ], + [ + 1293, + 510 + ], + [ + 1292, + 394 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1310, + 506 + ], + [ + 1304, + 509 + ], + [ + 1295, + 508 + ], + [ + 1292, + 503 + ], + [ + 1288, + 500 + ], + [ + 1282, + 501 + ], + [ + 1277, + 501 + ], + [ + 1270, + 506 + ], + [ + 1261, + 510 + ], + [ + 1257, + 507 + ], + [ + 1252, + 494 + ], + [ + 1253, + 491 + ], + [ + 1252, + 478 + ], + [ + 1255, + 477 + ], + [ + 1263, + 473 + ], + [ + 1267, + 473 + ], + [ + 1266, + 469 + ], + [ + 1266, + 466 + ], + [ + 1273, + 466 + ], + [ + 1282, + 464 + ], + [ + 1288, + 464 + ], + [ + 1288, + 469 + ], + [ + 1280, + 471 + ], + [ + 1278, + 477 + ], + [ + 1295, + 472 + ], + [ + 1299, + 471 + ], + [ + 1292, + 469 + ], + [ + 1291, + 464 + ], + [ + 1299, + 464 + ], + [ + 1305, + 464 + ], + [ + 1306, + 467 + ], + [ + 1305, + 475 + ], + [ + 1308, + 477 + ], + [ + 1318, + 481 + ], + [ + 1322, + 488 + ], + [ + 1322, + 496 + ], + [ + 1320, + 503 + ], + [ + 1314, + 505 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1300, + 376 + ], + [ + 1299, + 414 + ], + [ + 1281, + 413 + ], + [ + 1281, + 377 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1330, + 401 + ], + [ + 1330, + 504 + ], + [ + 1333, + 504 + ], + [ + 1332, + 398 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1336, + 391 + ], + [ + 1335, + 423 + ], + [ + 1325, + 420 + ], + [ + 1325, + 391 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1348, + 367 + ], + [ + 1347, + 410 + ], + [ + 1387, + 410 + ], + [ + 1386, + 368 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1373, + 364 + ], + [ + 1374, + 321 + ], + [ + 1415, + 325 + ], + [ + 1412, + 363 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1436, + 523 + ], + [ + 1414, + 526 + ], + [ + 1399, + 519 + ], + [ + 
1385, + 519 + ], + [ + 1377, + 519 + ], + [ + 1379, + 498 + ], + [ + 1408, + 499 + ], + [ + 1436, + 501 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1438, + 492 + ], + [ + 1438, + 487 + ], + [ + 1436, + 482 + ], + [ + 1428, + 479 + ], + [ + 1417, + 475 + ], + [ + 1416, + 468 + ], + [ + 1405, + 470 + ], + [ + 1394, + 472 + ], + [ + 1388, + 476 + ], + [ + 1386, + 482 + ], + [ + 1378, + 486 + ], + [ + 1378, + 494 + ], + [ + 1376, + 505 + ], + [ + 1376, + 512 + ], + [ + 1381, + 514 + ], + [ + 1382, + 500 + ], + [ + 1388, + 498 + ], + [ + 1398, + 500 + ], + [ + 1405, + 503 + ], + [ + 1406, + 506 + ], + [ + 1412, + 508 + ], + [ + 1414, + 503 + ], + [ + 1432, + 501 + ], + [ + 1435, + 503 + ], + [ + 1437, + 504 + ], + [ + 1442, + 501 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1529, + 539 + ], + [ + 1501, + 539 + ], + [ + 1490, + 537 + ], + [ + 1492, + 515 + ], + [ + 1520, + 509 + ], + [ + 1529, + 511 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1524, + 505 + ], + [ + 1520, + 498 + ], + [ + 1511, + 495 + ], + [ + 1498, + 494 + ], + [ + 1486, + 494 + ], + [ + 1480, + 500 + ], + [ + 1479, + 511 + ], + [ + 1481, + 520 + ], + [ + 1486, + 523 + ], + [ + 1490, + 523 + ], + [ + 1498, + 519 + ], + [ + 1505, + 514 + ], + [ + 1517, + 514 + ], + [ + 1522, + 518 + ], + [ + 1526, + 523 + ], + [ + 1532, + 525 + ], + [ + 1537, + 523 + ], + [ + 1538, + 516 + ], + [ + 1534, + 511 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1625, + 547 + ], + [ + 1614, + 547 + ], + [ + 1604, + 544 + ], + [ + 1579, + 542 + ], + [ + 1569, + 542 + ], + [ + 1563, + 537 + ], + [ + 1548, + 538 + ], + [ + 1544, + 533 + ], + [ + 1547, + 526 + ], + [ + 1548, + 513 + ], + [ + 1550, + 505 + ], + [ + 1558, + 505 + ], + [ + 1565, + 502 + ], + [ + 1567, + 496 + ], + [ + 1568, + 486 + ], + [ + 1581, + 488 + ], + [ + 1591, + 492 + ], + [ + 1598, + 502 + ], + [ + 1609, + 510 + ], + [ + 1615, + 519 + ], + [ + 1621, + 526 + ], + [ + 1626, + 535 + ], + [ + 1627, + 541 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1647, + 471 + ], + [ + 1646, + 533 + ], + [ + 1654, + 533 + ], + [ + 1654, + 472 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1522, + 283 + ], + [ + 1525, + 153 + ], + [ + 1579, + 155 + ], + [ + 1578, + 282 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1665, + 473 + ], + [ + 1665, + 532 + ], + [ + 1672, + 531 + ], + [ + 1672, + 473 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1743, + 463 + ], + [ + 1740, + 531 + ], + [ + 1805, + 529 + ], + [ + 1803, + 460 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1813, + 428 + ], + [ + 1814, + 526 + ], + [ + 1885, + 527 + ], + [ + 1886, + 521 + ], + [ + 1886, + 425 + ], + [ + 1843, + 425 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 2009, + 444 + ], + [ + 1993, + 450 + ], + [ + 1983, + 452 + ], + [ + 1980, + 456 + ], + [ + 1988, + 459 + ], + [ + 1994, + 461 + ], + [ + 2001, + 469 + ], + [ + 2002, + 477 + ], + [ + 1993, + 481 + ], + [ + 1988, + 491 + ], + [ + 1986, + 505 + ], + [ + 1988, + 511 + ], + [ + 2000, + 521 + ], + [ + 2023, + 521 + ], + [ + 2030, + 518 + ], + [ + 2033, + 506 + ], + [ + 2031, + 495 + ], + [ + 2022, + 489 + ], + [ + 2013, + 482 + ], + [ + 2012, + 472 + ], + [ + 2011, + 464 + ], + [ + 2013, + 456 + ], + [ + 2018, + 451 + ], + [ + 2016, + 446 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2035, + 462 + ], + [ + 2035, + 515 + ], + [ + 2041, + 515 + ], + [ + 2041, + 463 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 2041, + 559 
+ ], + [ + 1898, + 547 + ], + [ + 1824, + 538 + ], + [ + 1774, + 536 + ], + [ + 1720, + 529 + ], + [ + 1737, + 524 + ], + [ + 1771, + 524 + ], + [ + 1802, + 518 + ], + [ + 1818, + 514 + ], + [ + 1831, + 514 + ], + [ + 1870, + 516 + ], + [ + 1895, + 512 + ], + [ + 1917, + 509 + ], + [ + 1943, + 508 + ], + [ + 1966, + 508 + ], + [ + 1990, + 507 + ], + [ + 2013, + 507 + ], + [ + 2031, + 508 + ], + [ + 2048, + 511 + ], + [ + 2048, + 560 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1942, + 404 + ], + [ + 1940, + 487 + ], + [ + 1933, + 509 + ], + [ + 1934, + 515 + ], + [ + 1983, + 515 + ], + [ + 1984, + 515 + ], + [ + 1986, + 514 + ], + [ + 1979, + 481 + ], + [ + 1974, + 417 + ], + [ + 1975, + 368 + ], + [ + 1981, + 276 + ], + [ + 1973, + 184 + ], + [ + 1969, + 98 + ], + [ + 1966, + 73 + ], + [ + 1977, + 62 + ], + [ + 1991, + 57 + ], + [ + 1984, + 76 + ], + [ + 1972, + 90 + ], + [ + 1978, + 100 + ], + [ + 1989, + 97 + ], + [ + 1999, + 82 + ], + [ + 2005, + 82 + ], + [ + 2008, + 93 + ], + [ + 2005, + 104 + ], + [ + 2011, + 109 + ], + [ + 2025, + 99 + ], + [ + 2034, + 92 + ], + [ + 2034, + 104 + ], + [ + 2031, + 112 + ], + [ + 2048, + 116 + ], + [ + 2048, + 116 + ], + [ + 2048, + 0 + ], + [ + 1489, + 0 + ], + [ + 1484, + 6 + ], + [ + 1483, + 10 + ], + [ + 1469, + 23 + ], + [ + 1459, + 32 + ], + [ + 1466, + 45 + ], + [ + 1474, + 44 + ], + [ + 1487, + 37 + ], + [ + 1501, + 33 + ], + [ + 1505, + 40 + ], + [ + 1507, + 54 + ], + [ + 1508, + 64 + ], + [ + 1510, + 76 + ], + [ + 1510, + 87 + ], + [ + 1504, + 102 + ], + [ + 1504, + 110 + ], + [ + 1511, + 124 + ], + [ + 1518, + 131 + ], + [ + 1532, + 127 + ], + [ + 1537, + 129 + ], + [ + 1529, + 145 + ], + [ + 1523, + 163 + ], + [ + 1525, + 175 + ], + [ + 1535, + 177 + ], + [ + 1547, + 164 + ], + [ + 1552, + 170 + ], + [ + 1541, + 178 + ], + [ + 1538, + 188 + ], + [ + 1557, + 193 + ], + [ + 1564, + 181 + ], + [ + 1582, + 171 + ], + [ + 1583, + 182 + ], + [ + 1575, + 195 + ], + [ + 1575, + 208 + ], + [ + 1573, + 222 + ], + [ + 1597, + 220 + ], + [ + 1610, + 191 + ], + [ + 1615, + 182 + ], + [ + 1612, + 166 + ], + [ + 1617, + 155 + ], + [ + 1621, + 163 + ], + [ + 1616, + 186 + ], + [ + 1616, + 200 + ], + [ + 1622, + 209 + ], + [ + 1628, + 219 + ], + [ + 1642, + 227 + ], + [ + 1646, + 236 + ], + [ + 1664, + 228 + ], + [ + 1667, + 238 + ], + [ + 1678, + 237 + ], + [ + 1695, + 243 + ], + [ + 1693, + 252 + ], + [ + 1692, + 266 + ], + [ + 1709, + 265 + ], + [ + 1710, + 254 + ], + [ + 1717, + 250 + ], + [ + 1726, + 247 + ], + [ + 1726, + 236 + ], + [ + 1728, + 224 + ], + [ + 1740, + 220 + ], + [ + 1747, + 220 + ], + [ + 1748, + 231 + ], + [ + 1747, + 242 + ], + [ + 1765, + 232 + ], + [ + 1774, + 215 + ], + [ + 1782, + 196 + ], + [ + 1788, + 185 + ], + [ + 1788, + 229 + ], + [ + 1790, + 246 + ], + [ + 1799, + 256 + ], + [ + 1796, + 263 + ], + [ + 1792, + 273 + ], + [ + 1795, + 284 + ], + [ + 1797, + 293 + ], + [ + 1813, + 290 + ], + [ + 1820, + 274 + ], + [ + 1822, + 269 + ], + [ + 1830, + 264 + ], + [ + 1825, + 259 + ], + [ + 1833, + 257 + ], + [ + 1840, + 255 + ], + [ + 1833, + 246 + ], + [ + 1834, + 240 + ], + [ + 1844, + 236 + ], + [ + 1843, + 225 + ], + [ + 1837, + 217 + ], + [ + 1835, + 201 + ], + [ + 1843, + 188 + ], + [ + 1848, + 178 + ], + [ + 1853, + 170 + ], + [ + 1855, + 181 + ], + [ + 1849, + 191 + ], + [ + 1844, + 205 + ], + [ + 1840, + 215 + ], + [ + 1853, + 224 + ], + [ + 1857, + 220 + ], + [ + 1870, + 229 + ], + [ + 1881, + 236 + ], + [ + 1902, + 233 + ], + [ + 1919, + 204 + ], + [ + 1921, + 179 + ], + [ + 1922, + 128 + ], + [ + 
1921, + 103 + ], + [ + 1914, + 90 + ], + [ + 1914, + 70 + ], + [ + 1918, + 68 + ], + [ + 1918, + 75 + ], + [ + 1926, + 89 + ], + [ + 1944, + 124 + ], + [ + 1946, + 169 + ], + [ + 1947, + 213 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1833, + 470 + ], + [ + 1873, + 473 + ], + [ + 1869, + 528 + ], + [ + 1835, + 523 + ], + [ + 1830, + 516 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1859, + 228 + ], + [ + 1849, + 583 + ], + [ + 1866, + 587 + ], + [ + 1868, + 226 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1829, + 270 + ], + [ + 1861, + 265 + ], + [ + 1857, + 288 + ], + [ + 1829, + 290 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1958, + 231 + ], + [ + 1957, + 256 + ], + [ + 1930, + 258 + ], + [ + 1930, + 270 + ], + [ + 1893, + 270 + ], + [ + 1895, + 258 + ], + [ + 1867, + 256 + ], + [ + 1870, + 231 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1851, + 300 + ], + [ + 1841, + 299 + ], + [ + 1840, + 305 + ], + [ + 1822, + 309 + ], + [ + 1823, + 319 + ], + [ + 1826, + 323 + ], + [ + 1834, + 326 + ], + [ + 1835, + 332 + ], + [ + 1823, + 334 + ], + [ + 1822, + 342 + ], + [ + 1826, + 346 + ], + [ + 1833, + 349 + ], + [ + 1836, + 349 + ], + [ + 1835, + 360 + ], + [ + 1823, + 360 + ], + [ + 1823, + 369 + ], + [ + 1826, + 373 + ], + [ + 1834, + 375 + ], + [ + 1841, + 375 + ], + [ + 1841, + 383 + ], + [ + 1857, + 385 + ], + [ + 1856, + 298 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1893, + 332 + ], + [ + 1903, + 336 + ], + [ + 1910, + 342 + ], + [ + 1911, + 351 + ], + [ + 1910, + 360 + ], + [ + 1905, + 366 + ], + [ + 1896, + 371 + ], + [ + 1887, + 373 + ], + [ + 1877, + 369 + ], + [ + 1867, + 361 + ], + [ + 1867, + 350 + ], + [ + 1871, + 336 + ], + [ + 1877, + 331 + ], + [ + 1885, + 330 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 984, + 465 + ], + [ + 981, + 454 + ], + [ + 983, + 447 + ], + [ + 991, + 445 + ], + [ + 999, + 439 + ], + [ + 991, + 426 + ], + [ + 991, + 421 + ], + [ + 999, + 422 + ], + [ + 1005, + 425 + ], + [ + 1024, + 421 + ], + [ + 1025, + 408 + ], + [ + 1024, + 393 + ], + [ + 1028, + 387 + ], + [ + 1045, + 367 + ], + [ + 1045, + 333 + ], + [ + 1049, + 313 + ], + [ + 1049, + 292 + ], + [ + 1059, + 287 + ], + [ + 1069, + 280 + ], + [ + 1067, + 261 + ], + [ + 1071, + 219 + ], + [ + 1068, + 200 + ], + [ + 1068, + 175 + ], + [ + 1061, + 157 + ], + [ + 1049, + 147 + ], + [ + 1028, + 138 + ], + [ + 1023, + 132 + ], + [ + 1031, + 127 + ], + [ + 1024, + 118 + ], + [ + 1020, + 109 + ], + [ + 1015, + 114 + ], + [ + 1011, + 108 + ], + [ + 1012, + 105 + ], + [ + 996, + 94 + ], + [ + 979, + 85 + ], + [ + 950, + 83 + ], + [ + 953, + 76 + ], + [ + 954, + 68 + ], + [ + 955, + 61 + ], + [ + 925, + 49 + ], + [ + 915, + 50 + ], + [ + 911, + 49 + ], + [ + 909, + 37 + ], + [ + 899, + 41 + ], + [ + 891, + 39 + ], + [ + 882, + 39 + ], + [ + 874, + 39 + ], + [ + 864, + 48 + ], + [ + 852, + 56 + ], + [ + 847, + 60 + ], + [ + 836, + 59 + ], + [ + 834, + 68 + ], + [ + 834, + 76 + ], + [ + 822, + 86 + ], + [ + 815, + 95 + ], + [ + 807, + 104 + ], + [ + 797, + 111 + ], + [ + 786, + 113 + ], + [ + 776, + 121 + ], + [ + 773, + 129 + ], + [ + 765, + 139 + ], + [ + 765, + 144 + ], + [ + 774, + 147 + ], + [ + 776, + 151 + ], + [ + 772, + 157 + ], + [ + 765, + 167 + ], + [ + 765, + 174 + ], + [ + 769, + 180 + ], + [ + 777, + 181 + ], + [ + 778, + 190 + ], + [ + 777, + 204 + ], + [ + 782, + 216 + ], + [ + 783, + 229 + ], + [ + 787, + 244 + ], + [ + 803, + 254 + ], + [ + 814, + 259 + ], + [ + 
801, + 262 + ], + [ + 798, + 274 + ], + [ + 821, + 278 + ], + [ + 815, + 284 + ], + [ + 796, + 283 + ], + [ + 798, + 293 + ], + [ + 816, + 298 + ], + [ + 812, + 307 + ], + [ + 809, + 316 + ], + [ + 817, + 329 + ], + [ + 833, + 337 + ], + [ + 851, + 346 + ], + [ + 866, + 367 + ], + [ + 869, + 380 + ], + [ + 870, + 399 + ], + [ + 861, + 459 + ], + [ + 871, + 461 + ], + [ + 877, + 438 + ], + [ + 878, + 408 + ], + [ + 880, + 399 + ], + [ + 886, + 400 + ], + [ + 883, + 455 + ], + [ + 893, + 460 + ], + [ + 896, + 449 + ], + [ + 897, + 422 + ], + [ + 901, + 408 + ], + [ + 907, + 407 + ], + [ + 896, + 455 + ], + [ + 907, + 460 + ], + [ + 907, + 434 + ], + [ + 917, + 421 + ], + [ + 920, + 443 + ], + [ + 924, + 453 + ], + [ + 929, + 437 + ], + [ + 926, + 422 + ], + [ + 926, + 414 + ], + [ + 934, + 408 + ], + [ + 937, + 410 + ], + [ + 933, + 457 + ], + [ + 934, + 464 + ], + [ + 938, + 452 + ], + [ + 940, + 427 + ], + [ + 942, + 416 + ], + [ + 945, + 417 + ], + [ + 946, + 430 + ], + [ + 941, + 457 + ], + [ + 947, + 462 + ], + [ + 964, + 461 + ], + [ + 965, + 450 + ], + [ + 966, + 436 + ], + [ + 970, + 425 + ], + [ + 975, + 424 + ], + [ + 978, + 432 + ], + [ + 976, + 452 + ], + [ + 976, + 462 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1023, + 466 + ], + [ + 1017, + 462 + ], + [ + 1013, + 460 + ], + [ + 1007, + 457 + ], + [ + 1000, + 457 + ], + [ + 996, + 457 + ], + [ + 990, + 455 + ], + [ + 983, + 453 + ], + [ + 977, + 452 + ], + [ + 965, + 454 + ], + [ + 949, + 454 + ], + [ + 940, + 453 + ], + [ + 924, + 453 + ], + [ + 911, + 454 + ], + [ + 911, + 477 + ], + [ + 920, + 490 + ], + [ + 932, + 493 + ], + [ + 939, + 492 + ], + [ + 947, + 486 + ], + [ + 950, + 488 + ], + [ + 962, + 486 + ], + [ + 965, + 483 + ], + [ + 973, + 483 + ], + [ + 981, + 483 + ], + [ + 989, + 479 + ], + [ + 993, + 477 + ], + [ + 1001, + 476 + ], + [ + 1006, + 477 + ], + [ + 1010, + 477 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 931, + 432 + ], + [ + 936, + 435 + ], + [ + 934, + 491 + ], + [ + 930, + 495 + ], + [ + 906, + 491 + ], + [ + 900, + 475 + ], + [ + 901, + 441 + ], + [ + 904, + 434 + ], + [ + 906, + 433 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 807, + 447 + ], + [ + 789, + 466 + ], + [ + 804, + 487 + ], + [ + 892, + 488 + ], + [ + 904, + 488 + ], + [ + 906, + 463 + ], + [ + 906, + 448 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 916, + 467 + ], + [ + 913, + 480 + ], + [ + 911, + 498 + ], + [ + 904, + 498 + ], + [ + 897, + 495 + ], + [ + 897, + 482 + ], + [ + 896, + 466 + ], + [ + 895, + 466 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 819, + 495 + ], + [ + 810, + 491 + ], + [ + 806, + 488 + ], + [ + 805, + 478 + ], + [ + 811, + 472 + ], + [ + 819, + 467 + ], + [ + 833, + 460 + ], + [ + 845, + 452 + ], + [ + 853, + 452 + ], + [ + 868, + 451 + ], + [ + 878, + 455 + ], + [ + 884, + 466 + ], + [ + 885, + 477 + ], + [ + 885, + 486 + ], + [ + 885, + 492 + ], + [ + 882, + 497 + ], + [ + 861, + 493 + ], + [ + 840, + 488 + ], + [ + 823, + 490 + ], + [ + 815, + 491 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 849, + 495 + ], + [ + 864, + 498 + ], + [ + 875, + 498 + ], + [ + 879, + 486 + ], + [ + 873, + 473 + ], + [ + 858, + 466 + ], + [ + 847, + 459 + ], + [ + 847, + 457 + ], + [ + 850, + 434 + ], + [ + 852, + 419 + ], + [ + 862, + 394 + ], + [ + 866, + 373 + ], + [ + 876, + 365 + ], + [ + 898, + 349 + ], + [ + 909, + 327 + ], + [ + 872, + 311 + ], + [ + 844, + 327 + ], + [ + 848, + 346 + ], + [ + 851, + 359 + ], + [ + 859, + 374 + ], + 
[ + 851, + 394 + ], + [ + 844, + 415 + ], + [ + 835, + 451 + ], + [ + 830, + 463 + ], + [ + 827, + 469 + ], + [ + 816, + 463 + ], + [ + 812, + 476 + ], + [ + 808, + 488 + ], + [ + 817, + 498 + ], + [ + 827, + 497 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 777, + 494 + ], + [ + 777, + 487 + ], + [ + 764, + 487 + ], + [ + 764, + 494 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 773, + 412 + ], + [ + 772, + 486 + ], + [ + 778, + 487 + ], + [ + 779, + 415 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 822, + 436 + ], + [ + 810, + 437 + ], + [ + 810, + 425 + ], + [ + 811, + 415 + ], + [ + 817, + 408 + ], + [ + 823, + 411 + ], + [ + 823, + 422 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 748, + 545 + ], + [ + 742, + 550 + ], + [ + 737, + 551 + ], + [ + 731, + 550 + ], + [ + 731, + 545 + ], + [ + 731, + 541 + ], + [ + 716, + 543 + ], + [ + 716, + 547 + ], + [ + 716, + 555 + ], + [ + 711, + 560 + ], + [ + 701, + 559 + ], + [ + 693, + 555 + ], + [ + 692, + 548 + ], + [ + 615, + 552 + ], + [ + 602, + 553 + ], + [ + 602, + 561 + ], + [ + 595, + 563 + ], + [ + 585, + 561 + ], + [ + 580, + 556 + ], + [ + 579, + 544 + ], + [ + 579, + 531 + ], + [ + 577, + 517 + ], + [ + 580, + 507 + ], + [ + 594, + 495 + ], + [ + 585, + 494 + ], + [ + 587, + 485 + ], + [ + 591, + 484 + ], + [ + 597, + 483 + ], + [ + 601, + 485 + ], + [ + 617, + 462 + ], + [ + 628, + 452 + ], + [ + 642, + 451 + ], + [ + 652, + 451 + ], + [ + 655, + 453 + ], + [ + 698, + 453 + ], + [ + 701, + 451 + ], + [ + 711, + 450 + ], + [ + 726, + 457 + ], + [ + 738, + 471 + ], + [ + 748, + 494 + ], + [ + 748, + 508 + ], + [ + 750, + 523 + ], + [ + 749, + 537 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1090, + 480 + ], + [ + 1090, + 485 + ], + [ + 1085, + 485 + ], + [ + 1085, + 482 + ], + [ + 1061, + 483 + ], + [ + 1060, + 487 + ], + [ + 1053, + 486 + ], + [ + 1053, + 481 + ], + [ + 1053, + 471 + ], + [ + 1053, + 464 + ], + [ + 1055, + 459 + ], + [ + 1058, + 453 + ], + [ + 1065, + 454 + ], + [ + 1076, + 454 + ], + [ + 1085, + 455 + ], + [ + 1089, + 465 + ], + [ + 1090, + 474 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1131, + 490 + ], + [ + 1131, + 495 + ], + [ + 1122, + 495 + ], + [ + 1120, + 490 + ], + [ + 1120, + 486 + ], + [ + 1120, + 476 + ], + [ + 1122, + 470 + ], + [ + 1123, + 468 + ], + [ + 1120, + 467 + ], + [ + 1121, + 464 + ], + [ + 1124, + 463 + ], + [ + 1127, + 459 + ], + [ + 1129, + 457 + ], + [ + 1144, + 457 + ], + [ + 1158, + 457 + ], + [ + 1163, + 461 + ], + [ + 1169, + 465 + ], + [ + 1172, + 476 + ], + [ + 1172, + 486 + ], + [ + 1171, + 492 + ], + [ + 1167, + 495 + ], + [ + 1163, + 493 + ], + [ + 1162, + 491 + ], + [ + 1162, + 490 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1023, + 433 + ], + [ + 1016, + 436 + ], + [ + 1011, + 440 + ], + [ + 1010, + 446 + ], + [ + 1010, + 453 + ], + [ + 1011, + 462 + ], + [ + 1018, + 465 + ], + [ + 1028, + 464 + ], + [ + 1032, + 459 + ], + [ + 1036, + 452 + ], + [ + 1037, + 446 + ], + [ + 1035, + 440 + ], + [ + 1031, + 436 + ], + [ + 1029, + 434 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1022, + 499 + ], + [ + 1023, + 537 + ], + [ + 1029, + 537 + ], + [ + 1026, + 433 + ], + [ + 1022, + 433 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1002, + 223 + ], + [ + 1068, + 228 + ], + [ + 1039, + 299 + ], + [ + 997, + 223 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1023, + 173 + ], + [ + 1017, + 179 + ], + [ + 1012, + 184 + ], + [ + 1010, + 192 + ], + [ + 1009, + 204 + ], + [ + 
1013, + 212 + ], + [ + 1019, + 219 + ], + [ + 1029, + 225 + ], + [ + 1042, + 227 + ], + [ + 1052, + 224 + ], + [ + 1058, + 216 + ], + [ + 1062, + 208 + ], + [ + 1064, + 197 + ], + [ + 1062, + 181 + ], + [ + 1056, + 173 + ], + [ + 1041, + 168 + ], + [ + 1034, + 168 + ], + [ + 1028, + 172 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 740, + 76 + ], + [ + 739, + 88 + ], + [ + 738, + 92 + ], + [ + 733, + 98 + ], + [ + 699, + 99 + ], + [ + 692, + 93 + ], + [ + 689, + 88 + ], + [ + 688, + 8 + ], + [ + 690, + 4 + ], + [ + 695, + 0 + ], + [ + 731, + 0 + ], + [ + 734, + 2 + ], + [ + 738, + 9 + ], + [ + 739, + 19 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 708, + 21 + ], + [ + 708, + 14 + ], + [ + 769, + 22 + ], + [ + 983, + 76 + ], + [ + 1013, + 91 + ], + [ + 1026, + 108 + ], + [ + 1039, + 132 + ], + [ + 1042, + 171 + ], + [ + 1049, + 574 + ], + [ + 1032, + 574 + ], + [ + 1033, + 154 + ], + [ + 1031, + 129 + ], + [ + 1016, + 106 + ], + [ + 999, + 89 + ], + [ + 974, + 79 + ], + [ + 771, + 27 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1039, + 79 + ], + [ + 1038, + 89 + ], + [ + 1012, + 90 + ], + [ + 1008, + 83 + ], + [ + 1009, + 81 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1013, + 301 + ], + [ + 1011, + 305 + ], + [ + 990, + 308 + ], + [ + 990, + 313 + ], + [ + 992, + 318 + ], + [ + 1008, + 323 + ], + [ + 1008, + 331 + ], + [ + 990, + 333 + ], + [ + 989, + 341 + ], + [ + 992, + 344 + ], + [ + 1009, + 346 + ], + [ + 1008, + 357 + ], + [ + 991, + 357 + ], + [ + 991, + 367 + ], + [ + 995, + 372 + ], + [ + 1009, + 375 + ], + [ + 1014, + 376 + ], + [ + 1014, + 382 + ], + [ + 1015, + 382 + ], + [ + 1031, + 386 + ], + [ + 1035, + 298 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1068, + 306 + ], + [ + 1068, + 314 + ], + [ + 1056, + 319 + ], + [ + 1049, + 321 + ], + [ + 1048, + 331 + ], + [ + 1067, + 331 + ], + [ + 1068, + 338 + ], + [ + 1065, + 343 + ], + [ + 1065, + 343 + ], + [ + 1049, + 347 + ], + [ + 1049, + 353 + ], + [ + 1049, + 356 + ], + [ + 1067, + 357 + ], + [ + 1068, + 363 + ], + [ + 1067, + 367 + ], + [ + 1063, + 370 + ], + [ + 1049, + 372 + ], + [ + 1049, + 381 + ], + [ + 1031, + 381 + ], + [ + 1030, + 298 + ], + [ + 1047, + 298 + ], + [ + 1047, + 302 + ], + [ + 1053, + 304 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1022, + 578 + ], + [ + 1021, + 494 + ], + [ + 1028, + 492 + ], + [ + 1028, + 577 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1011, + 501 + ], + [ + 1005, + 492 + ], + [ + 1003, + 482 + ], + [ + 1004, + 472 + ], + [ + 1011, + 463 + ], + [ + 1020, + 459 + ], + [ + 1031, + 458 + ], + [ + 1041, + 462 + ], + [ + 1049, + 472 + ], + [ + 1051, + 482 + ], + [ + 1050, + 492 + ], + [ + 1045, + 502 + ], + [ + 1034, + 508 + ], + [ + 1023, + 508 + ], + [ + 1015, + 507 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 320, + 477 + ], + [ + 321, + 536 + ], + [ + 326, + 536 + ], + [ + 325, + 473 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 289, + 475 + ], + [ + 294, + 476 + ], + [ + 295, + 531 + ], + [ + 289, + 532 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 258, + 475 + ], + [ + 254, + 475 + ], + [ + 254, + 533 + ], + [ + 256, + 533 + ], + [ + 257, + 533 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 215, + 485 + ], + [ + 211, + 482 + ], + [ + 205, + 477 + ], + [ + 204, + 474 + ], + [ + 194, + 471 + ], + [ + 192, + 464 + ], + [ + 198, + 463 + ], + [ + 203, + 464 + ], + [ + 206, + 462 + ], + [ + 208, + 459 + ], + [ + 208, + 
451 + ], + [ + 212, + 451 + ], + [ + 217, + 455 + ], + [ + 218, + 462 + ], + [ + 226, + 464 + ], + [ + 235, + 465 + ], + [ + 240, + 465 + ], + [ + 243, + 463 + ], + [ + 250, + 463 + ], + [ + 252, + 466 + ], + [ + 248, + 473 + ], + [ + 248, + 477 + ], + [ + 248, + 486 + ], + [ + 253, + 490 + ], + [ + 256, + 495 + ], + [ + 252, + 499 + ], + [ + 251, + 505 + ], + [ + 248, + 508 + ], + [ + 248, + 517 + ], + [ + 249, + 524 + ], + [ + 246, + 529 + ], + [ + 242, + 533 + ], + [ + 238, + 536 + ], + [ + 231, + 532 + ], + [ + 218, + 531 + ], + [ + 208, + 513 + ], + [ + 208, + 504 + ], + [ + 210, + 496 + ], + [ + 214, + 491 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 180, + 441 + ], + [ + 183, + 507 + ], + [ + 0, + 485 + ], + [ + 0, + 360 + ], + [ + 57, + 367 + ], + [ + 75, + 378 + ], + [ + 107, + 394 + ], + [ + 108, + 442 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 66, + 195 + ], + [ + 68, + 288 + ], + [ + 23, + 289 + ], + [ + 21, + 196 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 49, + 145 + ], + [ + 58, + 151 + ], + [ + 64, + 161 + ], + [ + 63, + 171 + ], + [ + 60, + 188 + ], + [ + 55, + 196 + ], + [ + 47, + 200 + ], + [ + 35, + 200 + ], + [ + 25, + 194 + ], + [ + 21, + 184 + ], + [ + 20, + 171 + ], + [ + 26, + 153 + ], + [ + 32, + 147 + ], + [ + 39, + 145 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 11, + 393 + ], + [ + 0, + 373 + ], + [ + 0, + 322 + ], + [ + 0, + 318 + ], + [ + 47, + 318 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 12, + 272 + ], + [ + 23, + 272 + ], + [ + 27, + 279 + ], + [ + 29, + 292 + ], + [ + 30, + 306 + ], + [ + 21, + 321 + ], + [ + 8, + 327 + ], + [ + 0, + 324 + ], + [ + 0, + 272 + ], + [ + 0, + 271 + ], + [ + 6, + 271 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 43, + 156 + ], + [ + 48, + 463 + ], + [ + 39, + 462 + ], + [ + 33, + 154 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 57, + 317 + ], + [ + 66, + 317 + ], + [ + 67, + 321 + ], + [ + 73, + 323 + ], + [ + 76, + 324 + ], + [ + 77, + 327 + ], + [ + 77, + 330 + ], + [ + 71, + 333 + ], + [ + 65, + 334 + ], + [ + 66, + 346 + ], + [ + 73, + 348 + ], + [ + 80, + 349 + ], + [ + 80, + 353 + ], + [ + 76, + 355 + ], + [ + 64, + 357 + ], + [ + 64, + 371 + ], + [ + 74, + 371 + ], + [ + 78, + 375 + ], + [ + 79, + 381 + ], + [ + 75, + 382 + ], + [ + 64, + 384 + ], + [ + 64, + 389 + ], + [ + 61, + 392 + ], + [ + 46, + 392 + ], + [ + 46, + 317 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 25, + 392 + ], + [ + 23, + 311 + ], + [ + 42, + 313 + ], + [ + 46, + 318 + ], + [ + 63, + 320 + ], + [ + 64, + 325 + ], + [ + 64, + 327 + ], + [ + 56, + 330 + ], + [ + 45, + 331 + ], + [ + 47, + 340 + ], + [ + 62, + 342 + ], + [ + 63, + 342 + ], + [ + 63, + 349 + ], + [ + 61, + 354 + ], + [ + 47, + 357 + ], + [ + 44, + 357 + ], + [ + 45, + 366 + ], + [ + 51, + 367 + ], + [ + 55, + 368 + ], + [ + 59, + 371 + ], + [ + 60, + 378 + ], + [ + 53, + 382 + ], + [ + 48, + 386 + ], + [ + 44, + 391 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 146, + 671 + ], + [ + 133, + 673 + ], + [ + 112, + 670 + ], + [ + 101, + 661 + ], + [ + 91, + 646 + ], + [ + 82, + 645 + ], + [ + 30, + 645 + ], + [ + 21, + 647 + ], + [ + 16, + 650 + ], + [ + 0, + 650 + ], + [ + 0, + 437 + ], + [ + 4, + 439 + ], + [ + 29, + 442 + ], + [ + 59, + 449 + ], + [ + 80, + 457 + ], + [ + 132, + 480 + ], + [ + 155, + 491 + ], + [ + 166, + 498 + ], + [ + 187, + 500 + ], + [ + 214, + 506 + ], + [ + 227, + 528 + ], + [ + 229, + 547 + ], + [ + 228, + 558 + ], + [ + 
232, + 565 + ], + [ + 232, + 590 + ], + [ + 224, + 616 + ], + [ + 219, + 626 + ], + [ + 172, + 631 + ], + [ + 169, + 637 + ], + [ + 163, + 650 + ], + [ + 156, + 661 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 654, + 538 + ], + [ + 613, + 540 + ], + [ + 613, + 529 + ], + [ + 654, + 529 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1829, + 270 + ], + [ + 1861, + 265 + ], + [ + 1857, + 288 + ], + [ + 1829, + 290 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1438, + 492 + ], + [ + 1438, + 487 + ], + [ + 1436, + 482 + ], + [ + 1428, + 479 + ], + [ + 1417, + 475 + ], + [ + 1416, + 468 + ], + [ + 1405, + 470 + ], + [ + 1394, + 472 + ], + [ + 1388, + 476 + ], + [ + 1386, + 482 + ], + [ + 1378, + 486 + ], + [ + 1378, + 494 + ], + [ + 1376, + 505 + ], + [ + 1376, + 512 + ], + [ + 1381, + 514 + ], + [ + 1382, + 500 + ], + [ + 1388, + 498 + ], + [ + 1398, + 500 + ], + [ + 1405, + 503 + ], + [ + 1406, + 506 + ], + [ + 1412, + 508 + ], + [ + 1414, + 503 + ], + [ + 1432, + 501 + ], + [ + 1435, + 503 + ], + [ + 1437, + 504 + ], + [ + 1442, + 501 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1524, + 505 + ], + [ + 1520, + 498 + ], + [ + 1511, + 495 + ], + [ + 1498, + 494 + ], + [ + 1486, + 494 + ], + [ + 1480, + 500 + ], + [ + 1479, + 511 + ], + [ + 1481, + 520 + ], + [ + 1486, + 523 + ], + [ + 1490, + 523 + ], + [ + 1498, + 519 + ], + [ + 1505, + 514 + ], + [ + 1517, + 514 + ], + [ + 1522, + 518 + ], + [ + 1526, + 523 + ], + [ + 1532, + 525 + ], + [ + 1537, + 523 + ], + [ + 1538, + 516 + ], + [ + 1534, + 511 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1625, + 547 + ], + [ + 1614, + 547 + ], + [ + 1604, + 544 + ], + [ + 1579, + 542 + ], + [ + 1569, + 542 + ], + [ + 1563, + 537 + ], + [ + 1548, + 538 + ], + [ + 1544, + 533 + ], + [ + 1547, + 526 + ], + [ + 1548, + 513 + ], + [ + 1550, + 505 + ], + [ + 1558, + 505 + ], + [ + 1565, + 502 + ], + [ + 1567, + 496 + ], + [ + 1568, + 486 + ], + [ + 1581, + 488 + ], + [ + 1591, + 492 + ], + [ + 1598, + 502 + ], + [ + 1609, + 510 + ], + [ + 1615, + 519 + ], + [ + 1621, + 526 + ], + [ + 1626, + 535 + ], + [ + 1627, + 541 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1602, + 551 + ], + [ + 1574, + 552 + ], + [ + 1557, + 545 + ], + [ + 1566, + 540 + ], + [ + 1572, + 533 + ], + [ + 1573, + 522 + ], + [ + 1573, + 519 + ], + [ + 1605, + 520 + ], + [ + 1605, + 526 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000147_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000147_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..4f5bd6951fdeacae75f8911fe1fc4a05198f51bd Binary files /dev/null and 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000147_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000148_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000148_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..e94c787f4c3c4950725f46258ea56a8b95bbfe53 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000148_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000148_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000148_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..cf6532d61ed1de7edd37f09a38c88e1827292c92 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000148_000019_gtFine_polygons.json @@ -0,0 +1,6819 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [ + 165, + 249 + ], + [ + 422, + 237 + ], + [ + 627, + 153 + ], + [ + 666, + 91 + ], + [ + 1081, + 55 + ], + [ + 1378, + 51 + ], + [ + 1445, + 49 + ], + [ + 1498, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 231 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 1952, + 432 + ], + [ + 1593, + 431 + ], + [ + 1489, + 432 + ], + [ + 1349, + 444 + ], + [ + 848, + 455 + ], + [ + 560, + 464 + ], + [ + 254, + 478 + ], + [ + 0, + 482 + ], + [ + 0, + 1024 + ], + [ + 2048, + 1024 + ], + [ + 2048, + 450 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1510, + 484 + ], + [ + 1463, + 481 + ], + [ + 1452, + 476 + ], + [ + 1458, + 471 + ], + [ + 1482, + 465 + ], + [ + 1521, + 457 + ], + [ + 1533, + 453 + ], + [ + 1551, + 452 + ], + [ + 1563, + 450 + ], + [ + 1580, + 444 + ], + [ + 1600, + 437 + ], + [ + 2048, + 443 + ], + [ + 2048, + 479 + ], + [ + 1806, + 481 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2041, + 506 + ], + [ + 1561, + 503 + ], + [ + 1488, + 500 + ], + [ + 1451, + 499 + ], + [ + 1443, + 497 + ], + [ + 1447, + 491 + ], + [ + 1460, + 488 + ], + [ + 1579, + 493 + ], + [ + 1696, + 493 + ], + [ + 2021, + 483 + ], + [ + 2048, + 484 + ], + [ + 2048, + 506 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1996, + 546 + ], + [ + 1576, + 560 + ], + [ + 1559, + 555 + ], + [ + 1444, + 516 + ], + [ + 1438, + 503 + ], + [ + 1450, + 502 + ], + [ + 1460, + 505 + ], + [ + 1524, + 505 + ], + [ + 1540, + 509 + ], + [ + 1566, + 518 + ], + [ + 1957, + 521 + ], + [ + 2046, + 522 + ], + [ + 2048, + 522 + ], + [ + 2048, + 544 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2036, + 707 + ], + [ + 1977, + 685 + ], + [ + 1964, + 671 + ], + [ + 1967, + 658 + ], + [ + 1977, + 649 + ], + [ + 2008, + 639 + ], + [ + 2029, + 637 + ], + [ + 2048, + 636 + ], + [ + 2048, + 709 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1479, + 449 + ], + [ + 1467, + 449 + ], + [ + 1465, + 455 + ], + [ + 1453, + 457 + ], + [ + 1442, + 454 + ], + [ + 1443, + 439 + ], + [ + 1452, + 435 + ], + [ + 1492, + 435 + ], + [ + 1517, + 433 + ], + [ + 1540, + 433 + ], + [ + 1538, + 439 + ], + [ + 1527, + 446 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 658, + 48 + ], + [ + 656, + 0 + ], + [ + 832, + 0 + ], + [ + 833, + 2 + ], + [ + 869, + 55 + ], + [ + 880, + 56 + ], + [ + 880, + 47 + ], + [ + 897, + 47 + ], + [ + 897, + 56 + ], + [ + 944, + 54 + ], + [ + 941, + 6 + ], + [ + 954, + 5 + ], + [ + 953, + 0 + ], + [ + 973, + 0 + ], + [ + 973, + 1 + ], + [ + 975, + 14 + ], + [ + 1028, + 12 + ], + [ + 
1029, + 4 + ], + [ + 1039, + 5 + ], + [ + 1039, + 10 + ], + [ + 1048, + 10 + ], + [ + 1048, + 2 + ], + [ + 1048, + 0 + ], + [ + 1062, + 0 + ], + [ + 1063, + 1 + ], + [ + 1064, + 11 + ], + [ + 1074, + 11 + ], + [ + 1074, + 5 + ], + [ + 1083, + 5 + ], + [ + 1084, + 10 + ], + [ + 1126, + 8 + ], + [ + 1126, + 1 + ], + [ + 1150, + 1 + ], + [ + 1151, + 9 + ], + [ + 1210, + 7 + ], + [ + 1210, + 1 + ], + [ + 1243, + 0 + ], + [ + 1243, + 4 + ], + [ + 1252, + 6 + ], + [ + 1252, + 0 + ], + [ + 1274, + 0 + ], + [ + 1277, + 6 + ], + [ + 1302, + 7 + ], + [ + 1303, + 0 + ], + [ + 1323, + 0 + ], + [ + 1323, + 9 + ], + [ + 1326, + 25 + ], + [ + 1335, + 25 + ], + [ + 1334, + 21 + ], + [ + 1345, + 18 + ], + [ + 1347, + 21 + ], + [ + 1350, + 25 + ], + [ + 1350, + 8 + ], + [ + 1350, + 7 + ], + [ + 1357, + 7 + ], + [ + 1357, + 23 + ], + [ + 1364, + 24 + ], + [ + 1372, + 25 + ], + [ + 1367, + 59 + ], + [ + 1446, + 92 + ], + [ + 1568, + 93 + ], + [ + 1735, + 111 + ], + [ + 1669, + 435 + ], + [ + 1539, + 439 + ], + [ + 1509, + 441 + ], + [ + 1485, + 441 + ], + [ + 1472, + 441 + ], + [ + 1446, + 443 + ], + [ + 1415, + 443 + ], + [ + 869, + 452 + ], + [ + 827, + 452 + ], + [ + 793, + 452 + ], + [ + 528, + 458 + ], + [ + 208, + 472 + ], + [ + 0, + 474 + ], + [ + 0, + 157 + ], + [ + 8, + 159 + ], + [ + 8, + 175 + ], + [ + 93, + 194 + ], + [ + 91, + 196 + ], + [ + 80, + 197 + ], + [ + 81, + 205 + ], + [ + 87, + 205 + ], + [ + 99, + 210 + ], + [ + 125, + 206 + ], + [ + 136, + 210 + ], + [ + 143, + 211 + ], + [ + 147, + 210 + ], + [ + 155, + 210 + ], + [ + 162, + 216 + ], + [ + 173, + 220 + ], + [ + 207, + 219 + ], + [ + 234, + 223 + ], + [ + 252, + 226 + ], + [ + 319, + 225 + ], + [ + 315, + 113 + ], + [ + 327, + 111 + ], + [ + 327, + 99 + ], + [ + 328, + 90 + ], + [ + 335, + 85 + ], + [ + 339, + 89 + ], + [ + 342, + 95 + ], + [ + 345, + 102 + ], + [ + 348, + 105 + ], + [ + 361, + 109 + ], + [ + 363, + 82 + ], + [ + 353, + 77 + ], + [ + 343, + 76 + ], + [ + 344, + 70 + ], + [ + 461, + 66 + ], + [ + 461, + 61 + ], + [ + 478, + 60 + ], + [ + 483, + 62 + ], + [ + 483, + 66 + ], + [ + 519, + 64 + ], + [ + 519, + 62 + ], + [ + 531, + 61 + ], + [ + 529, + 51 + ], + [ + 589, + 49 + ], + [ + 602, + 51 + ], + [ + 610, + 54 + ], + [ + 610, + 62 + ], + [ + 625, + 62 + ], + [ + 625, + 60 + ], + [ + 618, + 58 + ], + [ + 619, + 57 + ], + [ + 631, + 50 + ], + [ + 648, + 48 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 222, + 433 + ], + [ + 223, + 463 + ], + [ + 221, + 463 + ], + [ + 220, + 436 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 225, + 428 + ], + [ + 225, + 440 + ], + [ + 217, + 439 + ], + [ + 217, + 428 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 171, + 434 + ], + [ + 182, + 440 + ], + [ + 181, + 455 + ], + [ + 179, + 468 + ], + [ + 136, + 468 + ], + [ + 135, + 442 + ], + [ + 135, + 436 + ], + [ + 139, + 434 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 90, + 431 + ], + [ + 90, + 477 + ], + [ + 93, + 477 + ], + [ + 93, + 424 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 94, + 422 + ], + [ + 93, + 436 + ], + [ + 90, + 436 + ], + [ + 90, + 421 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 9, + 466 + ], + [ + 9, + 448 + ], + [ + 18, + 424 + ], + [ + 20, + 427 + ], + [ + 33, + 424 + ], + [ + 38, + 411 + ], + [ + 52, + 406 + ], + [ + 54, + 398 + ], + [ + 60, + 374 + ], + [ + 61, + 365 + ], + [ + 67, + 359 + ], + [ + 72, + 361 + ], + [ + 73, + 380 + ], + [ + 78, + 388 + ], + [ + 85, + 386 + ], + [ + 94, + 378 + ], + [ + 94, + 364 + ], + [ + 
102, + 354 + ], + [ + 112, + 339 + ], + [ + 121, + 313 + ], + [ + 134, + 297 + ], + [ + 134, + 282 + ], + [ + 118, + 259 + ], + [ + 101, + 242 + ], + [ + 59, + 237 + ], + [ + 36, + 237 + ], + [ + 24, + 244 + ], + [ + 9, + 238 + ], + [ + 0, + 229 + ], + [ + 0, + 475 + ], + [ + 11, + 478 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 34, + 391 + ], + [ + 81, + 379 + ], + [ + 81, + 381 + ], + [ + 33, + 394 + ], + [ + 30, + 409 + ], + [ + 30, + 436 + ], + [ + 27, + 436 + ], + [ + 27, + 411 + ], + [ + 29, + 398 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 81, + 395 + ], + [ + 80, + 377 + ], + [ + 88, + 378 + ], + [ + 87, + 396 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 285, + 441 + ], + [ + 245, + 440 + ], + [ + 243, + 427 + ], + [ + 284, + 430 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1513, + 424 + ], + [ + 1506, + 426 + ], + [ + 1501, + 427 + ], + [ + 1501, + 438 + ], + [ + 1504, + 445 + ], + [ + 1511, + 444 + ], + [ + 1516, + 441 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1557, + 423 + ], + [ + 1564, + 421 + ], + [ + 1574, + 417 + ], + [ + 1580, + 417 + ], + [ + 1590, + 427 + ], + [ + 1589, + 439 + ], + [ + 1579, + 443 + ], + [ + 1569, + 443 + ], + [ + 1562, + 436 + ], + [ + 1562, + 428 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1561, + 391 + ], + [ + 1561, + 439 + ], + [ + 1564, + 438 + ], + [ + 1565, + 389 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1554, + 385 + ], + [ + 1531, + 381 + ], + [ + 1509, + 365 + ], + [ + 1492, + 361 + ], + [ + 1484, + 348 + ], + [ + 1474, + 346 + ], + [ + 1462, + 341 + ], + [ + 1474, + 337 + ], + [ + 1462, + 322 + ], + [ + 1448, + 315 + ], + [ + 1432, + 311 + ], + [ + 1427, + 325 + ], + [ + 1422, + 336 + ], + [ + 1404, + 335 + ], + [ + 1403, + 345 + ], + [ + 1405, + 349 + ], + [ + 1391, + 349 + ], + [ + 1384, + 354 + ], + [ + 1379, + 369 + ], + [ + 1368, + 371 + ], + [ + 1366, + 355 + ], + [ + 1360, + 345 + ], + [ + 1353, + 352 + ], + [ + 1345, + 358 + ], + [ + 1343, + 362 + ], + [ + 1348, + 433 + ], + [ + 1338, + 435 + ], + [ + 1335, + 414 + ], + [ + 1334, + 380 + ], + [ + 1332, + 348 + ], + [ + 1322, + 341 + ], + [ + 1305, + 340 + ], + [ + 1292, + 341 + ], + [ + 1290, + 352 + ], + [ + 1292, + 373 + ], + [ + 1306, + 381 + ], + [ + 1322, + 382 + ], + [ + 1333, + 384 + ], + [ + 1336, + 397 + ], + [ + 1328, + 413 + ], + [ + 1307, + 420 + ], + [ + 1295, + 429 + ], + [ + 1291, + 436 + ], + [ + 1279, + 431 + ], + [ + 1271, + 419 + ], + [ + 1271, + 397 + ], + [ + 1280, + 387 + ], + [ + 1283, + 366 + ], + [ + 1282, + 345 + ], + [ + 1280, + 341 + ], + [ + 1273, + 348 + ], + [ + 1275, + 367 + ], + [ + 1267, + 387 + ], + [ + 1266, + 412 + ], + [ + 1266, + 436 + ], + [ + 1256, + 435 + ], + [ + 1258, + 408 + ], + [ + 1258, + 390 + ], + [ + 1258, + 371 + ], + [ + 1253, + 359 + ], + [ + 1234, + 353 + ], + [ + 1228, + 370 + ], + [ + 1233, + 379 + ], + [ + 1253, + 381 + ], + [ + 1253, + 400 + ], + [ + 1247, + 413 + ], + [ + 1242, + 422 + ], + [ + 1227, + 430 + ], + [ + 1211, + 420 + ], + [ + 1208, + 415 + ], + [ + 1183, + 390 + ], + [ + 1196, + 386 + ], + [ + 1200, + 378 + ], + [ + 1201, + 359 + ], + [ + 1200, + 335 + ], + [ + 1184, + 314 + ], + [ + 1176, + 338 + ], + [ + 1186, + 365 + ], + [ + 1183, + 393 + ], + [ + 1173, + 398 + ], + [ + 1154, + 386 + ], + [ + 1140, + 356 + ], + [ + 1139, + 331 + ], + [ + 1122, + 307 + ], + [ + 1096, + 303 + ], + [ + 1083, + 301 + ], + [ + 1080, + 315 + ], + [ + 1075, + 337 + ], + [ + 1065, + 352 + ], + [ + 1058, + 353 + ], + [ + 1048, 
+ 347 + ], + [ + 1035, + 348 + ], + [ + 1028, + 340 + ], + [ + 1038, + 322 + ], + [ + 1039, + 311 + ], + [ + 1033, + 298 + ], + [ + 1037, + 274 + ], + [ + 1037, + 263 + ], + [ + 1032, + 250 + ], + [ + 1039, + 244 + ], + [ + 1054, + 244 + ], + [ + 1058, + 224 + ], + [ + 1060, + 213 + ], + [ + 1076, + 220 + ], + [ + 1107, + 207 + ], + [ + 1118, + 193 + ], + [ + 1127, + 177 + ], + [ + 1108, + 180 + ], + [ + 1094, + 178 + ], + [ + 1085, + 178 + ], + [ + 1085, + 191 + ], + [ + 1072, + 198 + ], + [ + 1062, + 189 + ], + [ + 1054, + 164 + ], + [ + 1057, + 155 + ], + [ + 1058, + 147 + ], + [ + 1045, + 139 + ], + [ + 1054, + 116 + ], + [ + 1051, + 101 + ], + [ + 1064, + 104 + ], + [ + 1067, + 97 + ], + [ + 1059, + 84 + ], + [ + 1067, + 67 + ], + [ + 1086, + 70 + ], + [ + 1109, + 73 + ], + [ + 1125, + 62 + ], + [ + 1136, + 58 + ], + [ + 1144, + 65 + ], + [ + 1159, + 65 + ], + [ + 1158, + 80 + ], + [ + 1159, + 89 + ], + [ + 1181, + 85 + ], + [ + 1176, + 70 + ], + [ + 1171, + 61 + ], + [ + 1177, + 50 + ], + [ + 1184, + 48 + ], + [ + 1181, + 38 + ], + [ + 1190, + 17 + ], + [ + 1203, + 10 + ], + [ + 1214, + 4 + ], + [ + 1218, + 0 + ], + [ + 1238, + 0 + ], + [ + 1243, + 0 + ], + [ + 1251, + 5 + ], + [ + 1254, + 0 + ], + [ + 1269, + 0 + ], + [ + 1280, + 1 + ], + [ + 1281, + 5 + ], + [ + 1304, + 6 + ], + [ + 1318, + 6 + ], + [ + 1329, + 10 + ], + [ + 1334, + 21 + ], + [ + 1347, + 26 + ], + [ + 1351, + 33 + ], + [ + 1363, + 44 + ], + [ + 1368, + 38 + ], + [ + 1357, + 25 + ], + [ + 1367, + 9 + ], + [ + 1386, + 0 + ], + [ + 1599, + 0 + ], + [ + 1609, + 251 + ], + [ + 1586, + 286 + ], + [ + 1577, + 343 + ], + [ + 1581, + 421 + ], + [ + 1587, + 448 + ], + [ + 1573, + 450 + ], + [ + 1566, + 378 + ], + [ + 1563, + 328 + ], + [ + 1557, + 325 + ], + [ + 1550, + 336 + ], + [ + 1558, + 346 + ], + [ + 1545, + 353 + ], + [ + 1533, + 357 + ], + [ + 1552, + 364 + ], + [ + 1563, + 373 + ], + [ + 1561, + 380 + ], + [ + 1559, + 385 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1557, + 399 + ], + [ + 1557, + 373 + ], + [ + 1567, + 375 + ], + [ + 1566, + 399 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1543, + 419 + ], + [ + 1560, + 421 + ], + [ + 1568, + 427 + ], + [ + 1574, + 435 + ], + [ + 1578, + 444 + ], + [ + 1578, + 450 + ], + [ + 1570, + 457 + ], + [ + 1560, + 456 + ], + [ + 1549, + 454 + ], + [ + 1539, + 452 + ], + [ + 1530, + 445 + ], + [ + 1530, + 431 + ], + [ + 1530, + 422 + ], + [ + 1533, + 420 + ], + [ + 1536, + 419 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1532, + 386 + ], + [ + 1531, + 465 + ], + [ + 1537, + 467 + ], + [ + 1534, + 379 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1537, + 386 + ], + [ + 1537, + 372 + ], + [ + 1536, + 372 + ], + [ + 1537, + 365 + ], + [ + 1535, + 359 + ], + [ + 1528, + 358 + ], + [ + 1525, + 366 + ], + [ + 1526, + 389 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1547, + 457 + ], + [ + 1536, + 453 + ], + [ + 1534, + 438 + ], + [ + 1534, + 426 + ], + [ + 1543, + 427 + ], + [ + 1550, + 426 + ], + [ + 1548, + 453 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1498, + 376 + ], + [ + 1500, + 470 + ], + [ + 1503, + 471 + ], + [ + 1500, + 372 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1505, + 353 + ], + [ + 1505, + 383 + ], + [ + 1491, + 382 + ], + [ + 1491, + 352 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 1584, + 28 + ], + [ + 1589, + 252 + ], + [ + 1591, + 273 + ], + [ + 1584, + 286 + ], + [ + 1594, + 424 + ], + [ + 1593, + 455 + ], + [ + 1593, + 466 + ], + [ 
+ 1958, + 465 + ], + [ + 2048, + 464 + ], + [ + 2048, + 463 + ], + [ + 2048, + 0 + ], + [ + 1583, + 0 + ], + [ + 1584, + 1 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1502, + 32 + ], + [ + 1510, + 476 + ], + [ + 1527, + 476 + ], + [ + 1516, + 19 + ], + [ + 1514, + 0 + ], + [ + 1501, + 0 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1529, + 220 + ], + [ + 1531, + 478 + ], + [ + 1519, + 479 + ], + [ + 1517, + 218 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1494, + 250 + ], + [ + 1505, + 250 + ], + [ + 1506, + 245 + ], + [ + 1516, + 246 + ], + [ + 1518, + 264 + ], + [ + 1508, + 264 + ], + [ + 1506, + 268 + ], + [ + 1499, + 266 + ], + [ + 1492, + 261 + ], + [ + 1492, + 257 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1540, + 242 + ], + [ + 1539, + 312 + ], + [ + 1516, + 311 + ], + [ + 1520, + 240 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1556, + 247 + ], + [ + 1591, + 249 + ], + [ + 1597, + 268 + ], + [ + 1575, + 312 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1589, + 302 + ], + [ + 1596, + 485 + ], + [ + 1584, + 487 + ], + [ + 1581, + 301 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1575, + 221 + ], + [ + 1571, + 213 + ], + [ + 1571, + 202 + ], + [ + 1571, + 192 + ], + [ + 1578, + 185 + ], + [ + 1583, + 186 + ], + [ + 1585, + 191 + ], + [ + 1585, + 202 + ], + [ + 1584, + 215 + ], + [ + 1581, + 218 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1600, + 237 + ], + [ + 1584, + 234 + ], + [ + 1583, + 239 + ], + [ + 1587, + 258 + ], + [ + 1591, + 270 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1543, + 279 + ], + [ + 1565, + 253 + ], + [ + 1586, + 275 + ], + [ + 1565, + 299 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1596, + 289 + ], + [ + 1606, + 289 + ], + [ + 1608, + 295 + ], + [ + 1605, + 301 + ], + [ + 1605, + 305 + ], + [ + 1609, + 310 + ], + [ + 1609, + 314 + ], + [ + 1609, + 321 + ], + [ + 1608, + 326 + ], + [ + 1613, + 328 + ], + [ + 1612, + 333 + ], + [ + 1606, + 339 + ], + [ + 1594, + 343 + ], + [ + 1582, + 341 + ], + [ + 1584, + 289 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1552, + 302 + ], + [ + 1555, + 351 + ], + [ + 1585, + 351 + ], + [ + 1584, + 300 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1636, + 167 + ], + [ + 1640, + 479 + ], + [ + 1649, + 479 + ], + [ + 1642, + 169 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1636, + 157 + ], + [ + 1637, + 210 + ], + [ + 1655, + 211 + ], + [ + 1655, + 201 + ], + [ + 1655, + 200 + ], + [ + 1658, + 197 + ], + [ + 1656, + 193 + ], + [ + 1655, + 193 + ], + [ + 1655, + 187 + ], + [ + 1657, + 186 + ], + [ + 1657, + 183 + ], + [ + 1656, + 179 + ], + [ + 1655, + 179 + ], + [ + 1655, + 173 + ], + [ + 1656, + 170 + ], + [ + 1657, + 166 + ], + [ + 1656, + 163 + ], + [ + 1655, + 162 + ], + [ + 1655, + 159 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1655, + 303 + ], + [ + 1655, + 312 + ], + [ + 1644, + 310 + ], + [ + 1642, + 300 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1611, + 293 + ], + [ + 1638, + 293 + ], + [ + 1637, + 310 + ], + [ + 1612, + 311 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1623, + 368 + ], + [ + 1625, + 326 + ], + [ + 1639, + 325 + ], + [ + 1640, + 368 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1622, + 330 + ], + [ + 1606, + 306 + ], + [ + 1640, + 306 + ], + [ + 1626, + 336 + ] + ] + }, + { + "label": "traffic sign", + 
"polygon": [ + [ + 1676, + 328 + ], + [ + 1664, + 361 + ], + [ + 1648, + 328 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1656, + 368 + ], + [ + 1645, + 367 + ], + [ + 1645, + 327 + ], + [ + 1654, + 326 + ], + [ + 1659, + 328 + ], + [ + 1660, + 331 + ], + [ + 1660, + 335 + ], + [ + 1658, + 339 + ], + [ + 1658, + 340 + ], + [ + 1662, + 341 + ], + [ + 1662, + 345 + ], + [ + 1661, + 350 + ], + [ + 1658, + 352 + ], + [ + 1660, + 354 + ], + [ + 1661, + 358 + ], + [ + 1660, + 364 + ], + [ + 1657, + 365 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1900, + 246 + ], + [ + 1901, + 306 + ], + [ + 1897, + 314 + ], + [ + 1892, + 316 + ], + [ + 1888, + 317 + ], + [ + 1880, + 316 + ], + [ + 1879, + 243 + ], + [ + 1882, + 241 + ], + [ + 1886, + 238 + ], + [ + 1891, + 239 + ], + [ + 1896, + 243 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2028, + 403 + ], + [ + 2030, + 481 + ], + [ + 2037, + 481 + ], + [ + 2035, + 404 + ], + [ + 2035, + 402 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1847, + 533 + ], + [ + 1875, + 533 + ], + [ + 1887, + 530 + ], + [ + 1878, + 1 + ], + [ + 1842, + 3 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1834, + 222 + ], + [ + 1856, + 222 + ], + [ + 1860, + 227 + ], + [ + 1865, + 231 + ], + [ + 1865, + 237 + ], + [ + 1860, + 240 + ], + [ + 1860, + 251 + ], + [ + 1861, + 253 + ], + [ + 1865, + 257 + ], + [ + 1864, + 266 + ], + [ + 1860, + 270 + ], + [ + 1861, + 281 + ], + [ + 1866, + 285 + ], + [ + 1866, + 291 + ], + [ + 1861, + 297 + ], + [ + 1858, + 303 + ], + [ + 1858, + 307 + ], + [ + 1854, + 309 + ], + [ + 1838, + 310 + ], + [ + 1834, + 305 + ], + [ + 1834, + 299 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1480, + 428 + ], + [ + 1481, + 448 + ], + [ + 1471, + 449 + ], + [ + 1472, + 428 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1460, + 423 + ], + [ + 1461, + 457 + ], + [ + 1447, + 457 + ], + [ + 1449, + 433 + ], + [ + 1450, + 425 + ], + [ + 1456, + 423 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1389, + 436 + ], + [ + 1389, + 358 + ], + [ + 1381, + 353 + ], + [ + 1387, + 351 + ], + [ + 1433, + 351 + ], + [ + 1435, + 442 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1355, + 394 + ], + [ + 1355, + 440 + ], + [ + 1358, + 440 + ], + [ + 1358, + 391 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1446, + 17 + ], + [ + 1452, + 496 + ], + [ + 1427, + 498 + ], + [ + 1424, + 17 + ], + [ + 1425, + 0 + ], + [ + 1445, + 0 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1439, + 308 + ], + [ + 1438, + 359 + ], + [ + 1422, + 359 + ], + [ + 1420, + 350 + ], + [ + 1421, + 345 + ], + [ + 1423, + 343 + ], + [ + 1423, + 334 + ], + [ + 1419, + 334 + ], + [ + 1421, + 329 + ], + [ + 1422, + 326 + ], + [ + 1422, + 320 + ], + [ + 1419, + 320 + ], + [ + 1419, + 313 + ], + [ + 1422, + 311 + ], + [ + 1423, + 309 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1242, + 368 + ], + [ + 1243, + 434 + ], + [ + 1247, + 436 + ], + [ + 1245, + 366 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1250, + 364 + ], + [ + 1242, + 364 + ], + [ + 1245, + 391 + ], + [ + 1253, + 393 + ], + [ + 1256, + 392 + ], + [ + 1258, + 388 + ], + [ + 1260, + 385 + ], + [ + 1257, + 384 + ], + [ + 1257, + 380 + ], + [ + 1259, + 380 + ], + [ + 1259, + 377 + ], + [ + 1256, + 376 + ], + [ + 1255, + 372 + ], + [ + 1260, + 372 + ], + [ + 1260, + 367 + ], + [ + 1257, + 366 + ], + [ + 1254, + 365 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 
1233, + 363 + ], + [ + 1247, + 365 + ], + [ + 1245, + 375 + ], + [ + 1231, + 373 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1136, + 207 + ], + [ + 1140, + 386 + ], + [ + 1144, + 387 + ], + [ + 1141, + 206 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1081, + 283 + ], + [ + 1083, + 314 + ], + [ + 1093, + 314 + ], + [ + 1093, + 283 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1080, + 288 + ], + [ + 1105, + 287 + ], + [ + 1129, + 297 + ], + [ + 1136, + 305 + ], + [ + 1140, + 318 + ], + [ + 1136, + 318 + ], + [ + 1130, + 307 + ], + [ + 1127, + 300 + ], + [ + 1101, + 291 + ], + [ + 1082, + 291 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 991, + 341 + ], + [ + 995, + 390 + ], + [ + 997, + 389 + ], + [ + 994, + 340 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1080, + 377 + ], + [ + 1066, + 365 + ], + [ + 1049, + 368 + ], + [ + 1038, + 380 + ], + [ + 1031, + 376 + ], + [ + 1029, + 359 + ], + [ + 1022, + 343 + ], + [ + 1006, + 349 + ], + [ + 1007, + 372 + ], + [ + 1001, + 384 + ], + [ + 983, + 384 + ], + [ + 971, + 375 + ], + [ + 971, + 368 + ], + [ + 969, + 358 + ], + [ + 963, + 346 + ], + [ + 958, + 344 + ], + [ + 950, + 365 + ], + [ + 948, + 381 + ], + [ + 940, + 388 + ], + [ + 937, + 381 + ], + [ + 925, + 371 + ], + [ + 920, + 386 + ], + [ + 916, + 385 + ], + [ + 913, + 371 + ], + [ + 907, + 360 + ], + [ + 902, + 377 + ], + [ + 899, + 381 + ], + [ + 893, + 373 + ], + [ + 884, + 366 + ], + [ + 879, + 374 + ], + [ + 866, + 373 + ], + [ + 851, + 378 + ], + [ + 851, + 391 + ], + [ + 851, + 394 + ], + [ + 847, + 403 + ], + [ + 853, + 423 + ], + [ + 852, + 436 + ], + [ + 850, + 426 + ], + [ + 845, + 419 + ], + [ + 839, + 422 + ], + [ + 831, + 431 + ], + [ + 834, + 421 + ], + [ + 836, + 408 + ], + [ + 840, + 399 + ], + [ + 839, + 388 + ], + [ + 836, + 384 + ], + [ + 834, + 393 + ], + [ + 827, + 392 + ], + [ + 824, + 385 + ], + [ + 814, + 381 + ], + [ + 810, + 391 + ], + [ + 806, + 399 + ], + [ + 799, + 397 + ], + [ + 784, + 390 + ], + [ + 762, + 393 + ], + [ + 753, + 401 + ], + [ + 724, + 409 + ], + [ + 713, + 401 + ], + [ + 713, + 387 + ], + [ + 713, + 376 + ], + [ + 742, + 376 + ], + [ + 753, + 376 + ], + [ + 759, + 367 + ], + [ + 745, + 358 + ], + [ + 718, + 354 + ], + [ + 695, + 357 + ], + [ + 691, + 370 + ], + [ + 679, + 378 + ], + [ + 672, + 367 + ], + [ + 667, + 360 + ], + [ + 645, + 353 + ], + [ + 625, + 353 + ], + [ + 601, + 365 + ], + [ + 600, + 366 + ], + [ + 588, + 370 + ], + [ + 579, + 383 + ], + [ + 556, + 381 + ], + [ + 537, + 382 + ], + [ + 521, + 373 + ], + [ + 505, + 366 + ], + [ + 501, + 357 + ], + [ + 487, + 354 + ], + [ + 471, + 360 + ], + [ + 478, + 381 + ], + [ + 479, + 397 + ], + [ + 471, + 400 + ], + [ + 457, + 394 + ], + [ + 455, + 369 + ], + [ + 437, + 364 + ], + [ + 432, + 372 + ], + [ + 421, + 373 + ], + [ + 399, + 377 + ], + [ + 375, + 397 + ], + [ + 364, + 408 + ], + [ + 345, + 415 + ], + [ + 340, + 420 + ], + [ + 324, + 410 + ], + [ + 311, + 409 + ], + [ + 298, + 412 + ], + [ + 290, + 424 + ], + [ + 289, + 445 + ], + [ + 290, + 462 + ], + [ + 294, + 464 + ], + [ + 320, + 468 + ], + [ + 479, + 465 + ], + [ + 614, + 464 + ], + [ + 737, + 457 + ], + [ + 859, + 450 + ], + [ + 1086, + 444 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1090, + 333 + ], + [ + 1085, + 329 + ], + [ + 1083, + 319 + ], + [ + 1085, + 312 + ], + [ + 1088, + 309 + ], + [ + 1096, + 305 + ], + [ + 1100, + 306 + ], + [ + 1110, + 312 + ], + [ + 1111, + 326 + ], + [ + 1107, + 333 + ] + ] + }, + { + "label": "pole", + 
"polygon": [ + [ + 1101, + 35 + ], + [ + 1110, + 350 + ], + [ + 1124, + 349 + ], + [ + 1115, + 34 + ], + [ + 1115, + 30 + ], + [ + 1112, + 29 + ], + [ + 1106, + 29 + ], + [ + 1104, + 31 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1093, + 379 + ], + [ + 1093, + 304 + ], + [ + 1144, + 303 + ], + [ + 1145, + 388 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 983, + 315 + ], + [ + 985, + 351 + ], + [ + 1001, + 351 + ], + [ + 1001, + 314 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 881, + 447 + ], + [ + 842, + 446 + ], + [ + 808, + 445 + ], + [ + 788, + 445 + ], + [ + 788, + 463 + ], + [ + 853, + 465 + ], + [ + 889, + 465 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 822, + 434 + ], + [ + 823, + 467 + ], + [ + 823, + 469 + ], + [ + 810, + 469 + ], + [ + 810, + 431 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 813, + 471 + ], + [ + 807, + 471 + ], + [ + 802, + 465 + ], + [ + 796, + 467 + ], + [ + 788, + 467 + ], + [ + 784, + 459 + ], + [ + 784, + 448 + ], + [ + 789, + 444 + ], + [ + 798, + 445 + ], + [ + 798, + 445 + ], + [ + 798, + 438 + ], + [ + 801, + 438 + ], + [ + 807, + 440 + ], + [ + 809, + 446 + ], + [ + 810, + 451 + ], + [ + 811, + 458 + ], + [ + 811, + 459 + ], + [ + 813, + 459 + ], + [ + 813, + 459 + ], + [ + 814, + 462 + ], + [ + 815, + 463 + ], + [ + 815, + 466 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 10, + 495 + ], + [ + 79, + 494 + ], + [ + 119, + 494 + ], + [ + 145, + 496 + ], + [ + 169, + 499 + ], + [ + 202, + 501 + ], + [ + 221, + 496 + ], + [ + 239, + 491 + ], + [ + 294, + 491 + ], + [ + 381, + 487 + ], + [ + 416, + 481 + ], + [ + 430, + 477 + ], + [ + 463, + 479 + ], + [ + 474, + 478 + ], + [ + 464, + 487 + ], + [ + 477, + 491 + ], + [ + 516, + 487 + ], + [ + 539, + 485 + ], + [ + 546, + 482 + ], + [ + 543, + 478 + ], + [ + 571, + 476 + ], + [ + 602, + 475 + ], + [ + 629, + 475 + ], + [ + 656, + 476 + ], + [ + 685, + 474 + ], + [ + 705, + 472 + ], + [ + 734, + 475 + ], + [ + 749, + 474 + ], + [ + 746, + 460 + ], + [ + 742, + 448 + ], + [ + 485, + 456 + ], + [ + 221, + 461 + ], + [ + 20, + 471 + ], + [ + 0, + 472 + ], + [ + 0, + 495 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 785, + 370 + ], + [ + 789, + 472 + ], + [ + 746, + 473 + ], + [ + 746, + 459 + ], + [ + 748, + 452 + ], + [ + 743, + 373 + ], + [ + 742, + 366 + ], + [ + 738, + 359 + ], + [ + 741, + 358 + ], + [ + 793, + 358 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 740, + 438 + ], + [ + 740, + 473 + ], + [ + 748, + 473 + ], + [ + 748, + 437 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 719, + 362 + ], + [ + 723, + 471 + ], + [ + 728, + 472 + ], + [ + 724, + 362 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 730, + 398 + ], + [ + 730, + 386 + ], + [ + 727, + 383 + ], + [ + 721, + 385 + ], + [ + 719, + 390 + ], + [ + 723, + 391 + ], + [ + 721, + 393 + ], + [ + 718, + 394 + ], + [ + 720, + 397 + ], + [ + 721, + 399 + ], + [ + 720, + 401 + ], + [ + 720, + 403 + ], + [ + 725, + 407 + ], + [ + 728, + 407 + ], + [ + 729, + 407 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 737, + 385 + ], + [ + 736, + 409 + ], + [ + 744, + 408 + ], + [ + 743, + 386 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 533, + 373 + ], + [ + 532, + 346 + ], + [ + 522, + 332 + ], + [ + 516, + 319 + ], + [ + 509, + 314 + ], + [ + 506, + 320 + ], + [ + 489, + 325 + ], + [ + 496, + 337 + ], + [ + 493, + 345 + ], + [ + 477, + 350 + ], + [ + 464, + 351 + ], + [ + 
444, + 345 + ], + [ + 438, + 336 + ], + [ + 431, + 342 + ], + [ + 428, + 353 + ], + [ + 411, + 359 + ], + [ + 407, + 368 + ], + [ + 419, + 466 + ], + [ + 407, + 466 + ], + [ + 404, + 403 + ], + [ + 398, + 374 + ], + [ + 386, + 367 + ], + [ + 370, + 357 + ], + [ + 357, + 361 + ], + [ + 346, + 363 + ], + [ + 339, + 367 + ], + [ + 330, + 372 + ], + [ + 326, + 380 + ], + [ + 325, + 420 + ], + [ + 323, + 466 + ], + [ + 312, + 466 + ], + [ + 311, + 374 + ], + [ + 298, + 367 + ], + [ + 279, + 370 + ], + [ + 265, + 377 + ], + [ + 262, + 395 + ], + [ + 266, + 408 + ], + [ + 283, + 422 + ], + [ + 270, + 437 + ], + [ + 275, + 454 + ], + [ + 275, + 467 + ], + [ + 271, + 467 + ], + [ + 270, + 467 + ], + [ + 267, + 444 + ], + [ + 260, + 432 + ], + [ + 256, + 430 + ], + [ + 251, + 409 + ], + [ + 247, + 409 + ], + [ + 243, + 391 + ], + [ + 233, + 372 + ], + [ + 220, + 358 + ], + [ + 188, + 347 + ], + [ + 185, + 332 + ], + [ + 183, + 317 + ], + [ + 173, + 304 + ], + [ + 171, + 289 + ], + [ + 179, + 280 + ], + [ + 184, + 256 + ], + [ + 199, + 245 + ], + [ + 217, + 244 + ], + [ + 232, + 238 + ], + [ + 224, + 230 + ], + [ + 237, + 225 + ], + [ + 249, + 220 + ], + [ + 259, + 213 + ], + [ + 246, + 201 + ], + [ + 245, + 187 + ], + [ + 268, + 181 + ], + [ + 280, + 162 + ], + [ + 306, + 162 + ], + [ + 327, + 162 + ], + [ + 338, + 168 + ], + [ + 343, + 176 + ], + [ + 346, + 161 + ], + [ + 349, + 146 + ], + [ + 358, + 146 + ], + [ + 350, + 134 + ], + [ + 367, + 131 + ], + [ + 384, + 134 + ], + [ + 395, + 144 + ], + [ + 401, + 156 + ], + [ + 407, + 150 + ], + [ + 419, + 144 + ], + [ + 427, + 150 + ], + [ + 446, + 152 + ], + [ + 452, + 158 + ], + [ + 457, + 163 + ], + [ + 470, + 171 + ], + [ + 477, + 162 + ], + [ + 476, + 150 + ], + [ + 484, + 138 + ], + [ + 486, + 131 + ], + [ + 475, + 132 + ], + [ + 450, + 123 + ], + [ + 453, + 111 + ], + [ + 458, + 105 + ], + [ + 472, + 104 + ], + [ + 472, + 100 + ], + [ + 467, + 91 + ], + [ + 465, + 80 + ], + [ + 473, + 72 + ], + [ + 481, + 71 + ], + [ + 491, + 78 + ], + [ + 501, + 85 + ], + [ + 511, + 97 + ], + [ + 507, + 112 + ], + [ + 523, + 115 + ], + [ + 535, + 118 + ], + [ + 537, + 126 + ], + [ + 531, + 132 + ], + [ + 538, + 139 + ], + [ + 555, + 128 + ], + [ + 561, + 137 + ], + [ + 582, + 156 + ], + [ + 588, + 163 + ], + [ + 583, + 171 + ], + [ + 562, + 170 + ], + [ + 545, + 175 + ], + [ + 548, + 182 + ], + [ + 564, + 191 + ], + [ + 582, + 210 + ], + [ + 594, + 236 + ], + [ + 601, + 263 + ], + [ + 600, + 283 + ], + [ + 608, + 287 + ], + [ + 620, + 303 + ], + [ + 614, + 311 + ], + [ + 610, + 319 + ], + [ + 601, + 309 + ], + [ + 590, + 303 + ], + [ + 576, + 299 + ], + [ + 557, + 295 + ], + [ + 542, + 294 + ], + [ + 537, + 298 + ], + [ + 545, + 380 + ], + [ + 541, + 394 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 528, + 374 + ], + [ + 533, + 466 + ], + [ + 588, + 465 + ], + [ + 588, + 444 + ], + [ + 588, + 416 + ], + [ + 611, + 415 + ], + [ + 611, + 390 + ], + [ + 599, + 390 + ], + [ + 585, + 380 + ], + [ + 585, + 375 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 446, + 370 + ], + [ + 452, + 487 + ], + [ + 447, + 487 + ], + [ + 442, + 374 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 436, + 391 + ], + [ + 437, + 399 + ], + [ + 432, + 416 + ], + [ + 426, + 399 + ], + [ + 427, + 391 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 453, + 411 + ], + [ + 452, + 385 + ], + [ + 440, + 385 + ], + [ + 440, + 413 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 375, + 440 + ], + [ + 383, + 440 + ], + [ + 408, + 
439 + ], + [ + 409, + 482 + ], + [ + 382, + 488 + ], + [ + 375, + 487 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 344, + 243 + ], + [ + 354, + 244 + ], + [ + 360, + 462 + ], + [ + 351, + 460 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 364, + 440 + ], + [ + 357, + 436 + ], + [ + 352, + 439 + ], + [ + 354, + 450 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 201, + 359 + ], + [ + 203, + 457 + ], + [ + 200, + 456 + ], + [ + 198, + 356 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 216, + 424 + ], + [ + 215, + 432 + ], + [ + 201, + 432 + ], + [ + 201, + 422 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 192, + 284 + ], + [ + 200, + 462 + ], + [ + 191, + 461 + ], + [ + 187, + 280 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 48, + 288 + ], + [ + 58, + 495 + ], + [ + 71, + 491 + ], + [ + 62, + 284 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 81, + 292 + ], + [ + 59, + 293 + ], + [ + 58, + 253 + ], + [ + 79, + 253 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 145, + 278 + ], + [ + 69, + 261 + ], + [ + 69, + 264 + ], + [ + 148, + 284 + ], + [ + 156, + 289 + ], + [ + 163, + 303 + ], + [ + 165, + 326 + ], + [ + 169, + 467 + ], + [ + 174, + 466 + ], + [ + 175, + 466 + ], + [ + 170, + 302 + ], + [ + 166, + 291 + ], + [ + 156, + 282 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 152, + 339 + ], + [ + 153, + 304 + ], + [ + 163, + 305 + ], + [ + 162, + 339 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 182, + 339 + ], + [ + 171, + 341 + ], + [ + 172, + 306 + ], + [ + 184, + 306 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 45, + 425 + ], + [ + 48, + 472 + ], + [ + 22, + 471 + ], + [ + 21, + 427 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 161, + 372 + ], + [ + 149, + 372 + ], + [ + 152, + 406 + ], + [ + 166, + 407 + ], + [ + 165, + 372 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 185, + 414 + ], + [ + 178, + 414 + ], + [ + 169, + 419 + ], + [ + 168, + 426 + ], + [ + 171, + 435 + ], + [ + 179, + 437 + ], + [ + 186, + 437 + ], + [ + 191, + 432 + ], + [ + 192, + 424 + ], + [ + 191, + 417 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 177, + 394 + ], + [ + 180, + 492 + ], + [ + 184, + 493 + ], + [ + 179, + 386 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 700, + 480 + ], + [ + 719, + 487 + ], + [ + 722, + 491 + ], + [ + 700, + 503 + ], + [ + 588, + 531 + ], + [ + 284, + 547 + ], + [ + 181, + 556 + ], + [ + 41, + 560 + ], + [ + 0, + 560 + ], + [ + 0, + 502 + ], + [ + 132, + 501 + ], + [ + 189, + 501 + ], + [ + 213, + 505 + ], + [ + 235, + 510 + ], + [ + 461, + 501 + ], + [ + 644, + 486 + ], + [ + 660, + 483 + ], + [ + 678, + 480 + ], + [ + 697, + 480 + ], + [ + 704, + 480 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 183, + 377 + ], + [ + 174, + 378 + ], + [ + 169, + 385 + ], + [ + 169, + 394 + ], + [ + 173, + 399 + ], + [ + 178, + 403 + ], + [ + 185, + 403 + ], + [ + 188, + 401 + ], + [ + 187, + 391 + ], + [ + 186, + 381 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 147, + 354 + ], + [ + 138, + 355 + ], + [ + 131, + 360 + ], + [ + 131, + 374 + ], + [ + 136, + 384 + ], + [ + 145, + 389 + ], + [ + 154, + 386 + ], + [ + 158, + 379 + ], + [ + 159, + 370 + ], + [ + 158, + 361 + ], + [ + 153, + 356 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 155, + 414 + ], + [ + 154, + 435 + ], + [ + 134, + 433 + ], + [ + 135, + 413 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 
146, + 506 + ], + [ + 141, + 505 + ], + [ + 139, + 356 + ], + [ + 143, + 356 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 159, + 503 + ], + [ + 157, + 443 + ], + [ + 165, + 443 + ], + [ + 163, + 505 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 140, + 537 + ], + [ + 112, + 539 + ], + [ + 90, + 0 + ], + [ + 114, + 0 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 362, + 325 + ], + [ + 367, + 525 + ], + [ + 374, + 526 + ], + [ + 367, + 325 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 369, + 294 + ], + [ + 359, + 296 + ], + [ + 352, + 304 + ], + [ + 349, + 313 + ], + [ + 351, + 326 + ], + [ + 360, + 337 + ], + [ + 370, + 343 + ], + [ + 377, + 340 + ], + [ + 380, + 332 + ], + [ + 375, + 298 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 307, + 514 + ], + [ + 325, + 515 + ], + [ + 304, + 66 + ], + [ + 304, + 62 + ], + [ + 297, + 58 + ], + [ + 290, + 59 + ], + [ + 290, + 62 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 503, + 169 + ], + [ + 309, + 171 + ], + [ + 284, + 171 + ], + [ + 187, + 184 + ], + [ + 187, + 179 + ], + [ + 284, + 168 + ], + [ + 192, + 73 + ], + [ + 196, + 73 + ], + [ + 291, + 168 + ], + [ + 308, + 168 + ], + [ + 426, + 73 + ], + [ + 431, + 75 + ], + [ + 311, + 168 + ], + [ + 503, + 165 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 154, + 102 + ], + [ + 125, + 101 + ], + [ + 88, + 102 + ], + [ + 2, + 108 + ], + [ + 0, + 105 + ], + [ + 87, + 98 + ], + [ + 0, + 11 + ], + [ + 0, + 8 + ], + [ + 91, + 97 + ], + [ + 121, + 97 + ], + [ + 233, + 1 + ], + [ + 240, + 2 + ], + [ + 128, + 98 + ], + [ + 363, + 93 + ], + [ + 361, + 97 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 685, + 300 + ], + [ + 685, + 486 + ], + [ + 693, + 486 + ], + [ + 689, + 303 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 668, + 127 + ], + [ + 681, + 474 + ], + [ + 670, + 475 + ], + [ + 656, + 126 + ], + [ + 659, + 125 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 697, + 303 + ], + [ + 698, + 340 + ], + [ + 684, + 340 + ], + [ + 684, + 302 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 695, + 353 + ], + [ + 696, + 387 + ], + [ + 684, + 387 + ], + [ + 685, + 353 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 696, + 423 + ], + [ + 699, + 427 + ], + [ + 699, + 431 + ], + [ + 696, + 437 + ], + [ + 694, + 440 + ], + [ + 687, + 441 + ], + [ + 682, + 440 + ], + [ + 679, + 436 + ], + [ + 679, + 429 + ], + [ + 680, + 423 + ], + [ + 685, + 422 + ], + [ + 690, + 421 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 643, + 306 + ], + [ + 650, + 499 + ], + [ + 655, + 499 + ], + [ + 648, + 303 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 634, + 321 + ], + [ + 636, + 367 + ], + [ + 623, + 367 + ], + [ + 622, + 362 + ], + [ + 617, + 360 + ], + [ + 617, + 355 + ], + [ + 620, + 352 + ], + [ + 623, + 351 + ], + [ + 622, + 349 + ], + [ + 619, + 347 + ], + [ + 619, + 342 + ], + [ + 620, + 339 + ], + [ + 624, + 338 + ], + [ + 623, + 334 + ], + [ + 618, + 329 + ], + [ + 618, + 325 + ], + [ + 620, + 322 + ], + [ + 623, + 321 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 655, + 318 + ], + [ + 657, + 367 + ], + [ + 640, + 367 + ], + [ + 641, + 318 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 625, + 377 + ], + [ + 672, + 376 + ], + [ + 650, + 415 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 585, + 507 + ], + [ + 558, + 508 + ], + [ + 547, + 6 + ], + [ + 547, + 0 + ], + [ + 562, + 1 + ] + ] + }, + { 
+ "label": "terrain", + "polygon": [ + [ + 6, + 641 + ], + [ + 75, + 629 + ], + [ + 72, + 611 + ], + [ + 57, + 602 + ], + [ + 43, + 600 + ], + [ + 21, + 600 + ], + [ + 0, + 595 + ], + [ + 0, + 641 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1029, + 579 + ], + [ + 1001, + 584 + ], + [ + 995, + 585 + ], + [ + 982, + 590 + ], + [ + 960, + 590 + ], + [ + 934, + 588 + ], + [ + 917, + 575 + ], + [ + 911, + 560 + ], + [ + 909, + 551 + ], + [ + 873, + 547 + ], + [ + 870, + 546 + ], + [ + 869, + 537 + ], + [ + 863, + 533 + ], + [ + 853, + 512 + ], + [ + 851, + 487 + ], + [ + 852, + 462 + ], + [ + 863, + 450 + ], + [ + 875, + 435 + ], + [ + 897, + 404 + ], + [ + 893, + 395 + ], + [ + 894, + 393 + ], + [ + 946, + 383 + ], + [ + 948, + 379 + ], + [ + 1005, + 372 + ], + [ + 1060, + 371 + ], + [ + 1122, + 371 + ], + [ + 1138, + 377 + ], + [ + 1153, + 379 + ], + [ + 1172, + 383 + ], + [ + 1187, + 387 + ], + [ + 1245, + 422 + ], + [ + 1255, + 431 + ], + [ + 1262, + 437 + ], + [ + 1266, + 438 + ], + [ + 1329, + 445 + ], + [ + 1368, + 452 + ], + [ + 1395, + 459 + ], + [ + 1427, + 467 + ], + [ + 1435, + 471 + ], + [ + 1445, + 483 + ], + [ + 1448, + 498 + ], + [ + 1446, + 511 + ], + [ + 1444, + 525 + ], + [ + 1438, + 541 + ], + [ + 1434, + 546 + ], + [ + 1422, + 550 + ], + [ + 1402, + 555 + ], + [ + 1390, + 559 + ], + [ + 1387, + 561 + ], + [ + 1379, + 572 + ], + [ + 1372, + 575 + ], + [ + 1366, + 577 + ], + [ + 1361, + 586 + ], + [ + 1355, + 587 + ], + [ + 1339, + 589 + ], + [ + 1316, + 589 + ], + [ + 1303, + 588 + ], + [ + 1293, + 576 + ], + [ + 1284, + 562 + ], + [ + 1282, + 559 + ], + [ + 1048, + 560 + ], + [ + 1043, + 566 + ], + [ + 1037, + 573 + ], + [ + 1031, + 576 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000149_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000149_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..56cf13f6d5e4108ca313020587fd7312594739a1 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000149_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000149_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000149_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..631019bda80c436a1926f87a0d169ffdaabc2854 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000149_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000150_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000150_000019_gtFine_polygons.json new file mode 100644 index 
0000000000000000000000000000000000000000..d71f680240774c0b8d8804e4cb9e825144d3e017 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000150_000019_gtFine_polygons.json @@ -0,0 +1,6544 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 101, + 299 + ], + [ + 2048, + 352 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 286 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 549, + 476 + ], + [ + 737, + 490 + ], + [ + 765, + 493 + ], + [ + 765, + 497 + ], + [ + 289, + 584 + ], + [ + 0, + 645 + ], + [ + 0, + 564 + ], + [ + 422, + 504 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1293, + 475 + ], + [ + 1218, + 486 + ], + [ + 1256, + 499 + ], + [ + 1511, + 564 + ], + [ + 2047, + 707 + ], + [ + 2047, + 582 + ], + [ + 1443, + 488 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1232, + 467 + ], + [ + 1207, + 471 + ], + [ + 1171, + 470 + ], + [ + 1144, + 462 + ], + [ + 1144, + 455 + ], + [ + 1189, + 445 + ], + [ + 1228, + 446 + ], + [ + 1237, + 451 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 734, + 490 + ], + [ + 748, + 494 + ], + [ + 761, + 494 + ], + [ + 782, + 490 + ], + [ + 798, + 485 + ], + [ + 811, + 477 + ], + [ + 822, + 467 + ], + [ + 823, + 460 + ], + [ + 817, + 452 + ], + [ + 801, + 452 + ], + [ + 782, + 453 + ], + [ + 747, + 458 + ], + [ + 730, + 471 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2050, + 610 + ], + [ + 1802, + 572 + ], + [ + 1783, + 566 + ], + [ + 1751, + 555 + ], + [ + 1532, + 506 + ], + [ + 1365, + 488 + ], + [ + 1931, + 500 + ], + [ + 2048, + 494 + ], + [ + 2048, + 606 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 184, + 559 + ], + [ + 305, + 538 + ], + [ + 368, + 529 + ], + [ + 373, + 487 + ], + [ + 229, + 507 + ], + [ + 140, + 519 + ], + [ + 38, + 551 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 633, + 17 + ], + [ + 1055, + 396 + ], + [ + 1501, + 0 + ], + [ + 619, + 0 + ], + [ + 609, + 0 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 0, + 0 + ], + [ + 703, + 0 + ], + [ + 703, + 14 + ], + [ + 737, + 53 + ], + [ + 743, + 53 + ], + [ + 762, + 73 + ], + [ + 763, + 73 + ], + [ + 783, + 76 + ], + [ + 786, + 77 + ], + [ + 777, + 90 + ], + [ + 808, + 135 + ], + [ + 866, + 134 + ], + [ + 866, + 115 + ], + [ + 903, + 112 + ], + [ + 915, + 118 + ], + [ + 913, + 136 + ], + [ + 938, + 145 + ], + [ + 939, + 152 + ], + [ + 940, + 152 + ], + [ + 940, + 162 + ], + [ + 946, + 164 + ], + [ + 949, + 281 + ], + [ + 951, + 313 + ], + [ + 976, + 301 + ], + [ + 987, + 298 + ], + [ + 1009, + 298 + ], + [ + 1019, + 304 + ], + [ + 1013, + 304 + ], + [ + 1013, + 328 + ], + [ + 1021, + 332 + ], + [ + 1026, + 342 + ], + [ + 1031, + 349 + ], + [ + 1036, + 246 + ], + [ + 1039, + 246 + ], + [ + 1051, + 154 + ], + [ + 1053, + 98 + ], + [ + 1053, + 98 + ], + [ + 1056, + 156 + ], + [ + 1060, + 182 + ], + [ + 1062, + 199 + ], + [ + 1070, + 136 + ], + [ + 1071, + 107 + ], + [ + 1071, + 107 + ], + [ + 1075, + 149 + ], + [ + 1085, + 240 + ], + [ + 1097, + 244 + ], + [ + 1098, + 292 + ], + [ + 1106, + 294 + ], + [ + 1105, + 319 + ], + [ + 1117, + 320 + ], + [ + 1120, + 284 + ], + [ + 1123, + 279 + ], + [ + 1124, + 265 + ], + [ + 1139, + 264 + ], + [ + 1143, + 263 + ], + [ + 1144, + 242 + ], + [ + 1147, + 244 + ], + [ + 1149, + 236 + ], + [ + 1190, + 236 + ], + [ + 1192, + 96 + ], + [ + 1204, + 95 + ], + [ + 1204, + 89 + ], + [ + 1220, + 89 + ], + [ + 1233, + 63 + ], + [ + 1256, + 66 + ], + [ + 1256, + 61 + ], + [ + 1265, + 
51 + ], + [ + 1273, + 32 + ], + [ + 1293, + 29 + ], + [ + 1314, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 546 + ], + [ + 1915, + 529 + ], + [ + 1884, + 523 + ], + [ + 1826, + 516 + ], + [ + 1732, + 511 + ], + [ + 1581, + 496 + ], + [ + 1327, + 475 + ], + [ + 1230, + 463 + ], + [ + 1218, + 458 + ], + [ + 1194, + 453 + ], + [ + 1156, + 455 + ], + [ + 1134, + 456 + ], + [ + 1113, + 449 + ], + [ + 1088, + 448 + ], + [ + 1046, + 449 + ], + [ + 907, + 459 + ], + [ + 838, + 461 + ], + [ + 805, + 461 + ], + [ + 763, + 467 + ], + [ + 298, + 503 + ], + [ + 184, + 516 + ], + [ + 0, + 540 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 438, + 248 + ], + [ + 449, + 243 + ], + [ + 447, + 234 + ], + [ + 450, + 234 + ], + [ + 451, + 242 + ], + [ + 462, + 248 + ], + [ + 473, + 242 + ], + [ + 464, + 237 + ], + [ + 467, + 233 + ], + [ + 468, + 223 + ], + [ + 463, + 222 + ], + [ + 465, + 210 + ], + [ + 447, + 203 + ], + [ + 436, + 211 + ], + [ + 424, + 216 + ], + [ + 424, + 234 + ], + [ + 436, + 232 + ], + [ + 431, + 237 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 526, + 162 + ], + [ + 575, + 162 + ], + [ + 575, + 178 + ], + [ + 567, + 178 + ], + [ + 569, + 250 + ], + [ + 577, + 252 + ], + [ + 576, + 266 + ], + [ + 523, + 266 + ], + [ + 520, + 252 + ], + [ + 534, + 251 + ], + [ + 533, + 178 + ], + [ + 527, + 179 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 510, + 228 + ], + [ + 525, + 233 + ], + [ + 525, + 245 + ], + [ + 528, + 245 + ], + [ + 528, + 253 + ], + [ + 526, + 257 + ], + [ + 534, + 262 + ], + [ + 523, + 268 + ], + [ + 513, + 260 + ], + [ + 513, + 256 + ], + [ + 510, + 252 + ], + [ + 509, + 256 + ], + [ + 511, + 259 + ], + [ + 501, + 267 + ], + [ + 494, + 256 + ], + [ + 497, + 254 + ], + [ + 497, + 252 + ], + [ + 489, + 255 + ], + [ + 488, + 236 + ], + [ + 500, + 236 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 43, + 434 + ], + [ + 107, + 436 + ], + [ + 139, + 436 + ], + [ + 161, + 447 + ], + [ + 173, + 465 + ], + [ + 185, + 485 + ], + [ + 193, + 506 + ], + [ + 197, + 523 + ], + [ + 196, + 542 + ], + [ + 192, + 569 + ], + [ + 188, + 584 + ], + [ + 180, + 589 + ], + [ + 173, + 590 + ], + [ + 162, + 592 + ], + [ + 150, + 587 + ], + [ + 149, + 578 + ], + [ + 93, + 590 + ], + [ + 86, + 606 + ], + [ + 73, + 615 + ], + [ + 63, + 617 + ], + [ + 50, + 617 + ], + [ + 44, + 615 + ], + [ + 35, + 606 + ], + [ + 30, + 600 + ], + [ + 0, + 601 + ], + [ + 0, + 431 + ], + [ + 4, + 432 + ], + [ + 31, + 433 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 929, + 3 + ], + [ + 930, + 9 + ], + [ + 980, + 10 + ], + [ + 980, + 4 + ], + [ + 959, + 5 + ], + [ + 958, + 0 + ], + [ + 952, + 0 + ], + [ + 952, + 4 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1024, + 378 + ], + [ + 1019, + 373 + ], + [ + 1016, + 368 + ], + [ + 1020, + 362 + ], + [ + 1021, + 357 + ], + [ + 1016, + 354 + ], + [ + 1012, + 351 + ], + [ + 1006, + 350 + ], + [ + 1008, + 343 + ], + [ + 1014, + 341 + ], + [ + 1021, + 338 + ], + [ + 1018, + 336 + ], + [ + 1011, + 329 + ], + [ + 1001, + 318 + ], + [ + 992, + 311 + ], + [ + 988, + 301 + ], + [ + 989, + 296 + ], + [ + 981, + 284 + ], + [ + 980, + 275 + ], + [ + 977, + 271 + ], + [ + 972, + 278 + ], + [ + 963, + 283 + ], + [ + 953, + 284 + ], + [ + 952, + 282 + ], + [ + 952, + 277 + ], + [ + 950, + 274 + ], + [ + 949, + 269 + ], + [ + 951, + 265 + ], + [ + 951, + 262 + ], + [ + 939, + 261 + ], + [ + 941, + 253 + ], + [ + 941, + 250 + ], + [ + 946, + 243 + ], + [ + 943, + 244 + ], + [ + 930, + 248 + ], + [ + 927, + 244 + ], + [ + 
926, + 236 + ], + [ + 930, + 235 + ], + [ + 932, + 240 + ], + [ + 938, + 237 + ], + [ + 945, + 233 + ], + [ + 933, + 226 + ], + [ + 941, + 216 + ], + [ + 942, + 212 + ], + [ + 931, + 211 + ], + [ + 926, + 208 + ], + [ + 926, + 206 + ], + [ + 934, + 203 + ], + [ + 937, + 200 + ], + [ + 941, + 195 + ], + [ + 930, + 194 + ], + [ + 903, + 192 + ], + [ + 919, + 189 + ], + [ + 922, + 182 + ], + [ + 916, + 182 + ], + [ + 912, + 174 + ], + [ + 914, + 167 + ], + [ + 908, + 159 + ], + [ + 902, + 164 + ], + [ + 896, + 162 + ], + [ + 904, + 156 + ], + [ + 910, + 155 + ], + [ + 909, + 152 + ], + [ + 901, + 151 + ], + [ + 885, + 149 + ], + [ + 885, + 149 + ], + [ + 882, + 142 + ], + [ + 883, + 137 + ], + [ + 880, + 132 + ], + [ + 872, + 138 + ], + [ + 868, + 137 + ], + [ + 865, + 130 + ], + [ + 860, + 127 + ], + [ + 855, + 125 + ], + [ + 855, + 122 + ], + [ + 848, + 122 + ], + [ + 845, + 121 + ], + [ + 833, + 124 + ], + [ + 828, + 125 + ], + [ + 824, + 132 + ], + [ + 819, + 136 + ], + [ + 807, + 139 + ], + [ + 807, + 144 + ], + [ + 814, + 150 + ], + [ + 819, + 155 + ], + [ + 816, + 161 + ], + [ + 805, + 159 + ], + [ + 806, + 167 + ], + [ + 804, + 176 + ], + [ + 805, + 182 + ], + [ + 811, + 188 + ], + [ + 807, + 198 + ], + [ + 807, + 205 + ], + [ + 814, + 207 + ], + [ + 813, + 217 + ], + [ + 802, + 217 + ], + [ + 794, + 217 + ], + [ + 798, + 233 + ], + [ + 800, + 236 + ], + [ + 802, + 243 + ], + [ + 799, + 245 + ], + [ + 805, + 250 + ], + [ + 802, + 253 + ], + [ + 802, + 257 + ], + [ + 804, + 258 + ], + [ + 810, + 259 + ], + [ + 813, + 256 + ], + [ + 811, + 263 + ], + [ + 807, + 272 + ], + [ + 804, + 279 + ], + [ + 811, + 279 + ], + [ + 807, + 287 + ], + [ + 802, + 296 + ], + [ + 805, + 303 + ], + [ + 814, + 296 + ], + [ + 816, + 300 + ], + [ + 813, + 303 + ], + [ + 820, + 307 + ], + [ + 821, + 308 + ], + [ + 828, + 310 + ], + [ + 823, + 316 + ], + [ + 817, + 321 + ], + [ + 819, + 331 + ], + [ + 820, + 337 + ], + [ + 826, + 342 + ], + [ + 832, + 341 + ], + [ + 838, + 340 + ], + [ + 845, + 340 + ], + [ + 846, + 340 + ], + [ + 845, + 397 + ], + [ + 843, + 445 + ], + [ + 852, + 444 + ], + [ + 850, + 436 + ], + [ + 914, + 438 + ], + [ + 919, + 434 + ], + [ + 921, + 427 + ], + [ + 921, + 418 + ], + [ + 919, + 411 + ], + [ + 914, + 406 + ], + [ + 907, + 401 + ], + [ + 903, + 400 + ], + [ + 895, + 400 + ], + [ + 892, + 403 + ], + [ + 887, + 403 + ], + [ + 886, + 372 + ], + [ + 892, + 374 + ], + [ + 897, + 374 + ], + [ + 902, + 375 + ], + [ + 906, + 374 + ], + [ + 910, + 374 + ], + [ + 914, + 374 + ], + [ + 916, + 372 + ], + [ + 917, + 375 + ], + [ + 917, + 379 + ], + [ + 918, + 390 + ], + [ + 920, + 399 + ], + [ + 919, + 439 + ], + [ + 923, + 424 + ], + [ + 924, + 418 + ], + [ + 924, + 363 + ], + [ + 930, + 362 + ], + [ + 934, + 363 + ], + [ + 943, + 360 + ], + [ + 947, + 362 + ], + [ + 954, + 363 + ], + [ + 945, + 366 + ], + [ + 946, + 371 + ], + [ + 955, + 378 + ], + [ + 957, + 382 + ], + [ + 964, + 387 + ], + [ + 971, + 389 + ], + [ + 976, + 389 + ], + [ + 981, + 393 + ], + [ + 986, + 389 + ], + [ + 992, + 387 + ], + [ + 998, + 385 + ], + [ + 1009, + 383 + ], + [ + 1014, + 382 + ], + [ + 1021, + 382 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 750, + 298 + ], + [ + 740, + 295 + ], + [ + 739, + 301 + ], + [ + 736, + 301 + ], + [ + 736, + 305 + ], + [ + 736, + 306 + ], + [ + 735, + 308 + ], + [ + 736, + 312 + ], + [ + 737, + 322 + ], + [ + 738, + 325 + ], + [ + 743, + 327 + ], + [ + 748, + 321 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 763, + 246 + ], + [ + 745, + 246 + ], 
+ [ + 745, + 239 + ], + [ + 742, + 239 + ], + [ + 742, + 338 + ], + [ + 739, + 432 + ], + [ + 754, + 478 + ], + [ + 764, + 479 + ], + [ + 771, + 481 + ], + [ + 765, + 238 + ], + [ + 762, + 238 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 753, + 376 + ], + [ + 763, + 382 + ], + [ + 760, + 384 + ], + [ + 759, + 387 + ], + [ + 760, + 390 + ], + [ + 760, + 395 + ], + [ + 759, + 400 + ], + [ + 759, + 405 + ], + [ + 758, + 410 + ], + [ + 756, + 412 + ], + [ + 750, + 397 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 713, + 388 + ], + [ + 701, + 388 + ], + [ + 700, + 442 + ], + [ + 706, + 454 + ], + [ + 728, + 408 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 692, + 334 + ], + [ + 702, + 332 + ], + [ + 759, + 333 + ], + [ + 769, + 336 + ], + [ + 758, + 342 + ], + [ + 755, + 345 + ], + [ + 758, + 486 + ], + [ + 728, + 485 + ], + [ + 715, + 451 + ], + [ + 712, + 440 + ], + [ + 709, + 347 + ], + [ + 703, + 346 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 765, + 442 + ], + [ + 766, + 484 + ], + [ + 758, + 484 + ], + [ + 754, + 439 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 686, + 444 + ], + [ + 698, + 365 + ], + [ + 701, + 366 + ], + [ + 691, + 439 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 691, + 349 + ], + [ + 698, + 349 + ], + [ + 699, + 342 + ], + [ + 703, + 342 + ], + [ + 702, + 348 + ], + [ + 711, + 348 + ], + [ + 715, + 356 + ], + [ + 703, + 355 + ], + [ + 703, + 370 + ], + [ + 698, + 370 + ], + [ + 698, + 356 + ], + [ + 695, + 355 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 695, + 450 + ], + [ + 704, + 439 + ], + [ + 720, + 438 + ], + [ + 727, + 440 + ], + [ + 728, + 445 + ], + [ + 731, + 451 + ], + [ + 733, + 454 + ], + [ + 737, + 460 + ], + [ + 739, + 468 + ], + [ + 739, + 479 + ], + [ + 739, + 488 + ], + [ + 739, + 493 + ], + [ + 736, + 495 + ], + [ + 733, + 496 + ], + [ + 729, + 496 + ], + [ + 724, + 498 + ], + [ + 720, + 499 + ], + [ + 718, + 496 + ], + [ + 712, + 496 + ], + [ + 707, + 470 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 640, + 445 + ], + [ + 643, + 439 + ], + [ + 652, + 436 + ], + [ + 663, + 435 + ], + [ + 674, + 437 + ], + [ + 692, + 437 + ], + [ + 696, + 437 + ], + [ + 699, + 437 + ], + [ + 709, + 446 + ], + [ + 717, + 467 + ], + [ + 717, + 487 + ], + [ + 716, + 496 + ], + [ + 716, + 500 + ], + [ + 710, + 502 + ], + [ + 703, + 501 + ], + [ + 702, + 502 + ], + [ + 694, + 503 + ], + [ + 689, + 502 + ], + [ + 688, + 499 + ], + [ + 679, + 497 + ], + [ + 660, + 477 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 614, + 441 + ], + [ + 629, + 440 + ], + [ + 657, + 441 + ], + [ + 672, + 453 + ], + [ + 673, + 464 + ], + [ + 676, + 481 + ], + [ + 679, + 488 + ], + [ + 681, + 495 + ], + [ + 681, + 501 + ], + [ + 680, + 503 + ], + [ + 676, + 506 + ], + [ + 670, + 505 + ], + [ + 664, + 507 + ], + [ + 657, + 508 + ], + [ + 644, + 509 + ], + [ + 631, + 496 + ], + [ + 615, + 466 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 534, + 446 + ], + [ + 543, + 438 + ], + [ + 608, + 438 + ], + [ + 620, + 444 + ], + [ + 630, + 455 + ], + [ + 636, + 468 + ], + [ + 641, + 476 + ], + [ + 641, + 484 + ], + [ + 642, + 501 + ], + [ + 641, + 510 + ], + [ + 637, + 514 + ], + [ + 630, + 515 + ], + [ + 628, + 515 + ], + [ + 620, + 511 + ], + [ + 610, + 512 + ], + [ + 610, + 515 + ], + [ + 606, + 519 + ], + [ + 597, + 520 + ], + [ + 593, + 517 + ], + [ + 580, + 515 + ], + [ + 565, + 514 + ], + [ + 551, + 498 + ], + [ + 533, + 474 + ] + ] + }, + { + "label": "car", + "polygon": [ 
+ [ + 442, + 444 + ], + [ + 450, + 439 + ], + [ + 512, + 436 + ], + [ + 534, + 438 + ], + [ + 547, + 444 + ], + [ + 560, + 467 + ], + [ + 566, + 488 + ], + [ + 568, + 507 + ], + [ + 567, + 517 + ], + [ + 564, + 526 + ], + [ + 559, + 527 + ], + [ + 554, + 528 + ], + [ + 550, + 526 + ], + [ + 548, + 521 + ], + [ + 532, + 522 + ], + [ + 532, + 525 + ], + [ + 531, + 530 + ], + [ + 521, + 532 + ], + [ + 514, + 528 + ], + [ + 510, + 524 + ], + [ + 470, + 525 + ], + [ + 425, + 460 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 224, + 485 + ], + [ + 220, + 500 + ], + [ + 219, + 512 + ], + [ + 220, + 522 + ], + [ + 222, + 529 + ], + [ + 225, + 535 + ], + [ + 234, + 539 + ], + [ + 243, + 538 + ], + [ + 250, + 536 + ], + [ + 256, + 535 + ], + [ + 263, + 535 + ], + [ + 270, + 533 + ], + [ + 277, + 529 + ], + [ + 283, + 518 + ], + [ + 290, + 504 + ], + [ + 293, + 493 + ], + [ + 293, + 481 + ], + [ + 290, + 466 + ], + [ + 287, + 460 + ], + [ + 274, + 456 + ], + [ + 267, + 453 + ], + [ + 253, + 452 + ], + [ + 246, + 452 + ], + [ + 238, + 455 + ], + [ + 224, + 467 + ], + [ + 224, + 474 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 355, + 436 + ], + [ + 435, + 437 + ], + [ + 449, + 438 + ], + [ + 462, + 450 + ], + [ + 472, + 470 + ], + [ + 476, + 482 + ], + [ + 476, + 500 + ], + [ + 480, + 519 + ], + [ + 477, + 531 + ], + [ + 475, + 537 + ], + [ + 468, + 538 + ], + [ + 458, + 536 + ], + [ + 454, + 529 + ], + [ + 449, + 528 + ], + [ + 425, + 531 + ], + [ + 426, + 537 + ], + [ + 424, + 544 + ], + [ + 416, + 545 + ], + [ + 409, + 543 + ], + [ + 406, + 535 + ], + [ + 375, + 534 + ], + [ + 374, + 536 + ], + [ + 365, + 539 + ], + [ + 356, + 538 + ], + [ + 351, + 530 + ], + [ + 316, + 532 + ], + [ + 315, + 539 + ], + [ + 314, + 545 + ], + [ + 306, + 547 + ], + [ + 295, + 545 + ], + [ + 291, + 532 + ], + [ + 290, + 515 + ], + [ + 290, + 497 + ], + [ + 299, + 482 + ], + [ + 310, + 476 + ], + [ + 307, + 470 + ], + [ + 308, + 466 + ], + [ + 315, + 465 + ], + [ + 322, + 461 + ], + [ + 340, + 439 + ], + [ + 347, + 436 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1049, + 432 + ], + [ + 1048, + 425 + ], + [ + 1052, + 421 + ], + [ + 1059, + 419 + ], + [ + 1061, + 416 + ], + [ + 1060, + 414 + ], + [ + 1061, + 407 + ], + [ + 1062, + 401 + ], + [ + 1059, + 396 + ], + [ + 1062, + 392 + ], + [ + 1061, + 380 + ], + [ + 1057, + 383 + ], + [ + 1053, + 382 + ], + [ + 1055, + 377 + ], + [ + 1049, + 371 + ], + [ + 1040, + 372 + ], + [ + 1036, + 377 + ], + [ + 1036, + 387 + ], + [ + 1036, + 398 + ], + [ + 1035, + 403 + ], + [ + 1031, + 409 + ], + [ + 1033, + 414 + ], + [ + 1039, + 418 + ], + [ + 1043, + 421 + ], + [ + 1045, + 425 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1114, + 339 + ], + [ + 1109, + 338 + ], + [ + 1105, + 339 + ], + [ + 1101, + 342 + ], + [ + 1095, + 350 + ], + [ + 1076, + 364 + ], + [ + 1076, + 369 + ], + [ + 1077, + 380 + ], + [ + 1072, + 380 + ], + [ + 1068, + 378 + ], + [ + 1067, + 382 + ], + [ + 1068, + 388 + ], + [ + 1065, + 397 + ], + [ + 1067, + 403 + ], + [ + 1076, + 403 + ], + [ + 1083, + 403 + ], + [ + 1090, + 405 + ], + [ + 1092, + 405 + ], + [ + 1096, + 440 + ], + [ + 1097, + 440 + ], + [ + 1096, + 407 + ], + [ + 1099, + 406 + ], + [ + 1099, + 406 + ], + [ + 1106, + 406 + ], + [ + 1115, + 407 + ], + [ + 1117, + 406 + ], + [ + 1115, + 343 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 913, + 317 + ], + [ + 914, + 339 + ], + [ + 920, + 341 + ], + [ + 925, + 339 + ], + [ + 926, + 318 + ], + [ + 920, + 314 + ] + ] + }, + 
{ + "label": "pole", + "polygon": [ + [ + 903, + 320 + ], + [ + 845, + 334 + ], + [ + 835, + 344 + ], + [ + 834, + 367 + ], + [ + 835, + 442 + ], + [ + 837, + 443 + ], + [ + 837, + 345 + ], + [ + 846, + 337 + ], + [ + 903, + 324 + ], + [ + 920, + 324 + ], + [ + 919, + 321 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 840, + 390 + ], + [ + 834, + 390 + ], + [ + 834, + 414 + ], + [ + 837, + 414 + ], + [ + 841, + 413 + ], + [ + 843, + 410 + ], + [ + 842, + 395 + ], + [ + 841, + 395 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 884, + 391 + ], + [ + 871, + 390 + ], + [ + 872, + 403 + ], + [ + 885, + 402 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 786, + 390 + ], + [ + 788, + 412 + ], + [ + 779, + 412 + ], + [ + 779, + 389 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 777, + 474 + ], + [ + 777, + 389 + ], + [ + 779, + 389 + ], + [ + 779, + 474 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 961, + 115 + ], + [ + 961, + 118 + ], + [ + 999, + 120 + ], + [ + 999, + 117 + ], + [ + 982, + 117 + ], + [ + 982, + 112 + ], + [ + 979, + 112 + ], + [ + 978, + 117 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1023, + 240 + ], + [ + 1013, + 237 + ], + [ + 1004, + 240 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1023, + 272 + ], + [ + 1017, + 276 + ], + [ + 1030, + 276 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1027, + 294 + ], + [ + 1021, + 294 + ], + [ + 1019, + 299 + ], + [ + 1029, + 299 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1024, + 321 + ], + [ + 1018, + 323 + ], + [ + 1017, + 326 + ], + [ + 1030, + 326 + ], + [ + 1030, + 323 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1028, + 336 + ], + [ + 1024, + 338 + ], + [ + 1033, + 339 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1009, + 368 + ], + [ + 1010, + 382 + ], + [ + 1003, + 383 + ], + [ + 1003, + 367 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 989, + 373 + ], + [ + 968, + 377 + ], + [ + 963, + 380 + ], + [ + 961, + 391 + ], + [ + 962, + 437 + ], + [ + 964, + 437 + ], + [ + 962, + 390 + ], + [ + 964, + 381 + ], + [ + 969, + 379 + ], + [ + 1006, + 372 + ], + [ + 1006, + 370 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1002, + 434 + ], + [ + 974, + 435 + ], + [ + 976, + 452 + ], + [ + 988, + 460 + ], + [ + 1004, + 445 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 970, + 435 + ], + [ + 948, + 435 + ], + [ + 942, + 443 + ], + [ + 940, + 451 + ], + [ + 937, + 458 + ], + [ + 937, + 463 + ], + [ + 938, + 470 + ], + [ + 944, + 472 + ], + [ + 948, + 472 + ], + [ + 953, + 466 + ], + [ + 970, + 467 + ], + [ + 980, + 467 + ], + [ + 986, + 461 + ], + [ + 986, + 454 + ], + [ + 986, + 451 + ], + [ + 985, + 446 + ], + [ + 985, + 442 + ], + [ + 982, + 436 + ], + [ + 975, + 435 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 888, + 446 + ], + [ + 881, + 437 + ], + [ + 871, + 436 + ], + [ + 850, + 435 + ], + [ + 850, + 443 + ], + [ + 856, + 455 + ], + [ + 861, + 464 + ], + [ + 866, + 469 + ], + [ + 870, + 477 + ], + [ + 872, + 481 + ], + [ + 873, + 483 + ], + [ + 882, + 476 + ], + [ + 889, + 459 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 865, + 439 + ], + [ + 834, + 439 + ], + [ + 823, + 441 + ], + [ + 819, + 446 + ], + [ + 816, + 452 + ], + [ + 811, + 462 + ], + [ + 807, + 467 + ], + [ + 808, + 484 + ], + [ + 811, + 489 + ], + [ + 817, + 491 + ], + [ + 824, + 490 + ], + [ + 829, + 485 + ], + [ + 864, + 482 + ], + [ + 865, + 487 + ], + 
[ + 870, + 487 + ], + [ + 872, + 486 + ], + [ + 873, + 483 + ], + [ + 874, + 470 + ], + [ + 875, + 456 + ], + [ + 872, + 445 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 924, + 434 + ], + [ + 930, + 460 + ], + [ + 934, + 463 + ], + [ + 935, + 468 + ], + [ + 936, + 490 + ], + [ + 933, + 493 + ], + [ + 931, + 493 + ], + [ + 928, + 493 + ], + [ + 926, + 492 + ], + [ + 923, + 488 + ], + [ + 891, + 490 + ], + [ + 890, + 494 + ], + [ + 887, + 495 + ], + [ + 884, + 495 + ], + [ + 878, + 494 + ], + [ + 877, + 488 + ], + [ + 878, + 477 + ], + [ + 879, + 473 + ], + [ + 881, + 463 + ], + [ + 881, + 455 + ], + [ + 882, + 445 + ], + [ + 884, + 438 + ], + [ + 890, + 433 + ], + [ + 898, + 433 + ], + [ + 910, + 433 + ], + [ + 922, + 433 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1060, + 431 + ], + [ + 1083, + 432 + ], + [ + 1091, + 447 + ], + [ + 1097, + 449 + ], + [ + 1101, + 450 + ], + [ + 1098, + 452 + ], + [ + 1097, + 455 + ], + [ + 1097, + 482 + ], + [ + 1096, + 494 + ], + [ + 1096, + 494 + ], + [ + 1093, + 495 + ], + [ + 1090, + 495 + ], + [ + 1086, + 495 + ], + [ + 1070, + 490 + ], + [ + 1052, + 477 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1036, + 427 + ], + [ + 995, + 430 + ], + [ + 984, + 452 + ], + [ + 975, + 447 + ], + [ + 971, + 453 + ], + [ + 980, + 459 + ], + [ + 974, + 471 + ], + [ + 974, + 500 + ], + [ + 976, + 514 + ], + [ + 983, + 516 + ], + [ + 989, + 515 + ], + [ + 994, + 513 + ], + [ + 995, + 505 + ], + [ + 1070, + 505 + ], + [ + 1070, + 512 + ], + [ + 1073, + 515 + ], + [ + 1079, + 516 + ], + [ + 1084, + 515 + ], + [ + 1087, + 512 + ], + [ + 1087, + 492 + ], + [ + 1087, + 487 + ], + [ + 1088, + 475 + ], + [ + 1086, + 468 + ], + [ + 1086, + 461 + ], + [ + 1086, + 458 + ], + [ + 1090, + 457 + ], + [ + 1091, + 453 + ], + [ + 1090, + 450 + ], + [ + 1085, + 448 + ], + [ + 1079, + 450 + ], + [ + 1065, + 429 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1110, + 359 + ], + [ + 1129, + 360 + ], + [ + 1159, + 368 + ], + [ + 1164, + 374 + ], + [ + 1165, + 459 + ], + [ + 1164, + 459 + ], + [ + 1162, + 374 + ], + [ + 1158, + 369 + ], + [ + 1129, + 362 + ], + [ + 1110, + 362 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1114, + 355 + ], + [ + 1104, + 355 + ], + [ + 1105, + 374 + ], + [ + 1113, + 375 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1237, + 313 + ], + [ + 1235, + 384 + ], + [ + 1214, + 382 + ], + [ + 1212, + 306 + ], + [ + 1233, + 307 + ], + [ + 1234, + 313 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1209, + 203 + ], + [ + 1210, + 309 + ], + [ + 1222, + 309 + ], + [ + 1220, + 204 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1296, + 313 + ], + [ + 1311, + 321 + ], + [ + 1312, + 359 + ], + [ + 1297, + 347 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1248, + 355 + ], + [ + 1223, + 355 + ], + [ + 1223, + 360 + ], + [ + 1248, + 362 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1119, + 288 + ], + [ + 1124, + 288 + ], + [ + 1134, + 287 + ], + [ + 1193, + 301 + ], + [ + 1212, + 308 + ], + [ + 1218, + 316 + ], + [ + 1219, + 349 + ], + [ + 1221, + 478 + ], + [ + 1222, + 479 + ], + [ + 1222, + 327 + ], + [ + 1221, + 315 + ], + [ + 1212, + 305 + ], + [ + 1193, + 299 + ], + [ + 1133, + 286 + ], + [ + 1117, + 285 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1107, + 277 + ], + [ + 1108, + 309 + ], + [ + 1124, + 310 + ], + [ + 1124, + 277 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1244, + 371 + ], + [ + 1236, + 368 + 
], + [ + 1230, + 370 + ], + [ + 1228, + 375 + ], + [ + 1232, + 381 + ], + [ + 1235, + 385 + ], + [ + 1242, + 386 + ], + [ + 1246, + 382 + ], + [ + 1246, + 379 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1237, + 401 + ], + [ + 1228, + 391 + ], + [ + 1240, + 382 + ], + [ + 1244, + 400 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1256, + 381 + ], + [ + 1256, + 409 + ], + [ + 1239, + 409 + ], + [ + 1238, + 381 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1264, + 486 + ], + [ + 1214, + 487 + ], + [ + 1204, + 485 + ], + [ + 1195, + 481 + ], + [ + 1196, + 480 + ], + [ + 1209, + 479 + ], + [ + 1218, + 477 + ], + [ + 1228, + 476 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1271, + 430 + ], + [ + 1270, + 330 + ], + [ + 1271, + 315 + ], + [ + 1273, + 303 + ], + [ + 1277, + 303 + ], + [ + 1276, + 306 + ], + [ + 1274, + 308 + ], + [ + 1273, + 338 + ], + [ + 1273, + 436 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1275, + 299 + ], + [ + 1276, + 322 + ], + [ + 1279, + 323 + ], + [ + 1282, + 322 + ], + [ + 1282, + 320 + ], + [ + 1289, + 319 + ], + [ + 1289, + 300 + ], + [ + 1284, + 300 + ], + [ + 1283, + 298 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1262, + 385 + ], + [ + 1263, + 409 + ], + [ + 1271, + 409 + ], + [ + 1271, + 383 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1284, + 374 + ], + [ + 1286, + 404 + ], + [ + 1272, + 405 + ], + [ + 1272, + 374 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1335, + 389 + ], + [ + 1334, + 432 + ], + [ + 1337, + 432 + ], + [ + 1337, + 389 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1330, + 375 + ], + [ + 1330, + 393 + ], + [ + 1340, + 394 + ], + [ + 1341, + 376 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1336, + 358 + ], + [ + 1340, + 362 + ], + [ + 1340, + 369 + ], + [ + 1340, + 373 + ], + [ + 1337, + 376 + ], + [ + 1334, + 376 + ], + [ + 1331, + 375 + ], + [ + 1330, + 368 + ], + [ + 1330, + 364 + ], + [ + 1332, + 360 + ], + [ + 1333, + 359 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1382, + 382 + ], + [ + 1385, + 431 + ], + [ + 1387, + 432 + ], + [ + 1385, + 379 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1377, + 371 + ], + [ + 1351, + 369 + ], + [ + 1353, + 388 + ], + [ + 1381, + 387 + ], + [ + 1380, + 377 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1369, + 330 + ], + [ + 1375, + 387 + ], + [ + 1393, + 386 + ], + [ + 1389, + 329 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1434, + 371 + ], + [ + 1434, + 426 + ], + [ + 1437, + 427 + ], + [ + 1436, + 371 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1397, + 358 + ], + [ + 1398, + 378 + ], + [ + 1461, + 375 + ], + [ + 1475, + 364 + ], + [ + 1460, + 356 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1544, + 420 + ], + [ + 1542, + 413 + ], + [ + 1541, + 410 + ], + [ + 1535, + 408 + ], + [ + 1532, + 408 + ], + [ + 1529, + 411 + ], + [ + 1527, + 416 + ], + [ + 1527, + 419 + ], + [ + 1530, + 426 + ], + [ + 1541, + 428 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1549, + 246 + ], + [ + 1551, + 425 + ], + [ + 1562, + 426 + ], + [ + 1558, + 323 + ], + [ + 1557, + 245 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1539, + 249 + ], + [ + 1538, + 326 + ], + [ + 1635, + 325 + ], + [ + 1634, + 250 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1600, + 442 + ], + [ + 1601, + 433 + ], + [ + 1599, + 415 + ], + [ + 
1594, + 405 + ], + [ + 1588, + 398 + ], + [ + 1590, + 389 + ], + [ + 1588, + 383 + ], + [ + 1580, + 381 + ], + [ + 1576, + 385 + ], + [ + 1573, + 391 + ], + [ + 1573, + 398 + ], + [ + 1573, + 403 + ], + [ + 1572, + 414 + ], + [ + 1572, + 425 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1177, + 467 + ], + [ + 1167, + 469 + ], + [ + 1164, + 468 + ], + [ + 1157, + 464 + ], + [ + 1157, + 457 + ], + [ + 1159, + 450 + ], + [ + 1161, + 447 + ], + [ + 1166, + 445 + ], + [ + 1170, + 445 + ], + [ + 1175, + 451 + ], + [ + 1177, + 459 + ], + [ + 1179, + 465 + ] + ] + }, + { + "label": "persongroup", + "polygon": [ + [ + 1198, + 460 + ], + [ + 1200, + 457 + ], + [ + 1201, + 449 + ], + [ + 1201, + 440 + ], + [ + 1199, + 433 + ], + [ + 1198, + 427 + ], + [ + 1196, + 424 + ], + [ + 1192, + 423 + ], + [ + 1190, + 426 + ], + [ + 1187, + 431 + ], + [ + 1185, + 435 + ], + [ + 1184, + 443 + ], + [ + 1185, + 449 + ], + [ + 1185, + 454 + ], + [ + 1186, + 459 + ], + [ + 1189, + 460 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1187, + 468 + ], + [ + 1182, + 469 + ], + [ + 1182, + 446 + ], + [ + 1186, + 446 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1214, + 424 + ], + [ + 1211, + 429 + ], + [ + 1208, + 437 + ], + [ + 1207, + 447 + ], + [ + 1209, + 454 + ], + [ + 1212, + 457 + ], + [ + 1218, + 455 + ], + [ + 1219, + 442 + ], + [ + 1218, + 429 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1262, + 430 + ], + [ + 1237, + 430 + ], + [ + 1228, + 443 + ], + [ + 1222, + 455 + ], + [ + 1218, + 471 + ], + [ + 1218, + 476 + ], + [ + 1219, + 484 + ], + [ + 1221, + 487 + ], + [ + 1227, + 491 + ], + [ + 1231, + 491 + ], + [ + 1233, + 489 + ], + [ + 1233, + 486 + ], + [ + 1236, + 484 + ], + [ + 1246, + 484 + ], + [ + 1253, + 480 + ], + [ + 1265, + 466 + ], + [ + 1268, + 452 + ], + [ + 1269, + 437 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1322, + 429 + ], + [ + 1318, + 425 + ], + [ + 1274, + 426 + ], + [ + 1266, + 429 + ], + [ + 1258, + 438 + ], + [ + 1255, + 447 + ], + [ + 1250, + 461 + ], + [ + 1246, + 472 + ], + [ + 1246, + 485 + ], + [ + 1247, + 492 + ], + [ + 1248, + 493 + ], + [ + 1251, + 495 + ], + [ + 1256, + 496 + ], + [ + 1258, + 496 + ], + [ + 1262, + 497 + ], + [ + 1266, + 498 + ], + [ + 1270, + 493 + ], + [ + 1282, + 479 + ], + [ + 1304, + 460 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1355, + 432 + ], + [ + 1347, + 426 + ], + [ + 1313, + 429 + ], + [ + 1291, + 430 + ], + [ + 1280, + 447 + ], + [ + 1279, + 453 + ], + [ + 1277, + 451 + ], + [ + 1275, + 451 + ], + [ + 1272, + 453 + ], + [ + 1272, + 458 + ], + [ + 1273, + 461 + ], + [ + 1270, + 467 + ], + [ + 1269, + 472 + ], + [ + 1266, + 482 + ], + [ + 1267, + 492 + ], + [ + 1269, + 497 + ], + [ + 1271, + 499 + ], + [ + 1274, + 501 + ], + [ + 1279, + 500 + ], + [ + 1285, + 501 + ], + [ + 1290, + 502 + ], + [ + 1297, + 504 + ], + [ + 1299, + 502 + ], + [ + 1301, + 499 + ], + [ + 1320, + 498 + ], + [ + 1335, + 493 + ], + [ + 1352, + 469 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1408, + 427 + ], + [ + 1375, + 428 + ], + [ + 1355, + 429 + ], + [ + 1344, + 437 + ], + [ + 1335, + 449 + ], + [ + 1327, + 459 + ], + [ + 1319, + 461 + ], + [ + 1318, + 465 + ], + [ + 1321, + 472 + ], + [ + 1320, + 478 + ], + [ + 1319, + 497 + ], + [ + 1320, + 507 + ], + [ + 1323, + 515 + ], + [ + 1332, + 519 + ], + [ + 1340, + 518 + ], + [ + 1344, + 511 + ], + [ + 1347, + 509 + ], + [ + 1348, + 512 + ], + [ + 1348, + 519 + ], + [ + 1351, + 521 + ], + [ + 1356, + 521 + ], + [ + 1362, + 522 + ], + [ + 1367, 
+ 520 + ], + [ + 1367, + 513 + ], + [ + 1372, + 510 + ], + [ + 1385, + 504 + ], + [ + 1396, + 488 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1508, + 424 + ], + [ + 1495, + 419 + ], + [ + 1437, + 421 + ], + [ + 1408, + 423 + ], + [ + 1398, + 429 + ], + [ + 1388, + 453 + ], + [ + 1383, + 453 + ], + [ + 1375, + 454 + ], + [ + 1374, + 458 + ], + [ + 1380, + 461 + ], + [ + 1380, + 467 + ], + [ + 1374, + 483 + ], + [ + 1373, + 510 + ], + [ + 1375, + 520 + ], + [ + 1378, + 528 + ], + [ + 1380, + 529 + ], + [ + 1389, + 530 + ], + [ + 1392, + 528 + ], + [ + 1394, + 524 + ], + [ + 1401, + 523 + ], + [ + 1407, + 523 + ], + [ + 1406, + 529 + ], + [ + 1407, + 533 + ], + [ + 1409, + 534 + ], + [ + 1420, + 536 + ], + [ + 1425, + 532 + ], + [ + 1428, + 527 + ], + [ + 1446, + 521 + ], + [ + 1468, + 493 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1615, + 469 + ], + [ + 1592, + 426 + ], + [ + 1581, + 422 + ], + [ + 1555, + 421 + ], + [ + 1537, + 421 + ], + [ + 1499, + 422 + ], + [ + 1480, + 425 + ], + [ + 1463, + 438 + ], + [ + 1455, + 456 + ], + [ + 1450, + 463 + ], + [ + 1444, + 459 + ], + [ + 1439, + 460 + ], + [ + 1431, + 465 + ], + [ + 1434, + 471 + ], + [ + 1441, + 472 + ], + [ + 1446, + 472 + ], + [ + 1444, + 480 + ], + [ + 1438, + 496 + ], + [ + 1439, + 510 + ], + [ + 1440, + 526 + ], + [ + 1441, + 536 + ], + [ + 1444, + 540 + ], + [ + 1447, + 543 + ], + [ + 1452, + 544 + ], + [ + 1456, + 542 + ], + [ + 1459, + 538 + ], + [ + 1464, + 537 + ], + [ + 1471, + 536 + ], + [ + 1471, + 542 + ], + [ + 1473, + 547 + ], + [ + 1477, + 549 + ], + [ + 1482, + 550 + ], + [ + 1487, + 550 + ], + [ + 1491, + 548 + ], + [ + 1491, + 542 + ], + [ + 1493, + 534 + ], + [ + 1560, + 531 + ], + [ + 1561, + 537 + ], + [ + 1565, + 541 + ], + [ + 1571, + 542 + ], + [ + 1575, + 540 + ], + [ + 1576, + 536 + ], + [ + 1580, + 532 + ], + [ + 1598, + 531 + ], + [ + 1599, + 534 + ], + [ + 1600, + 543 + ], + [ + 1604, + 547 + ], + [ + 1616, + 548 + ], + [ + 1621, + 531 + ], + [ + 1623, + 501 + ], + [ + 1622, + 488 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1796, + 451 + ], + [ + 1664, + 450 + ], + [ + 1666, + 461 + ], + [ + 1629, + 462 + ], + [ + 1629, + 468 + ], + [ + 1639, + 483 + ], + [ + 1657, + 489 + ], + [ + 1707, + 497 + ], + [ + 1721, + 500 + ], + [ + 1738, + 511 + ], + [ + 1749, + 520 + ], + [ + 1759, + 517 + ], + [ + 1769, + 516 + ], + [ + 1775, + 515 + ], + [ + 1796, + 517 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1613, + 563 + ], + [ + 1663, + 569 + ], + [ + 1760, + 561 + ], + [ + 1761, + 508 + ], + [ + 1740, + 504 + ], + [ + 1607, + 505 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1722, + 485 + ], + [ + 1689, + 483 + ], + [ + 1682, + 486 + ], + [ + 1670, + 484 + ], + [ + 1666, + 476 + ], + [ + 1657, + 466 + ], + [ + 1645, + 463 + ], + [ + 1635, + 460 + ], + [ + 1623, + 461 + ], + [ + 1615, + 468 + ], + [ + 1608, + 478 + ], + [ + 1595, + 484 + ], + [ + 1586, + 494 + ], + [ + 1593, + 506 + ], + [ + 1602, + 516 + ], + [ + 1607, + 519 + ], + [ + 1617, + 519 + ], + [ + 1624, + 513 + ], + [ + 1641, + 512 + ], + [ + 1651, + 509 + ], + [ + 1669, + 510 + ], + [ + 1693, + 511 + ], + [ + 1705, + 512 + ], + [ + 1721, + 510 + ], + [ + 1733, + 509 + ], + [ + 1741, + 507 + ], + [ + 1736, + 497 + ], + [ + 1733, + 489 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1786, + 258 + ], + [ + 1790, + 546 + ], + [ + 1803, + 547 + ], + [ + 1794, + 258 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1817, + 187 + ], + [ + 1834, + 182 + ], + [ + 1856, + 
187 + ], + [ + 1867, + 197 + ], + [ + 1872, + 210 + ], + [ + 1872, + 225 + ], + [ + 1866, + 238 + ], + [ + 1858, + 247 + ], + [ + 1844, + 255 + ], + [ + 1833, + 256 + ], + [ + 1820, + 254 + ], + [ + 1812, + 251 + ], + [ + 1808, + 223 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1786, + 181 + ], + [ + 1766, + 190 + ], + [ + 1768, + 265 + ], + [ + 1822, + 262 + ], + [ + 1820, + 184 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1918, + 164 + ], + [ + 1816, + 165 + ], + [ + 1792, + 166 + ], + [ + 1793, + 175 + ], + [ + 1880, + 173 + ], + [ + 1880, + 178 + ], + [ + 1787, + 178 + ], + [ + 1788, + 165 + ], + [ + 1757, + 168 + ], + [ + 1763, + 154 + ], + [ + 1808, + 146 + ], + [ + 1818, + 0 + ], + [ + 1905, + 0 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1848, + 493 + ], + [ + 1849, + 526 + ], + [ + 1865, + 526 + ], + [ + 1859, + 489 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1862, + 417 + ], + [ + 1836, + 417 + ], + [ + 1835, + 507 + ], + [ + 1866, + 506 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1836, + 372 + ], + [ + 1837, + 545 + ], + [ + 1805, + 546 + ], + [ + 1798, + 541 + ], + [ + 1795, + 373 + ], + [ + 1815, + 366 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1108, + 432 + ], + [ + 1143, + 433 + ], + [ + 1147, + 438 + ], + [ + 1152, + 450 + ], + [ + 1152, + 467 + ], + [ + 1152, + 479 + ], + [ + 1151, + 481 + ], + [ + 1146, + 481 + ], + [ + 1143, + 479 + ], + [ + 1143, + 475 + ], + [ + 1106, + 474 + ], + [ + 1106, + 477 + ], + [ + 1106, + 479 + ], + [ + 1102, + 479 + ], + [ + 1099, + 478 + ], + [ + 1098, + 469 + ], + [ + 1101, + 453 + ], + [ + 1101, + 447 + ], + [ + 1105, + 437 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1171, + 418 + ], + [ + 1176, + 412 + ], + [ + 1170, + 408 + ], + [ + 1166, + 411 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1172, + 398 + ], + [ + 1168, + 400 + ], + [ + 1166, + 403 + ], + [ + 1167, + 407 + ], + [ + 1172, + 409 + ], + [ + 1175, + 407 + ], + [ + 1176, + 402 + ], + [ + 1175, + 399 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1168, + 420 + ], + [ + 1158, + 420 + ], + [ + 1157, + 403 + ], + [ + 1168, + 403 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1185, + 415 + ], + [ + 1186, + 390 + ], + [ + 1182, + 389 + ], + [ + 1182, + 392 + ], + [ + 1177, + 393 + ], + [ + 1177, + 395 + ], + [ + 1181, + 397 + ], + [ + 1177, + 399 + ], + [ + 1178, + 403 + ], + [ + 1182, + 403 + ], + [ + 1182, + 405 + ], + [ + 1178, + 405 + ], + [ + 1179, + 408 + ], + [ + 1182, + 409 + ], + [ + 1183, + 413 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1191, + 394 + ], + [ + 1193, + 397 + ], + [ + 1191, + 401 + ], + [ + 1187, + 403 + ], + [ + 1184, + 401 + ], + [ + 1183, + 397 + ], + [ + 1185, + 394 + ], + [ + 1188, + 394 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1226, + 404 + ], + [ + 1216, + 405 + ], + [ + 1214, + 378 + ], + [ + 1222, + 379 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 
0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000152_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000152_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..0c981f2ab2475bc73c379d3998012c83052ba33e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000152_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000152_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000152_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..be8db37c29e6ff49aed0f8cfa09c7ec2bb6874dd --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000152_000019_gtFine_polygons.json @@ -0,0 +1,7703 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 63, + 338 + ], + [ + 2048, + 308 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 318 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 506, + 448 + ], + [ + 540, + 452 + ], + [ + 570, + 456 + ], + [ + 604, + 461 + ], + [ + 649, + 465 + ], + [ + 655, + 460 + ], + [ + 661, + 454 + ], + [ + 655, + 449 + ], + [ + 649, + 443 + ], + [ + 609, + 439 + ], + [ + 549, + 438 + ], + [ + 543, + 438 + ], + [ + 521, + 444 + ], + [ + 514, + 444 + ], + [ + 510, + 446 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 106, + 9 + ], + [ + 241, + 354 + ], + [ + 515, + 340 + ], + [ + 713, + 159 + ], + [ + 757, + 0 + ], + [ + 116, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1376, + 520 + ], + [ + 1240, + 521 + ], + [ + 1111, + 510 + ], + [ + 1004, + 500 + ], + [ + 990, + 498 + ], + [ + 989, + 494 + ], + [ + 972, + 485 + ], + [ + 819, + 473 + ], + [ + 749, + 466 + ], + [ + 969, + 468 + ], + [ + 1192, + 470 + ], + [ + 1468, + 469 + ], + [ + 1448, + 503 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1826, + 649 + ], + [ + 2048, + 717 + ], + [ + 2048, + 605 + ], + [ + 1764, + 547 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 279, + 502 + ], + [ + 115, + 502 + ], + [ + 0, + 500 + ], + [ + 0, + 480 + ], + [ + 116, + 476 + ], + [ + 169, + 436 + ], + [ + 210, + 422 + ], + [ + 270, + 444 + ], + [ + 288, + 458 + ], + [ + 287, + 473 + ], + [ + 282, + 486 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 0, + 0 + ], + [ + 162, + 0 + ], + [ + 166, + 13 + ], + [ + 179, + 22 + ], + [ + 192, + 59 + ], + [ + 190, + 70 + ], + [ + 180, + 69 + ], + [ + 181, + 90 + ], + [ + 184, + 92 + ], + [ + 185, + 102 + ], + [ + 192, + 119 + ], + [ + 197, + 119 + ], + [ + 214, + 170 + ], + [ + 321, + 237 + ], + [ + 350, + 237 + ], + [ + 352, + 247 + ], + [ + 370, + 252 + ], + [ + 371, + 263 + ], + [ + 371, + 261 + ], + [ + 391, + 264 + ], + [ + 391, + 261 + ], + [ + 402, + 262 + ], + [ + 402, + 265 + ], + [ + 413, + 265 + ], + [ + 411, + 259 + ], + [ + 454, + 257 + ], + [ + 454, + 272 + ], + [ + 484, + 281 + ], + [ + 496, + 271 + ], + [ + 497, + 196 + ], + [ + 502, + 196 + ], + [ + 503, + 190 + ], + [ + 507, + 189 + ], + [ + 532, + 189 + ], + [ + 645, + 93 + ], + [ + 669, + 58 + ], + [ + 668, + 9 + ], + [ + 671, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 543 + ], + [ + 1445, + 528 + ], + [ + 1379, + 498 + ], + [ + 
1334, + 498 + ], + [ + 1302, + 499 + ], + [ + 1282, + 495 + ], + [ + 1140, + 484 + ], + [ + 969, + 470 + ], + [ + 784, + 460 + ], + [ + 671, + 447 + ], + [ + 626, + 447 + ], + [ + 590, + 444 + ], + [ + 543, + 441 + ], + [ + 502, + 433 + ], + [ + 467, + 428 + ], + [ + 448, + 429 + ], + [ + 346, + 433 + ], + [ + 199, + 438 + ], + [ + 111, + 485 + ], + [ + 0, + 483 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 210, + 139 + ], + [ + 215, + 141 + ], + [ + 219, + 148 + ], + [ + 227, + 152 + ], + [ + 235, + 157 + ], + [ + 245, + 155 + ], + [ + 252, + 162 + ], + [ + 261, + 172 + ], + [ + 265, + 179 + ], + [ + 263, + 185 + ], + [ + 259, + 187 + ], + [ + 262, + 190 + ], + [ + 266, + 188 + ], + [ + 271, + 185 + ], + [ + 275, + 192 + ], + [ + 282, + 194 + ], + [ + 289, + 192 + ], + [ + 293, + 191 + ], + [ + 301, + 195 + ], + [ + 298, + 200 + ], + [ + 301, + 206 + ], + [ + 305, + 206 + ], + [ + 310, + 202 + ], + [ + 313, + 205 + ], + [ + 316, + 213 + ], + [ + 321, + 216 + ], + [ + 324, + 221 + ], + [ + 324, + 228 + ], + [ + 325, + 234 + ], + [ + 323, + 242 + ], + [ + 323, + 252 + ], + [ + 325, + 259 + ], + [ + 326, + 259 + ], + [ + 330, + 270 + ], + [ + 330, + 270 + ], + [ + 330, + 270 + ], + [ + 329, + 272 + ], + [ + 331, + 279 + ], + [ + 335, + 280 + ], + [ + 341, + 283 + ], + [ + 344, + 284 + ], + [ + 349, + 287 + ], + [ + 354, + 294 + ], + [ + 347, + 300 + ], + [ + 344, + 306 + ], + [ + 346, + 315 + ], + [ + 349, + 328 + ], + [ + 346, + 336 + ], + [ + 337, + 340 + ], + [ + 327, + 342 + ], + [ + 323, + 345 + ], + [ + 323, + 352 + ], + [ + 315, + 355 + ], + [ + 310, + 351 + ], + [ + 298, + 345 + ], + [ + 297, + 338 + ], + [ + 296, + 336 + ], + [ + 292, + 334 + ], + [ + 290, + 339 + ], + [ + 286, + 342 + ], + [ + 277, + 348 + ], + [ + 272, + 353 + ], + [ + 259, + 358 + ], + [ + 254, + 359 + ], + [ + 244, + 354 + ], + [ + 230, + 338 + ], + [ + 225, + 329 + ], + [ + 217, + 320 + ], + [ + 217, + 326 + ], + [ + 217, + 333 + ], + [ + 213, + 344 + ], + [ + 212, + 356 + ], + [ + 210, + 373 + ], + [ + 209, + 387 + ], + [ + 199, + 389 + ], + [ + 202, + 365 + ], + [ + 203, + 355 + ], + [ + 202, + 347 + ], + [ + 204, + 297 + ], + [ + 206, + 173 + ], + [ + 212, + 167 + ], + [ + 204, + 141 + ], + [ + 207, + 141 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 416, + 357 + ], + [ + 424, + 357 + ], + [ + 424, + 371 + ], + [ + 416, + 371 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 443, + 357 + ], + [ + 443, + 372 + ], + [ + 450, + 372 + ], + [ + 450, + 357 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 502, + 423 + ], + [ + 495, + 424 + ], + [ + 489, + 425 + ], + [ + 487, + 426 + ], + [ + 483, + 427 + ], + [ + 477, + 430 + ], + [ + 474, + 434 + ], + [ + 473, + 439 + ], + [ + 475, + 441 + ], + [ + 477, + 443 + ], + [ + 483, + 444 + ], + [ + 485, + 445 + ], + [ + 491, + 445 + ], + [ + 495, + 445 + ], + [ + 502, + 444 + ], + [ + 505, + 444 + ], + [ + 509, + 443 + ], + [ + 514, + 440 + ], + [ + 516, + 437 + ], + [ + 516, + 433 + ], + [ + 514, + 430 + ], + [ + 509, + 424 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 662, + 60 + ], + [ + 655, + 58 + ], + [ + 650, + 65 + ], + [ + 648, + 70 + ], + [ + 642, + 69 + ], + [ + 627, + 51 + ], + [ + 622, + 66 + ], + [ + 618, + 75 + ], + [ + 615, + 71 + ], + [ + 613, + 75 + ], + [ + 613, + 83 + ], + [ + 614, + 89 + ], + [ + 611, + 92 + ], + [ + 607, + 85 + ], + [ + 604, + 77 + ], + [ + 600, + 67 + ], + [ + 592, + 55 + ], + [ + 592, + 66 + ], + [ + 588, + 72 + ], + [ + 588, + 79 + ], + [ + 580, + 
77 + ], + [ + 575, + 81 + ], + [ + 578, + 87 + ], + [ + 580, + 92 + ], + [ + 568, + 91 + ], + [ + 560, + 91 + ], + [ + 555, + 96 + ], + [ + 553, + 105 + ], + [ + 550, + 108 + ], + [ + 548, + 101 + ], + [ + 542, + 105 + ], + [ + 540, + 110 + ], + [ + 538, + 123 + ], + [ + 534, + 132 + ], + [ + 533, + 137 + ], + [ + 530, + 148 + ], + [ + 528, + 156 + ], + [ + 530, + 160 + ], + [ + 534, + 169 + ], + [ + 536, + 172 + ], + [ + 532, + 174 + ], + [ + 528, + 173 + ], + [ + 524, + 176 + ], + [ + 526, + 182 + ], + [ + 526, + 186 + ], + [ + 522, + 191 + ], + [ + 517, + 192 + ], + [ + 515, + 195 + ], + [ + 516, + 200 + ], + [ + 517, + 207 + ], + [ + 521, + 218 + ], + [ + 521, + 220 + ], + [ + 518, + 219 + ], + [ + 513, + 214 + ], + [ + 502, + 207 + ], + [ + 500, + 208 + ], + [ + 493, + 213 + ], + [ + 493, + 216 + ], + [ + 495, + 217 + ], + [ + 493, + 222 + ], + [ + 487, + 225 + ], + [ + 482, + 228 + ], + [ + 484, + 233 + ], + [ + 486, + 236 + ], + [ + 493, + 235 + ], + [ + 496, + 236 + ], + [ + 493, + 239 + ], + [ + 486, + 241 + ], + [ + 484, + 248 + ], + [ + 495, + 250 + ], + [ + 496, + 251 + ], + [ + 494, + 262 + ], + [ + 491, + 268 + ], + [ + 481, + 254 + ], + [ + 485, + 268 + ], + [ + 483, + 274 + ], + [ + 480, + 276 + ], + [ + 476, + 274 + ], + [ + 471, + 270 + ], + [ + 467, + 269 + ], + [ + 457, + 273 + ], + [ + 450, + 274 + ], + [ + 449, + 279 + ], + [ + 449, + 283 + ], + [ + 442, + 291 + ], + [ + 441, + 285 + ], + [ + 437, + 285 + ], + [ + 434, + 289 + ], + [ + 437, + 294 + ], + [ + 430, + 296 + ], + [ + 425, + 304 + ], + [ + 416, + 307 + ], + [ + 422, + 308 + ], + [ + 430, + 305 + ], + [ + 426, + 313 + ], + [ + 423, + 317 + ], + [ + 420, + 327 + ], + [ + 424, + 333 + ], + [ + 427, + 337 + ], + [ + 433, + 339 + ], + [ + 440, + 337 + ], + [ + 446, + 333 + ], + [ + 451, + 329 + ], + [ + 453, + 324 + ], + [ + 456, + 326 + ], + [ + 451, + 331 + ], + [ + 452, + 338 + ], + [ + 453, + 341 + ], + [ + 457, + 342 + ], + [ + 465, + 345 + ], + [ + 469, + 346 + ], + [ + 472, + 350 + ], + [ + 478, + 362 + ], + [ + 488, + 371 + ], + [ + 492, + 386 + ], + [ + 496, + 401 + ], + [ + 500, + 422 + ], + [ + 501, + 436 + ], + [ + 505, + 437 + ], + [ + 504, + 431 + ], + [ + 503, + 405 + ], + [ + 498, + 389 + ], + [ + 498, + 384 + ], + [ + 500, + 375 + ], + [ + 504, + 366 + ], + [ + 503, + 357 + ], + [ + 507, + 351 + ], + [ + 509, + 346 + ], + [ + 518, + 346 + ], + [ + 526, + 350 + ], + [ + 528, + 358 + ], + [ + 526, + 370 + ], + [ + 525, + 383 + ], + [ + 526, + 443 + ], + [ + 532, + 443 + ], + [ + 532, + 433 + ], + [ + 532, + 397 + ], + [ + 532, + 370 + ], + [ + 539, + 356 + ], + [ + 543, + 347 + ], + [ + 549, + 344 + ], + [ + 552, + 344 + ], + [ + 553, + 341 + ], + [ + 551, + 338 + ], + [ + 553, + 334 + ], + [ + 555, + 331 + ], + [ + 557, + 325 + ], + [ + 557, + 320 + ], + [ + 560, + 318 + ], + [ + 561, + 322 + ], + [ + 560, + 335 + ], + [ + 560, + 346 + ], + [ + 562, + 353 + ], + [ + 564, + 353 + ], + [ + 568, + 355 + ], + [ + 568, + 425 + ], + [ + 574, + 429 + ], + [ + 575, + 419 + ], + [ + 576, + 354 + ], + [ + 579, + 351 + ], + [ + 580, + 349 + ], + [ + 580, + 346 + ], + [ + 586, + 346 + ], + [ + 586, + 383 + ], + [ + 579, + 424 + ], + [ + 579, + 433 + ], + [ + 586, + 432 + ], + [ + 587, + 426 + ], + [ + 592, + 389 + ], + [ + 599, + 354 + ], + [ + 600, + 345 + ], + [ + 608, + 333 + ], + [ + 611, + 333 + ], + [ + 613, + 339 + ], + [ + 614, + 343 + ], + [ + 620, + 347 + ], + [ + 623, + 346 + ], + [ + 626, + 345 + ], + [ + 631, + 347 + ], + [ + 635, + 356 + ], + [ + 640, + 403 + ], + [ + 642, + 433 + ], + [ + 645, 
+ 450 + ], + [ + 649, + 450 + ], + [ + 650, + 450 + ], + [ + 654, + 447 + ], + [ + 653, + 437 + ], + [ + 647, + 383 + ], + [ + 647, + 357 + ], + [ + 651, + 332 + ], + [ + 656, + 325 + ], + [ + 664, + 330 + ], + [ + 671, + 329 + ], + [ + 680, + 333 + ], + [ + 681, + 329 + ], + [ + 680, + 324 + ], + [ + 692, + 328 + ], + [ + 698, + 328 + ], + [ + 701, + 326 + ], + [ + 697, + 322 + ], + [ + 695, + 319 + ], + [ + 690, + 307 + ], + [ + 707, + 309 + ], + [ + 708, + 306 + ], + [ + 705, + 306 + ], + [ + 715, + 305 + ], + [ + 723, + 301 + ], + [ + 725, + 297 + ], + [ + 730, + 294 + ], + [ + 732, + 287 + ], + [ + 726, + 289 + ], + [ + 718, + 287 + ], + [ + 711, + 287 + ], + [ + 708, + 287 + ], + [ + 712, + 280 + ], + [ + 710, + 272 + ], + [ + 709, + 270 + ], + [ + 712, + 262 + ], + [ + 721, + 258 + ], + [ + 724, + 254 + ], + [ + 716, + 249 + ], + [ + 722, + 247 + ], + [ + 729, + 240 + ], + [ + 723, + 234 + ], + [ + 725, + 230 + ], + [ + 738, + 229 + ], + [ + 738, + 225 + ], + [ + 727, + 224 + ], + [ + 731, + 220 + ], + [ + 738, + 216 + ], + [ + 739, + 210 + ], + [ + 728, + 204 + ], + [ + 725, + 207 + ], + [ + 722, + 211 + ], + [ + 719, + 210 + ], + [ + 713, + 208 + ], + [ + 718, + 203 + ], + [ + 719, + 199 + ], + [ + 712, + 197 + ], + [ + 704, + 191 + ], + [ + 699, + 183 + ], + [ + 710, + 180 + ], + [ + 717, + 175 + ], + [ + 722, + 170 + ], + [ + 727, + 160 + ], + [ + 726, + 148 + ], + [ + 718, + 149 + ], + [ + 715, + 144 + ], + [ + 718, + 139 + ], + [ + 721, + 137 + ], + [ + 725, + 133 + ], + [ + 723, + 130 + ], + [ + 718, + 124 + ], + [ + 716, + 114 + ], + [ + 716, + 112 + ], + [ + 710, + 118 + ], + [ + 705, + 123 + ], + [ + 700, + 123 + ], + [ + 692, + 118 + ], + [ + 689, + 117 + ], + [ + 680, + 122 + ], + [ + 675, + 125 + ], + [ + 671, + 122 + ], + [ + 671, + 114 + ], + [ + 672, + 107 + ], + [ + 674, + 97 + ], + [ + 677, + 85 + ], + [ + 672, + 71 + ], + [ + 664, + 62 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 456, + 193 + ], + [ + 456, + 200 + ], + [ + 478, + 201 + ], + [ + 478, + 195 + ], + [ + 471, + 195 + ], + [ + 468, + 190 + ], + [ + 465, + 193 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 473, + 437 + ], + [ + 451, + 438 + ], + [ + 446, + 435 + ], + [ + 447, + 431 + ], + [ + 448, + 428 + ], + [ + 450, + 426 + ], + [ + 456, + 422 + ], + [ + 462, + 421 + ], + [ + 469, + 421 + ], + [ + 474, + 422 + ], + [ + 475, + 428 + ], + [ + 474, + 434 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 521, + 444 + ], + [ + 525, + 440 + ], + [ + 523, + 435 + ], + [ + 517, + 431 + ], + [ + 512, + 429 + ], + [ + 504, + 429 + ], + [ + 499, + 431 + ], + [ + 495, + 437 + ], + [ + 494, + 442 + ], + [ + 498, + 445 + ], + [ + 510, + 445 + ], + [ + 512, + 445 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 483, + 401 + ], + [ + 483, + 416 + ], + [ + 492, + 416 + ], + [ + 491, + 401 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 546, + 394 + ], + [ + 547, + 409 + ], + [ + 556, + 410 + ], + [ + 555, + 394 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 570, + 450 + ], + [ + 566, + 449 + ], + [ + 559, + 454 + ], + [ + 556, + 455 + ], + [ + 555, + 452 + ], + [ + 555, + 444 + ], + [ + 554, + 440 + ], + [ + 556, + 434 + ], + [ + 556, + 426 + ], + [ + 560, + 417 + ], + [ + 566, + 422 + ], + [ + 571, + 417 + ], + [ + 571, + 415 + ], + [ + 575, + 419 + ], + [ + 579, + 423 + ], + [ + 585, + 425 + ], + [ + 589, + 428 + ], + [ + 591, + 431 + ], + [ + 593, + 435 + ], + [ + 594, + 442 + ], + [ + 593, + 446 + ], + [ + 592, + 448 + ], + [ + 
583, + 449 + ], + [ + 577, + 450 + ], + [ + 573, + 450 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 534, + 400 + ], + [ + 531, + 398 + ], + [ + 531, + 411 + ], + [ + 534, + 411 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 477, + 346 + ], + [ + 500, + 347 + ], + [ + 529, + 354 + ], + [ + 532, + 359 + ], + [ + 533, + 395 + ], + [ + 535, + 447 + ], + [ + 536, + 447 + ], + [ + 533, + 358 + ], + [ + 530, + 353 + ], + [ + 500, + 345 + ], + [ + 475, + 344 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 470, + 338 + ], + [ + 470, + 358 + ], + [ + 480, + 358 + ], + [ + 479, + 339 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 595, + 296 + ], + [ + 592, + 455 + ], + [ + 597, + 455 + ], + [ + 600, + 296 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 590, + 352 + ], + [ + 590, + 391 + ], + [ + 616, + 392 + ], + [ + 616, + 353 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 580, + 434 + ], + [ + 581, + 454 + ], + [ + 583, + 454 + ], + [ + 583, + 434 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 610, + 465 + ], + [ + 611, + 457 + ], + [ + 611, + 451 + ], + [ + 610, + 445 + ], + [ + 610, + 443 + ], + [ + 610, + 440 + ], + [ + 608, + 437 + ], + [ + 604, + 437 + ], + [ + 603, + 443 + ], + [ + 603, + 454 + ], + [ + 601, + 458 + ], + [ + 606, + 459 + ], + [ + 607, + 462 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 601, + 421 + ], + [ + 595, + 429 + ], + [ + 594, + 432 + ], + [ + 594, + 435 + ], + [ + 598, + 436 + ], + [ + 598, + 435 + ], + [ + 600, + 442 + ], + [ + 600, + 447 + ], + [ + 600, + 456 + ], + [ + 600, + 459 + ], + [ + 600, + 462 + ], + [ + 602, + 462 + ], + [ + 603, + 459 + ], + [ + 604, + 447 + ], + [ + 604, + 442 + ], + [ + 605, + 441 + ], + [ + 607, + 441 + ], + [ + 608, + 444 + ], + [ + 611, + 446 + ], + [ + 612, + 445 + ], + [ + 612, + 437 + ], + [ + 613, + 426 + ], + [ + 608, + 421 + ], + [ + 606, + 414 + ], + [ + 602, + 412 + ], + [ + 599, + 416 + ], + [ + 600, + 418 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 655, + 373 + ], + [ + 651, + 374 + ], + [ + 649, + 377 + ], + [ + 649, + 380 + ], + [ + 649, + 384 + ], + [ + 654, + 387 + ], + [ + 655, + 387 + ], + [ + 659, + 385 + ], + [ + 662, + 382 + ], + [ + 662, + 376 + ], + [ + 659, + 374 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 681, + 383 + ], + [ + 683, + 437 + ], + [ + 689, + 438 + ], + [ + 691, + 429 + ], + [ + 688, + 379 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 662, + 338 + ], + [ + 663, + 391 + ], + [ + 716, + 391 + ], + [ + 716, + 338 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 677, + 417 + ], + [ + 677, + 432 + ], + [ + 679, + 432 + ], + [ + 679, + 416 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 673, + 394 + ], + [ + 672, + 421 + ], + [ + 683, + 421 + ], + [ + 682, + 393 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 622, + 459 + ], + [ + 626, + 454 + ], + [ + 627, + 449 + ], + [ + 628, + 447 + ], + [ + 632, + 445 + ], + [ + 636, + 442 + ], + [ + 639, + 432 + ], + [ + 641, + 429 + ], + [ + 646, + 430 + ], + [ + 647, + 430 + ], + [ + 652, + 432 + ], + [ + 652, + 436 + ], + [ + 653, + 444 + ], + [ + 654, + 451 + ], + [ + 653, + 454 + ], + [ + 653, + 455 + ], + [ + 650, + 457 + ], + [ + 644, + 457 + ], + [ + 637, + 459 + ], + [ + 632, + 461 + ], + [ + 629, + 462 + ], + [ + 625, + 462 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 695, + 435 + ], + [ + 681, + 430 + ], + [ + 667, + 430 + ], + [ + 657, + 433 
+ ], + [ + 653, + 440 + ], + [ + 648, + 446 + ], + [ + 648, + 454 + ], + [ + 648, + 462 + ], + [ + 648, + 464 + ], + [ + 651, + 466 + ], + [ + 654, + 466 + ], + [ + 656, + 466 + ], + [ + 663, + 467 + ], + [ + 665, + 468 + ], + [ + 676, + 465 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 713, + 430 + ], + [ + 697, + 428 + ], + [ + 686, + 429 + ], + [ + 676, + 434 + ], + [ + 670, + 439 + ], + [ + 662, + 445 + ], + [ + 658, + 451 + ], + [ + 657, + 459 + ], + [ + 658, + 463 + ], + [ + 659, + 466 + ], + [ + 664, + 467 + ], + [ + 667, + 468 + ], + [ + 671, + 468 + ], + [ + 676, + 469 + ], + [ + 682, + 469 + ], + [ + 690, + 467 + ], + [ + 696, + 466 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 747, + 428 + ], + [ + 716, + 429 + ], + [ + 704, + 434 + ], + [ + 697, + 439 + ], + [ + 692, + 450 + ], + [ + 689, + 461 + ], + [ + 691, + 468 + ], + [ + 693, + 471 + ], + [ + 698, + 472 + ], + [ + 699, + 470 + ], + [ + 704, + 471 + ], + [ + 707, + 472 + ], + [ + 712, + 472 + ], + [ + 716, + 470 + ], + [ + 720, + 469 + ], + [ + 731, + 467 + ], + [ + 745, + 448 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 786, + 427 + ], + [ + 758, + 426 + ], + [ + 747, + 426 + ], + [ + 740, + 432 + ], + [ + 732, + 441 + ], + [ + 731, + 444 + ], + [ + 728, + 450 + ], + [ + 727, + 455 + ], + [ + 725, + 463 + ], + [ + 725, + 471 + ], + [ + 729, + 475 + ], + [ + 735, + 475 + ], + [ + 735, + 471 + ], + [ + 739, + 472 + ], + [ + 742, + 472 + ], + [ + 744, + 476 + ], + [ + 752, + 477 + ], + [ + 755, + 472 + ], + [ + 757, + 471 + ], + [ + 761, + 470 + ], + [ + 767, + 470 + ], + [ + 779, + 459 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 829, + 427 + ], + [ + 809, + 425 + ], + [ + 786, + 427 + ], + [ + 777, + 433 + ], + [ + 770, + 444 + ], + [ + 765, + 455 + ], + [ + 765, + 469 + ], + [ + 765, + 474 + ], + [ + 768, + 479 + ], + [ + 774, + 479 + ], + [ + 776, + 475 + ], + [ + 781, + 475 + ], + [ + 785, + 480 + ], + [ + 788, + 481 + ], + [ + 791, + 480 + ], + [ + 792, + 478 + ], + [ + 794, + 474 + ], + [ + 801, + 474 + ], + [ + 808, + 474 + ], + [ + 812, + 473 + ], + [ + 815, + 474 + ], + [ + 816, + 479 + ], + [ + 820, + 480 + ], + [ + 825, + 478 + ], + [ + 825, + 475 + ], + [ + 829, + 472 + ], + [ + 835, + 467 + ], + [ + 843, + 455 + ], + [ + 843, + 445 + ], + [ + 836, + 435 + ], + [ + 832, + 430 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 914, + 426 + ], + [ + 876, + 426 + ], + [ + 852, + 433 + ], + [ + 839, + 445 + ], + [ + 832, + 455 + ], + [ + 829, + 469 + ], + [ + 830, + 481 + ], + [ + 832, + 486 + ], + [ + 840, + 486 + ], + [ + 843, + 486 + ], + [ + 845, + 481 + ], + [ + 855, + 480 + ], + [ + 857, + 484 + ], + [ + 859, + 486 + ], + [ + 864, + 488 + ], + [ + 867, + 486 + ], + [ + 868, + 485 + ], + [ + 870, + 482 + ], + [ + 877, + 479 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 957, + 427 + ], + [ + 927, + 426 + ], + [ + 915, + 426 + ], + [ + 901, + 428 + ], + [ + 891, + 439 + ], + [ + 887, + 442 + ], + [ + 881, + 445 + ], + [ + 879, + 455 + ], + [ + 878, + 461 + ], + [ + 875, + 470 + ], + [ + 877, + 479 + ], + [ + 877, + 486 + ], + [ + 879, + 490 + ], + [ + 886, + 491 + ], + [ + 889, + 490 + ], + [ + 891, + 487 + ], + [ + 897, + 487 + ], + [ + 901, + 487 + ], + [ + 904, + 487 + ], + [ + 910, + 492 + ], + [ + 916, + 492 + ], + [ + 919, + 489 + ], + [ + 925, + 487 + ], + [ + 931, + 486 + ], + [ + 934, + 486 + ], + [ + 938, + 490 + ], + [ + 943, + 491 + ], + [ + 948, + 489 + ], + [ + 951, + 486 + ], + [ + 963, + 486 + ], + [ + 965, + 486 + ], + [ + 968, + 490 + ], + 
[ + 969, + 491 + ], + [ + 977, + 493 + ], + [ + 981, + 488 + ], + [ + 985, + 476 + ], + [ + 983, + 464 + ], + [ + 978, + 454 + ], + [ + 975, + 442 + ], + [ + 967, + 430 + ], + [ + 962, + 427 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1052, + 392 + ], + [ + 1052, + 429 + ], + [ + 1052, + 449 + ], + [ + 1056, + 450 + ], + [ + 1056, + 388 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1044, + 344 + ], + [ + 1044, + 395 + ], + [ + 1062, + 395 + ], + [ + 1062, + 344 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1045, + 437 + ], + [ + 1037, + 441 + ], + [ + 1026, + 446 + ], + [ + 1027, + 457 + ], + [ + 1033, + 467 + ], + [ + 1038, + 479 + ], + [ + 1040, + 486 + ], + [ + 1044, + 490 + ], + [ + 1052, + 491 + ], + [ + 1057, + 492 + ], + [ + 1064, + 489 + ], + [ + 1067, + 487 + ], + [ + 1070, + 484 + ], + [ + 1072, + 476 + ], + [ + 1072, + 471 + ], + [ + 1063, + 452 + ], + [ + 1054, + 444 + ], + [ + 1049, + 440 + ], + [ + 1049, + 437 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1079, + 402 + ], + [ + 1074, + 407 + ], + [ + 1073, + 410 + ], + [ + 1067, + 416 + ], + [ + 1063, + 425 + ], + [ + 1062, + 442 + ], + [ + 1066, + 471 + ], + [ + 1066, + 482 + ], + [ + 1064, + 487 + ], + [ + 1064, + 489 + ], + [ + 1069, + 492 + ], + [ + 1072, + 491 + ], + [ + 1073, + 488 + ], + [ + 1073, + 470 + ], + [ + 1075, + 459 + ], + [ + 1079, + 453 + ], + [ + 1084, + 449 + ], + [ + 1089, + 426 + ], + [ + 1086, + 416 + ], + [ + 1083, + 405 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1093, + 407 + ], + [ + 1085, + 418 + ], + [ + 1077, + 438 + ], + [ + 1076, + 448 + ], + [ + 1079, + 456 + ], + [ + 1078, + 466 + ], + [ + 1082, + 484 + ], + [ + 1083, + 488 + ], + [ + 1083, + 491 + ], + [ + 1087, + 489 + ], + [ + 1090, + 486 + ], + [ + 1090, + 482 + ], + [ + 1096, + 470 + ], + [ + 1098, + 460 + ], + [ + 1101, + 452 + ], + [ + 1105, + 445 + ], + [ + 1108, + 440 + ], + [ + 1107, + 432 + ], + [ + 1103, + 425 + ], + [ + 1102, + 417 + ], + [ + 1101, + 410 + ], + [ + 1097, + 405 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1071, + 444 + ], + [ + 1051, + 443 + ], + [ + 1052, + 453 + ], + [ + 1055, + 472 + ], + [ + 1059, + 473 + ], + [ + 1065, + 472 + ], + [ + 1069, + 470 + ], + [ + 1073, + 451 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1141, + 254 + ], + [ + 1091, + 253 + ], + [ + 1091, + 303 + ], + [ + 1143, + 307 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1102, + 276 + ], + [ + 1099, + 502 + ], + [ + 1089, + 502 + ], + [ + 1094, + 278 + ], + [ + 1094, + 277 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1104, + 338 + ], + [ + 1106, + 333 + ], + [ + 1110, + 329 + ], + [ + 1116, + 329 + ], + [ + 1118, + 331 + ], + [ + 1122, + 337 + ], + [ + 1124, + 343 + ], + [ + 1124, + 349 + ], + [ + 1121, + 357 + ], + [ + 1117, + 361 + ], + [ + 1114, + 361 + ], + [ + 1108, + 360 + ], + [ + 1106, + 358 + ], + [ + 1101, + 352 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1106, + 322 + ], + [ + 1105, + 372 + ], + [ + 1094, + 373 + ], + [ + 1094, + 365 + ], + [ + 1086, + 362 + ], + [ + 1085, + 355 + ], + [ + 1094, + 355 + ], + [ + 1094, + 351 + ], + [ + 1085, + 348 + ], + [ + 1084, + 340 + ], + [ + 1095, + 340 + ], + [ + 1094, + 335 + ], + [ + 1085, + 333 + ], + [ + 1085, + 327 + ], + [ + 1096, + 325 + ], + [ + 1098, + 322 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 996, + 441 + ], + [ + 988, + 438 + ], + [ + 988, + 438 + ], + [ + 987, + 440 + ], + [ + 987, + 443 + ], + [ + 992, + 
445 + ], + [ + 992, + 447 + ], + [ + 991, + 452 + ], + [ + 993, + 453 + ], + [ + 1003, + 450 + ], + [ + 999, + 469 + ], + [ + 997, + 482 + ], + [ + 995, + 495 + ], + [ + 994, + 504 + ], + [ + 999, + 514 + ], + [ + 1001, + 520 + ], + [ + 1003, + 525 + ], + [ + 1007, + 527 + ], + [ + 1009, + 526 + ], + [ + 1011, + 524 + ], + [ + 1015, + 522 + ], + [ + 1016, + 524 + ], + [ + 1018, + 528 + ], + [ + 1020, + 529 + ], + [ + 1022, + 530 + ], + [ + 1027, + 530 + ], + [ + 1029, + 525 + ], + [ + 1030, + 521 + ], + [ + 1033, + 519 + ], + [ + 1036, + 515 + ], + [ + 1040, + 513 + ], + [ + 1042, + 511 + ], + [ + 1040, + 505 + ], + [ + 1038, + 501 + ], + [ + 1040, + 491 + ], + [ + 1041, + 485 + ], + [ + 1039, + 475 + ], + [ + 1030, + 463 + ], + [ + 1029, + 448 + ], + [ + 1036, + 444 + ], + [ + 1043, + 442 + ], + [ + 1047, + 440 + ], + [ + 1047, + 436 + ], + [ + 1045, + 435 + ], + [ + 1039, + 434 + ], + [ + 1028, + 434 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1411, + 324 + ], + [ + 1395, + 324 + ], + [ + 1394, + 383 + ], + [ + 1411, + 382 + ], + [ + 1414, + 375 + ], + [ + 1426, + 373 + ], + [ + 1426, + 365 + ], + [ + 1412, + 363 + ], + [ + 1412, + 358 + ], + [ + 1426, + 356 + ], + [ + 1426, + 347 + ], + [ + 1412, + 348 + ], + [ + 1412, + 341 + ], + [ + 1427, + 337 + ], + [ + 1426, + 331 + ], + [ + 1412, + 331 + ] + ] + }, + { + "label": "rider", + "polygon": [ + [ + 1012, + 394 + ], + [ + 1004, + 398 + ], + [ + 1002, + 407 + ], + [ + 1002, + 414 + ], + [ + 1002, + 418 + ], + [ + 995, + 433 + ], + [ + 993, + 445 + ], + [ + 987, + 450 + ], + [ + 993, + 455 + ], + [ + 996, + 454 + ], + [ + 997, + 452 + ], + [ + 1001, + 457 + ], + [ + 997, + 464 + ], + [ + 994, + 479 + ], + [ + 994, + 494 + ], + [ + 995, + 500 + ], + [ + 996, + 500 + ], + [ + 998, + 498 + ], + [ + 1001, + 491 + ], + [ + 1003, + 476 + ], + [ + 1010, + 469 + ], + [ + 1023, + 466 + ], + [ + 1029, + 466 + ], + [ + 1033, + 466 + ], + [ + 1033, + 462 + ], + [ + 1032, + 452 + ], + [ + 1032, + 450 + ], + [ + 1039, + 448 + ], + [ + 1042, + 447 + ], + [ + 1043, + 445 + ], + [ + 1042, + 442 + ], + [ + 1036, + 426 + ], + [ + 1031, + 418 + ], + [ + 1026, + 413 + ], + [ + 1024, + 403 + ], + [ + 1021, + 397 + ], + [ + 1016, + 394 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1495, + 102 + ], + [ + 1485, + 106 + ], + [ + 1461, + 116 + ], + [ + 1406, + 163 + ], + [ + 1394, + 191 + ], + [ + 1393, + 215 + ], + [ + 1392, + 319 + ], + [ + 1388, + 459 + ], + [ + 1389, + 495 + ], + [ + 1391, + 495 + ], + [ + 1395, + 462 + ], + [ + 1399, + 193 + ], + [ + 1409, + 168 + ], + [ + 1462, + 120 + ], + [ + 1495, + 108 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1489, + 94 + ], + [ + 1487, + 157 + ], + [ + 1499, + 157 + ], + [ + 1500, + 165 + ], + [ + 1508, + 161 + ], + [ + 1510, + 150 + ], + [ + 1521, + 146 + ], + [ + 1521, + 140 + ], + [ + 1509, + 139 + ], + [ + 1510, + 132 + ], + [ + 1521, + 127 + ], + [ + 1521, + 122 + ], + [ + 1511, + 120 + ], + [ + 1511, + 113 + ], + [ + 1523, + 109 + ], + [ + 1523, + 102 + ], + [ + 1512, + 100 + ], + [ + 1512, + 88 + ], + [ + 1502, + 95 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1380, + 331 + ], + [ + 1366, + 332 + ], + [ + 1366, + 335 + ], + [ + 1378, + 334 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1384, + 325 + ], + [ + 1375, + 325 + ], + [ + 1374, + 349 + ], + [ + 1373, + 380 + ], + [ + 1391, + 381 + ], + [ + 1392, + 326 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1370, + 329 + ], + [ + 1364, + 315 + ], + [ + 1360, + 314 + 
], + [ + 1360, + 337 + ], + [ + 1361, + 354 + ], + [ + 1363, + 354 + ], + [ + 1367, + 346 + ], + [ + 1370, + 342 + ], + [ + 1370, + 337 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1600, + 135 + ], + [ + 1597, + 136 + ], + [ + 1582, + 134 + ], + [ + 1581, + 140 + ], + [ + 1573, + 141 + ], + [ + 1570, + 144 + ], + [ + 1566, + 153 + ], + [ + 1567, + 156 + ], + [ + 1571, + 160 + ], + [ + 1571, + 172 + ], + [ + 1567, + 175 + ], + [ + 1565, + 182 + ], + [ + 1567, + 187 + ], + [ + 1571, + 188 + ], + [ + 1571, + 201 + ], + [ + 1567, + 204 + ], + [ + 1565, + 209 + ], + [ + 1566, + 213 + ], + [ + 1567, + 216 + ], + [ + 1571, + 217 + ], + [ + 1572, + 226 + ], + [ + 1580, + 226 + ], + [ + 1580, + 231 + ], + [ + 1593, + 231 + ], + [ + 1593, + 227 + ], + [ + 1605, + 233 + ], + [ + 1602, + 213 + ], + [ + 1597, + 208 + ], + [ + 1599, + 141 + ], + [ + 1607, + 140 + ], + [ + 1607, + 134 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1594, + 322 + ], + [ + 1583, + 323 + ], + [ + 1583, + 318 + ], + [ + 1576, + 317 + ], + [ + 1577, + 311 + ], + [ + 1559, + 307 + ], + [ + 1557, + 295 + ], + [ + 1576, + 293 + ], + [ + 1578, + 284 + ], + [ + 1558, + 279 + ], + [ + 1558, + 266 + ], + [ + 1578, + 265 + ], + [ + 1580, + 257 + ], + [ + 1559, + 250 + ], + [ + 1559, + 238 + ], + [ + 1583, + 234 + ], + [ + 1585, + 226 + ], + [ + 1603, + 228 + ], + [ + 1600, + 322 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1619, + 226 + ], + [ + 1622, + 221 + ], + [ + 1626, + 219 + ], + [ + 1634, + 221 + ], + [ + 1640, + 226 + ], + [ + 1643, + 237 + ], + [ + 1645, + 244 + ], + [ + 1647, + 252 + ], + [ + 1646, + 258 + ], + [ + 1640, + 266 + ], + [ + 1631, + 271 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1661, + 225 + ], + [ + 1631, + 255 + ], + [ + 1662, + 286 + ], + [ + 1693, + 254 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1637, + 193 + ], + [ + 1608, + 193 + ], + [ + 1607, + 199 + ], + [ + 1637, + 198 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1668, + 163 + ], + [ + 1655, + 165 + ], + [ + 1642, + 172 + ], + [ + 1636, + 182 + ], + [ + 1632, + 195 + ], + [ + 1633, + 205 + ], + [ + 1639, + 216 + ], + [ + 1647, + 222 + ], + [ + 1654, + 225 + ], + [ + 1662, + 227 + ], + [ + 1671, + 227 + ], + [ + 1682, + 222 + ], + [ + 1689, + 215 + ], + [ + 1694, + 208 + ], + [ + 1696, + 199 + ], + [ + 1696, + 187 + ], + [ + 1692, + 176 + ], + [ + 1682, + 168 + ], + [ + 1671, + 163 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1617, + 308 + ], + [ + 1594, + 332 + ], + [ + 1594, + 356 + ], + [ + 1602, + 365 + ], + [ + 1617, + 370 + ], + [ + 1627, + 371 + ], + [ + 1627, + 312 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1640, + 299 + ], + [ + 1650, + 299 + ], + [ + 1651, + 302 + ], + [ + 1658, + 303 + ], + [ + 1659, + 310 + ], + [ + 1667, + 310 + ], + [ + 1667, + 318 + ], + [ + 1657, + 321 + ], + [ + 1657, + 330 + ], + [ + 1665, + 330 + ], + [ + 1666, + 337 + ], + [ + 1657, + 339 + ], + [ + 1658, + 350 + ], + [ + 1667, + 355 + ], + [ + 1666, + 361 + ], + [ + 1657, + 363 + ], + [ + 1657, + 368 + ], + [ + 1639, + 370 + ], + [ + 1630, + 374 + ], + [ + 1631, + 368 + ], + [ + 1637, + 367 + ], + [ + 1639, + 311 + ], + [ + 1627, + 305 + ], + [ + 1632, + 301 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1624, + 313 + ], + [ + 1622, + 400 + ], + [ + 1634, + 400 + ], + [ + 1632, + 308 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1600, + 15 + ], + [ + 1606, + 35 + ], + [ + 1605, + 66 + 
], + [ + 1603, + 143 + ], + [ + 1602, + 200 + ], + [ + 1597, + 218 + ], + [ + 1598, + 324 + ], + [ + 1597, + 392 + ], + [ + 1609, + 391 + ], + [ + 1612, + 218 + ], + [ + 1613, + 206 + ], + [ + 1609, + 198 + ], + [ + 1613, + 35 + ], + [ + 1609, + 14 + ], + [ + 1600, + 0 + ], + [ + 1588, + 0 + ], + [ + 1595, + 6 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1634, + 230 + ], + [ + 1635, + 314 + ], + [ + 1626, + 316 + ], + [ + 1627, + 321 + ], + [ + 1606, + 320 + ], + [ + 1606, + 228 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1634, + 137 + ], + [ + 1607, + 146 + ], + [ + 1607, + 172 + ], + [ + 1634, + 165 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1844, + 12 + ], + [ + 1828, + 410 + ], + [ + 1844, + 411 + ], + [ + 1855, + 0 + ], + [ + 1844, + 0 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1916, + 18 + ], + [ + 1911, + 41 + ], + [ + 1897, + 52 + ], + [ + 1892, + 71 + ], + [ + 1889, + 101 + ], + [ + 1881, + 126 + ], + [ + 1891, + 160 + ], + [ + 1900, + 215 + ], + [ + 1907, + 297 + ], + [ + 1908, + 423 + ], + [ + 1909, + 447 + ], + [ + 1902, + 452 + ], + [ + 1873, + 450 + ], + [ + 1862, + 443 + ], + [ + 1848, + 436 + ], + [ + 1837, + 440 + ], + [ + 1837, + 464 + ], + [ + 1829, + 470 + ], + [ + 1831, + 485 + ], + [ + 1816, + 498 + ], + [ + 1802, + 534 + ], + [ + 1804, + 611 + ], + [ + 1837, + 610 + ], + [ + 1861, + 605 + ], + [ + 1894, + 599 + ], + [ + 1958, + 611 + ], + [ + 1989, + 608 + ], + [ + 2008, + 610 + ], + [ + 2039, + 612 + ], + [ + 2048, + 615 + ], + [ + 2048, + 440 + ], + [ + 2044, + 432 + ], + [ + 2042, + 437 + ], + [ + 2025, + 442 + ], + [ + 2012, + 434 + ], + [ + 2005, + 415 + ], + [ + 1982, + 406 + ], + [ + 1975, + 401 + ], + [ + 1961, + 213 + ], + [ + 1967, + 206 + ], + [ + 1970, + 195 + ], + [ + 1969, + 185 + ], + [ + 1962, + 174 + ], + [ + 1962, + 119 + ], + [ + 1965, + 81 + ], + [ + 1973, + 56 + ], + [ + 1989, + 41 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1745, + 403 + ], + [ + 1636, + 388 + ], + [ + 1581, + 387 + ], + [ + 1514, + 387 + ], + [ + 1463, + 393 + ], + [ + 1430, + 421 + ], + [ + 1402, + 461 + ], + [ + 1396, + 475 + ], + [ + 1395, + 461 + ], + [ + 1388, + 457 + ], + [ + 1375, + 458 + ], + [ + 1366, + 461 + ], + [ + 1365, + 469 + ], + [ + 1364, + 475 + ], + [ + 1364, + 480 + ], + [ + 1370, + 482 + ], + [ + 1383, + 484 + ], + [ + 1389, + 485 + ], + [ + 1363, + 519 + ], + [ + 1351, + 541 + ], + [ + 1348, + 581 + ], + [ + 1346, + 610 + ], + [ + 1349, + 630 + ], + [ + 1354, + 642 + ], + [ + 1361, + 647 + ], + [ + 1378, + 649 + ], + [ + 1385, + 646 + ], + [ + 1391, + 636 + ], + [ + 1395, + 624 + ], + [ + 1457, + 644 + ], + [ + 1458, + 666 + ], + [ + 1466, + 687 + ], + [ + 1478, + 694 + ], + [ + 1493, + 694 + ], + [ + 1502, + 693 + ], + [ + 1506, + 688 + ], + [ + 1510, + 682 + ], + [ + 1513, + 675 + ], + [ + 1518, + 664 + ], + [ + 1522, + 652 + ], + [ + 1747, + 661 + ], + [ + 1747, + 676 + ], + [ + 1753, + 693 + ], + [ + 1762, + 704 + ], + [ + 1777, + 709 + ], + [ + 1789, + 704 + ], + [ + 1799, + 698 + ], + [ + 1807, + 686 + ], + [ + 1814, + 672 + ], + [ + 1820, + 662 + ], + [ + 1831, + 658 + ], + [ + 1832, + 649 + ], + [ + 1834, + 626 + ], + [ + 1838, + 586 + ], + [ + 1831, + 561 + ], + [ + 1826, + 550 + ], + [ + 1823, + 537 + ], + [ + 1817, + 510 + ], + [ + 1805, + 465 + ], + [ + 1790, + 429 + ], + [ + 1779, + 411 + ], + [ + 1759, + 403 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1154, + 454 + ], + [ + 1152, + 507 + ], + [ + 1157, + 506 + ], + [ + 1159, + 454 + ] + ] + }, 
+ { + "label": "pole", + "polygon": [ + [ + 1234, + 458 + ], + [ + 1234, + 514 + ], + [ + 1238, + 514 + ], + [ + 1239, + 457 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 194, + 295 + ], + [ + 185, + 298 + ], + [ + 179, + 303 + ], + [ + 177, + 313 + ], + [ + 178, + 321 + ], + [ + 180, + 327 + ], + [ + 188, + 332 + ], + [ + 193, + 332 + ], + [ + 202, + 331 + ], + [ + 208, + 328 + ], + [ + 212, + 319 + ], + [ + 212, + 308 + ], + [ + 206, + 300 + ], + [ + 199, + 295 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 315, + 292 + ], + [ + 315, + 296 + ], + [ + 334, + 297 + ], + [ + 333, + 292 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 217, + 312 + ], + [ + 182, + 313 + ], + [ + 182, + 316 + ], + [ + 223, + 317 + ], + [ + 223, + 312 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 351, + 303 + ], + [ + 352, + 323 + ], + [ + 364, + 323 + ], + [ + 363, + 303 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 183, + 257 + ], + [ + 185, + 269 + ], + [ + 206, + 270 + ], + [ + 205, + 258 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 356, + 309 + ], + [ + 341, + 308 + ], + [ + 281, + 319 + ], + [ + 273, + 324 + ], + [ + 269, + 330 + ], + [ + 267, + 343 + ], + [ + 267, + 410 + ], + [ + 270, + 411 + ], + [ + 271, + 390 + ], + [ + 270, + 341 + ], + [ + 272, + 329 + ], + [ + 280, + 323 + ], + [ + 305, + 317 + ], + [ + 341, + 311 + ], + [ + 357, + 312 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 239, + 406 + ], + [ + 271, + 407 + ], + [ + 289, + 409 + ], + [ + 291, + 428 + ], + [ + 292, + 441 + ], + [ + 293, + 457 + ], + [ + 292, + 461 + ], + [ + 289, + 465 + ], + [ + 286, + 466 + ], + [ + 282, + 466 + ], + [ + 280, + 460 + ], + [ + 266, + 460 + ], + [ + 255, + 461 + ], + [ + 248, + 460 + ], + [ + 247, + 463 + ], + [ + 245, + 465 + ], + [ + 242, + 465 + ], + [ + 238, + 464 + ], + [ + 236, + 463 + ], + [ + 233, + 453 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 220, + 246 + ], + [ + 173, + 245 + ], + [ + 172, + 247 + ], + [ + 171, + 258 + ], + [ + 172, + 259 + ], + [ + 178, + 261 + ], + [ + 224, + 262 + ], + [ + 224, + 246 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 181, + 370 + ], + [ + 180, + 471 + ], + [ + 185, + 471 + ], + [ + 233, + 472 + ], + [ + 233, + 461 + ], + [ + 245, + 461 + ], + [ + 243, + 384 + ], + [ + 225, + 384 + ], + [ + 225, + 377 + ], + [ + 223, + 370 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 225, + 326 + ], + [ + 256, + 329 + ], + [ + 257, + 376 + ], + [ + 224, + 377 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 423, + 133 + ], + [ + 423, + 185 + ], + [ + 425, + 189 + ], + [ + 428, + 191 + ], + [ + 451, + 191 + ], + [ + 454, + 187 + ], + [ + 454, + 135 + ], + [ + 451, + 131 + ], + [ + 428, + 130 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 224, + 258 + ], + [ + 234, + 260 + ], + [ + 234, + 259 + ], + [ + 248, + 262 + ], + [ + 249, + 267 + ], + [ + 257, + 268 + ], + [ + 257, + 273 + ], + [ + 249, + 274 + ], + [ + 249, + 283 + ], + [ + 257, + 284 + ], + [ + 257, + 290 + ], + [ + 248, + 290 + ], + [ + 249, + 297 + ], + [ + 258, + 299 + ], + [ + 259, + 305 + ], + [ + 248, + 306 + ], + [ + 245, + 315 + ], + [ + 234, + 315 + ], + [ + 225, + 307 + ], + [ + 230, + 303 + ], + [ + 236, + 303 + ], + [ + 234, + 268 + ], + [ + 222, + 264 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 219, + 492 + ], + [ + 218, + 440 + ], + [ + 220, + 437 + ], + [ + 221, + 287 + ], + [ + 222, + 198 + ], + [ + 229, + 179 + 
], + [ + 241, + 167 + ], + [ + 256, + 161 + ], + [ + 305, + 152 + ], + [ + 356, + 145 + ], + [ + 382, + 141 + ], + [ + 440, + 141 + ], + [ + 439, + 147 + ], + [ + 380, + 145 + ], + [ + 289, + 159 + ], + [ + 251, + 167 + ], + [ + 236, + 176 + ], + [ + 230, + 188 + ], + [ + 226, + 215 + ], + [ + 228, + 262 + ], + [ + 227, + 290 + ], + [ + 228, + 437 + ], + [ + 230, + 441 + ], + [ + 230, + 493 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 244, + 321 + ], + [ + 242, + 323 + ], + [ + 233, + 324 + ], + [ + 231, + 330 + ], + [ + 244, + 331 + ], + [ + 243, + 340 + ], + [ + 233, + 340 + ], + [ + 233, + 346 + ], + [ + 242, + 347 + ], + [ + 242, + 355 + ], + [ + 232, + 357 + ], + [ + 233, + 359 + ], + [ + 220, + 359 + ], + [ + 220, + 313 + ], + [ + 231, + 313 + ], + [ + 234, + 314 + ], + [ + 244, + 315 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 169, + 481 + ], + [ + 162, + 479 + ], + [ + 158, + 482 + ], + [ + 149, + 482 + ], + [ + 135, + 487 + ], + [ + 111, + 485 + ], + [ + 121, + 458 + ], + [ + 172, + 455 + ], + [ + 173, + 458 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 175, + 461 + ], + [ + 178, + 458 + ], + [ + 177, + 451 + ], + [ + 175, + 446 + ], + [ + 171, + 441 + ], + [ + 168, + 438 + ], + [ + 163, + 436 + ], + [ + 158, + 439 + ], + [ + 148, + 442 + ], + [ + 140, + 445 + ], + [ + 133, + 445 + ], + [ + 122, + 445 + ], + [ + 121, + 445 + ], + [ + 116, + 446 + ], + [ + 113, + 454 + ], + [ + 112, + 461 + ], + [ + 110, + 466 + ], + [ + 110, + 476 + ], + [ + 112, + 478 + ], + [ + 117, + 476 + ], + [ + 121, + 471 + ], + [ + 126, + 466 + ], + [ + 132, + 466 + ], + [ + 142, + 466 + ], + [ + 145, + 464 + ], + [ + 152, + 463 + ], + [ + 156, + 462 + ], + [ + 165, + 461 + ], + [ + 169, + 462 + ], + [ + 171, + 462 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 15, + 315 + ], + [ + 34, + 313 + ], + [ + 34, + 300 + ], + [ + 36, + 300 + ], + [ + 37, + 325 + ], + [ + 34, + 325 + ], + [ + 33, + 316 + ], + [ + 11, + 318 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 36, + 267 + ], + [ + 29, + 276 + ], + [ + 26, + 285 + ], + [ + 27, + 295 + ], + [ + 32, + 305 + ], + [ + 33, + 308 + ], + [ + 35, + 305 + ], + [ + 37, + 294 + ], + [ + 37, + 277 + ], + [ + 37, + 268 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 18, + 294 + ], + [ + 17, + 350 + ], + [ + 0, + 352 + ], + [ + 0, + 291 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 188, + 359 + ], + [ + 184, + 356 + ], + [ + 179, + 351 + ], + [ + 179, + 343 + ], + [ + 181, + 338 + ], + [ + 185, + 335 + ], + [ + 190, + 332 + ], + [ + 195, + 331 + ], + [ + 200, + 332 + ], + [ + 205, + 335 + ], + [ + 208, + 339 + ], + [ + 210, + 345 + ], + [ + 209, + 351 + ], + [ + 204, + 356 + ], + [ + 200, + 358 + ], + [ + 195, + 360 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 815, + 367 + ], + [ + 782, + 366 + ], + [ + 781, + 382 + ], + [ + 816, + 384 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 313, + 403 + ], + [ + 310, + 406 + ], + [ + 311, + 413 + ], + [ + 308, + 416 + ], + [ + 303, + 426 + ], + [ + 300, + 440 + ], + [ + 301, + 441 + ], + [ + 307, + 433 + ], + [ + 309, + 438 + ], + [ + 308, + 457 + ], + [ + 309, + 470 + ], + [ + 308, + 476 + ], + [ + 310, + 478 + ], + [ + 314, + 477 + ], + [ + 315, + 471 + ], + [ + 314, + 458 + ], + [ + 314, + 449 + ], + [ + 316, + 445 + ], + [ + 318, + 447 + ], + [ + 320, + 456 + ], + [ + 324, + 466 + ], + [ + 325, + 471 + ], + [ + 325, + 476 + ], + [ + 325, + 479 + ], + [ + 328, + 476 + ], + [ + 330, + 472 + 
], + [ + 331, + 463 + ], + [ + 329, + 458 + ], + [ + 326, + 449 + ], + [ + 327, + 443 + ], + [ + 331, + 440 + ], + [ + 332, + 426 + ], + [ + 332, + 420 + ], + [ + 326, + 417 + ], + [ + 320, + 412 + ], + [ + 319, + 410 + ], + [ + 317, + 406 + ], + [ + 314, + 403 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 344, + 405 + ], + [ + 347, + 410 + ], + [ + 348, + 417 + ], + [ + 351, + 420 + ], + [ + 353, + 423 + ], + [ + 353, + 429 + ], + [ + 353, + 431 + ], + [ + 350, + 435 + ], + [ + 350, + 442 + ], + [ + 350, + 446 + ], + [ + 348, + 452 + ], + [ + 347, + 461 + ], + [ + 346, + 469 + ], + [ + 345, + 475 + ], + [ + 343, + 477 + ], + [ + 339, + 476 + ], + [ + 340, + 472 + ], + [ + 343, + 464 + ], + [ + 343, + 455 + ], + [ + 343, + 449 + ], + [ + 342, + 447 + ], + [ + 342, + 445 + ], + [ + 340, + 448 + ], + [ + 340, + 457 + ], + [ + 339, + 466 + ], + [ + 339, + 471 + ], + [ + 338, + 474 + ], + [ + 337, + 476 + ], + [ + 335, + 476 + ], + [ + 333, + 476 + ], + [ + 332, + 474 + ], + [ + 333, + 470 + ], + [ + 333, + 465 + ], + [ + 334, + 454 + ], + [ + 335, + 447 + ], + [ + 335, + 442 + ], + [ + 331, + 440 + ], + [ + 326, + 433 + ], + [ + 330, + 421 + ], + [ + 334, + 418 + ], + [ + 337, + 412 + ], + [ + 338, + 407 + ], + [ + 340, + 406 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 55, + 386 + ], + [ + 55, + 393 + ], + [ + 57, + 397 + ], + [ + 61, + 398 + ], + [ + 65, + 403 + ], + [ + 65, + 410 + ], + [ + 62, + 428 + ], + [ + 56, + 461 + ], + [ + 56, + 470 + ], + [ + 50, + 485 + ], + [ + 47, + 490 + ], + [ + 46, + 493 + ], + [ + 51, + 495 + ], + [ + 60, + 496 + ], + [ + 70, + 496 + ], + [ + 79, + 494 + ], + [ + 85, + 491 + ], + [ + 83, + 475 + ], + [ + 78, + 454 + ], + [ + 82, + 437 + ], + [ + 86, + 420 + ], + [ + 86, + 408 + ], + [ + 85, + 398 + ], + [ + 78, + 391 + ], + [ + 71, + 383 + ], + [ + 67, + 381 + ], + [ + 61, + 380 + ], + [ + 57, + 382 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 27, + 392 + ], + [ + 23, + 397 + ], + [ + 20, + 408 + ], + [ + 8, + 415 + ], + [ + 5, + 427 + ], + [ + 5, + 457 + ], + [ + 8, + 463 + ], + [ + 8, + 480 + ], + [ + 8, + 498 + ], + [ + 7, + 504 + ], + [ + 15, + 507 + ], + [ + 17, + 505 + ], + [ + 19, + 499 + ], + [ + 20, + 478 + ], + [ + 25, + 464 + ], + [ + 29, + 480 + ], + [ + 36, + 494 + ], + [ + 37, + 502 + ], + [ + 42, + 506 + ], + [ + 49, + 503 + ], + [ + 52, + 497 + ], + [ + 54, + 493 + ], + [ + 53, + 490 + ], + [ + 50, + 487 + ], + [ + 44, + 476 + ], + [ + 42, + 470 + ], + [ + 43, + 462 + ], + [ + 46, + 457 + ], + [ + 46, + 450 + ], + [ + 42, + 428 + ], + [ + 40, + 415 + ], + [ + 37, + 409 + ], + [ + 36, + 406 + ], + [ + 35, + 398 + ], + [ + 31, + 392 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000153_000019_gtFine_color.png 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000153_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..bc90d6c4945c38720ff523dda383137ad236adf3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000153_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..5772f34f9b3c2693c1f910d27220cf8e5ae821ee Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ab6d6de77af87b60e50f46b21f7cf58e5e1ff618 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..222633eb5625a6273920e15bdf6fd9b14a2506e3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..95d965363ab26fff329e9cae5bf861b457869e25 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000154_000019_gtFine_polygons.json @@ -0,0 +1,2847 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 26, + 357 + ], + [ + 2048, + 405 + ], + [ + 2048, + 1023 + ], + [ + 0, + 1024 + ], + [ + 0, + 343 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 706, + 28 + ], + [ + 1059, + 247 + ], + [ + 1449, + 219 + ], + [ + 1642, + 0 + ], + [ + 703, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 17, + 590 + ], + [ + 754, + 492 + ], + [ + 1005, + 459 + ], + [ + 993, + 452 + ], + [ + 908, + 447 + ], + [ + 612, + 468 + ], + [ + 390, + 468 + ], + [ + 60, + 486 + ], + [ + 0, + 486 + ], + [ + 0, + 592 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2004, + 1017 + ], + [ + 1380, + 536 + ], + [ + 1296, + 451 + ], + [ + 1470, + 432 + ], + [ + 2048, + 513 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 803, + 0 + ], + [ + 807, + 9 + ], + [ + 826, + 9 + ], + [ + 824, + 2 + ], + [ + 835, + 3 + ], + [ + 836, + 8 + ], + [ + 843, + 9 + ], + [ + 849, + 0 + ], + [ + 856, + 0 + ], + [ + 854, + 45 + ], + [ + 915, + 47 + ], + [ + 916, + 81 + ], + [ + 998, + 82 + ], + [ + 999, + 89 + ], + [ + 1020, + 90 + ], + [ + 1022, + 190 + ], + [ + 1101, + 193 + ], + [ + 1102, + 186 + ], + [ + 1117, + 186 + ], + [ + 1119, + 193 + ], + [ + 1194, + 196 + ], + [ + 1199, + 186 + ], + [ + 1202, + 186 + ], + [ + 1203, + 181 + ], + [ + 1207, + 181 + ], + [ + 1210, + 188 + ], + [ + 1219, + 189 + ], + [ + 1220, + 183 + ], + [ + 1233, + 183 + ], + [ + 1233, + 189 + ], + [ + 1249, + 191 + ], + [ + 1247, + 185 + ], + [ + 1263, + 184 + ], + [ + 1264, + 191 + ], + [ + 
1268, + 192 + ], + [ + 1274, + 187 + ], + [ + 1281, + 194 + ], + [ + 1284, + 193 + ], + [ + 1283, + 186 + ], + [ + 1294, + 186 + ], + [ + 1295, + 194 + ], + [ + 1322, + 194 + ], + [ + 1320, + 65 + ], + [ + 1326, + 57 + ], + [ + 1326, + 28 + ], + [ + 1335, + 12 + ], + [ + 1376, + 7 + ], + [ + 1400, + 18 + ], + [ + 1406, + 17 + ], + [ + 1405, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 533 + ], + [ + 2046, + 531 + ], + [ + 1770, + 501 + ], + [ + 1724, + 493 + ], + [ + 1623, + 481 + ], + [ + 1610, + 483 + ], + [ + 1583, + 472 + ], + [ + 1514, + 462 + ], + [ + 1501, + 465 + ], + [ + 1501, + 456 + ], + [ + 1468, + 456 + ], + [ + 1452, + 448 + ], + [ + 1427, + 439 + ], + [ + 1387, + 448 + ], + [ + 1318, + 439 + ], + [ + 1298, + 442 + ], + [ + 1252, + 440 + ], + [ + 1157, + 440 + ], + [ + 929, + 459 + ], + [ + 871, + 460 + ], + [ + 775, + 466 + ], + [ + 771, + 468 + ], + [ + 757, + 468 + ], + [ + 754, + 464 + ], + [ + 716, + 464 + ], + [ + 715, + 472 + ], + [ + 696, + 472 + ], + [ + 696, + 465 + ], + [ + 669, + 465 + ], + [ + 632, + 476 + ], + [ + 581, + 487 + ], + [ + 535, + 485 + ], + [ + 452, + 488 + ], + [ + 130, + 522 + ], + [ + 0, + 532 + ], + [ + 0, + 0 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 139, + 350 + ], + [ + 277, + 346 + ], + [ + 397, + 357 + ], + [ + 362, + 362 + ], + [ + 361, + 525 + ], + [ + 288, + 524 + ], + [ + 255, + 527 + ], + [ + 237, + 537 + ], + [ + 170, + 536 + ], + [ + 170, + 517 + ], + [ + 143, + 519 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 182, + 546 + ], + [ + 193, + 546 + ], + [ + 184, + 0 + ], + [ + 174, + 0 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 407, + 352 + ], + [ + 415, + 520 + ], + [ + 421, + 520 + ], + [ + 416, + 348 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 384, + 293 + ], + [ + 436, + 293 + ], + [ + 439, + 359 + ], + [ + 385, + 361 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 443, + 463 + ], + [ + 417, + 464 + ], + [ + 415, + 469 + ], + [ + 417, + 503 + ], + [ + 431, + 502 + ], + [ + 439, + 495 + ], + [ + 442, + 472 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 484, + 236 + ], + [ + 484, + 286 + ], + [ + 545, + 285 + ], + [ + 544, + 236 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 483, + 181 + ], + [ + 485, + 245 + ], + [ + 545, + 243 + ], + [ + 544, + 181 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 664, + 103 + ], + [ + 671, + 487 + ], + [ + 677, + 484 + ], + [ + 670, + 102 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 665, + 94 + ], + [ + 686, + 95 + ], + [ + 694, + 101 + ], + [ + 660, + 109 + ], + [ + 658, + 107 + ], + [ + 660, + 98 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 889, + 255 + ], + [ + 887, + 364 + ], + [ + 892, + 365 + ], + [ + 893, + 383 + ], + [ + 868, + 386 + ], + [ + 867, + 257 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 983, + 249 + ], + [ + 983, + 408 + ], + [ + 990, + 407 + ], + [ + 985, + 246 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1001, + 246 + ], + [ + 984, + 246 + ], + [ + 984, + 250 + ], + [ + 997, + 251 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1118, + 311 + ], + [ + 1119, + 427 + ], + [ + 1122, + 427 + ], + [ + 1120, + 311 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1162, + 445 + ], + [ + 1107, + 454 + ], + [ + 987, + 461 + ], + [ + 984, + 404 + ], + [ + 1001, + 395 + ], + [ + 1025, + 393 + ], + [ + 1043, + 393 + ], + [ + 1048, + 400 + ], + [ + 1109, + 401 + ], + [ + 1112, + 425 + ], + [ + 1159, + 422 + ] + ] + 
}, + { + "label": "car", + "polygon": [ + [ + 963, + 428 + ], + [ + 942, + 428 + ], + [ + 936, + 432 + ], + [ + 932, + 438 + ], + [ + 927, + 441 + ], + [ + 924, + 446 + ], + [ + 923, + 449 + ], + [ + 924, + 461 + ], + [ + 927, + 462 + ], + [ + 930, + 461 + ], + [ + 934, + 460 + ], + [ + 936, + 460 + ], + [ + 939, + 461 + ], + [ + 943, + 461 + ], + [ + 946, + 459 + ], + [ + 954, + 459 + ], + [ + 955, + 461 + ], + [ + 960, + 462 + ], + [ + 963, + 460 + ], + [ + 964, + 458 + ], + [ + 968, + 458 + ], + [ + 970, + 461 + ], + [ + 976, + 461 + ], + [ + 978, + 452 + ], + [ + 979, + 445 + ], + [ + 976, + 438 + ], + [ + 971, + 429 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1237, + 417 + ], + [ + 1237, + 449 + ], + [ + 1305, + 448 + ], + [ + 1298, + 437 + ], + [ + 1300, + 409 + ] + ] + }, + { + "label": "rider", + "polygon": [ + [ + 1229, + 420 + ], + [ + 1227, + 427 + ], + [ + 1230, + 439 + ], + [ + 1230, + 457 + ], + [ + 1231, + 457 + ], + [ + 1233, + 456 + ], + [ + 1238, + 453 + ], + [ + 1241, + 447 + ], + [ + 1241, + 437 + ], + [ + 1244, + 430 + ], + [ + 1244, + 419 + ], + [ + 1239, + 411 + ], + [ + 1235, + 410 + ], + [ + 1233, + 413 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1234, + 441 + ], + [ + 1234, + 456 + ], + [ + 1235, + 463 + ], + [ + 1238, + 462 + ], + [ + 1238, + 454 + ], + [ + 1238, + 446 + ], + [ + 1239, + 443 + ], + [ + 1239, + 439 + ], + [ + 1237, + 432 + ], + [ + 1234, + 432 + ], + [ + 1233, + 436 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1251, + 370 + ], + [ + 1257, + 370 + ], + [ + 1256, + 355 + ], + [ + 1250, + 355 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1306, + 376 + ], + [ + 1307, + 398 + ], + [ + 1325, + 397 + ], + [ + 1324, + 375 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1320, + 420 + ], + [ + 1311, + 420 + ], + [ + 1309, + 423 + ], + [ + 1303, + 428 + ], + [ + 1301, + 435 + ], + [ + 1300, + 440 + ], + [ + 1300, + 446 + ], + [ + 1301, + 452 + ], + [ + 1302, + 456 + ], + [ + 1303, + 460 + ], + [ + 1309, + 461 + ], + [ + 1312, + 456 + ], + [ + 1315, + 447 + ], + [ + 1323, + 436 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1450, + 339 + ], + [ + 1429, + 342 + ], + [ + 1415, + 339 + ], + [ + 1411, + 334 + ], + [ + 1410, + 334 + ], + [ + 1396, + 336 + ], + [ + 1388, + 336 + ], + [ + 1378, + 338 + ], + [ + 1367, + 342 + ], + [ + 1362, + 358 + ], + [ + 1361, + 364 + ], + [ + 1353, + 370 + ], + [ + 1351, + 370 + ], + [ + 1347, + 373 + ], + [ + 1340, + 379 + ], + [ + 1337, + 385 + ], + [ + 1343, + 393 + ], + [ + 1347, + 399 + ], + [ + 1348, + 412 + ], + [ + 1351, + 416 + ], + [ + 1357, + 415 + ], + [ + 1365, + 415 + ], + [ + 1373, + 415 + ], + [ + 1378, + 414 + ], + [ + 1382, + 408 + ], + [ + 1388, + 404 + ], + [ + 1389, + 409 + ], + [ + 1390, + 420 + ], + [ + 1385, + 421 + ], + [ + 1380, + 423 + ], + [ + 1382, + 429 + ], + [ + 1389, + 429 + ], + [ + 1400, + 430 + ], + [ + 1411, + 429 + ], + [ + 1411, + 437 + ], + [ + 1414, + 442 + ], + [ + 1427, + 445 + ], + [ + 1443, + 446 + ], + [ + 1453, + 443 + ], + [ + 1451, + 408 + ], + [ + 1451, + 383 + ], + [ + 1446, + 382 + ], + [ + 1445, + 373 + ], + [ + 1452, + 369 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1355, + 411 + ], + [ + 1319, + 413 + ], + [ + 1313, + 422 + ], + [ + 1309, + 435 + ], + [ + 1307, + 451 + ], + [ + 1309, + 460 + ], + [ + 1311, + 464 + ], + [ + 1315, + 464 + ], + [ + 1317, + 463 + ], + [ + 1318, + 461 + ], + [ + 1322, + 460 + ], + [ + 1327, + 458 + ], + [ + 1333, + 458 + ], + [ + 1339, + 456 
+ ], + [ + 1348, + 457 + ], + [ + 1361, + 454 + ], + [ + 1371, + 444 + ], + [ + 1372, + 437 + ], + [ + 1367, + 422 + ], + [ + 1359, + 412 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1391, + 268 + ], + [ + 1388, + 264 + ], + [ + 1383, + 265 + ], + [ + 1374, + 271 + ], + [ + 1374, + 273 + ], + [ + 1382, + 270 + ], + [ + 1389, + 269 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1331, + 374 + ], + [ + 1332, + 464 + ], + [ + 1336, + 464 + ], + [ + 1333, + 374 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1332, + 354 + ], + [ + 1328, + 355 + ], + [ + 1326, + 358 + ], + [ + 1325, + 361 + ], + [ + 1327, + 366 + ], + [ + 1330, + 367 + ], + [ + 1331, + 369 + ], + [ + 1324, + 369 + ], + [ + 1324, + 377 + ], + [ + 1341, + 377 + ], + [ + 1341, + 368 + ], + [ + 1335, + 368 + ], + [ + 1337, + 366 + ], + [ + 1340, + 363 + ], + [ + 1339, + 358 + ], + [ + 1336, + 354 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1335, + 288 + ], + [ + 1335, + 348 + ], + [ + 1336, + 349 + ], + [ + 1371, + 350 + ], + [ + 1375, + 288 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1356, + 311 + ], + [ + 1362, + 440 + ], + [ + 1368, + 441 + ], + [ + 1361, + 293 + ], + [ + 1353, + 78 + ], + [ + 1346, + 78 + ], + [ + 1350, + 136 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1397, + 233 + ], + [ + 1393, + 237 + ], + [ + 1382, + 233 + ], + [ + 1384, + 229 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1327, + 75 + ], + [ + 1334, + 69 + ], + [ + 1342, + 69 + ], + [ + 1357, + 74 + ], + [ + 1351, + 82 + ], + [ + 1342, + 84 + ], + [ + 1332, + 83 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1409, + 451 + ], + [ + 1389, + 453 + ], + [ + 1378, + 450 + ], + [ + 1379, + 427 + ], + [ + 1413, + 429 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1380, + 283 + ], + [ + 1375, + 283 + ], + [ + 1375, + 286 + ], + [ + 1384, + 287 + ], + [ + 1384, + 283 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1393, + 468 + ], + [ + 1342, + 472 + ], + [ + 1342, + 442 + ], + [ + 1389, + 438 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1379, + 184 + ], + [ + 1388, + 431 + ], + [ + 1391, + 431 + ], + [ + 1382, + 175 + ], + [ + 1378, + 175 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1385, + 173 + ], + [ + 1371, + 170 + ], + [ + 1363, + 175 + ], + [ + 1372, + 181 + ], + [ + 1379, + 178 + ], + [ + 1383, + 178 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1370, + 135 + ], + [ + 1373, + 294 + ], + [ + 1372, + 484 + ], + [ + 1381, + 484 + ], + [ + 1373, + 134 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1376, + 134 + ], + [ + 1368, + 122 + ], + [ + 1344, + 123 + ], + [ + 1343, + 134 + ], + [ + 1369, + 138 + ], + [ + 1374, + 140 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1402, + 486 + ], + [ + 1401, + 433 + ], + [ + 1406, + 433 + ], + [ + 1406, + 487 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1412, + 490 + ], + [ + 1412, + 433 + ], + [ + 1408, + 433 + ], + [ + 1408, + 489 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1414, + 434 + ], + [ + 1417, + 493 + ], + [ + 1421, + 493 + ], + [ + 1419, + 434 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1425, + 435 + ], + [ + 1426, + 494 + ], + [ + 1430, + 497 + ], + [ + 1430, + 436 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1435, + 438 + ], + [ + 1436, + 500 + ], + [ + 1438, + 502 + ], + [ + 1439, + 436 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1445, + 435 + ], + [ + 1445, + 
507 + ], + [ + 1449, + 507 + ], + [ + 1448, + 436 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1457, + 435 + ], + [ + 1458, + 513 + ], + [ + 1463, + 513 + ], + [ + 1461, + 434 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1471, + 434 + ], + [ + 1473, + 517 + ], + [ + 1478, + 517 + ], + [ + 1475, + 433 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1490, + 438 + ], + [ + 1489, + 514 + ], + [ + 1495, + 515 + ], + [ + 1495, + 433 + ], + [ + 1490, + 433 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1513, + 432 + ], + [ + 1511, + 510 + ], + [ + 1517, + 511 + ], + [ + 1519, + 432 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1563, + 434 + ], + [ + 1564, + 507 + ], + [ + 1573, + 507 + ], + [ + 1571, + 433 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1597, + 402 + ], + [ + 1585, + 427 + ], + [ + 1584, + 439 + ], + [ + 1583, + 448 + ], + [ + 1585, + 451 + ], + [ + 1594, + 451 + ], + [ + 1606, + 451 + ], + [ + 1618, + 452 + ], + [ + 1623, + 449 + ], + [ + 1622, + 422 + ], + [ + 1616, + 408 + ], + [ + 1610, + 417 + ], + [ + 1606, + 423 + ], + [ + 1603, + 427 + ], + [ + 1599, + 406 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1584, + 455 + ], + [ + 1585, + 481 + ], + [ + 1597, + 479 + ], + [ + 1606, + 485 + ], + [ + 1621, + 483 + ], + [ + 1622, + 452 + ], + [ + 1584, + 450 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1367, + 294 + ], + [ + 1365, + 300 + ], + [ + 1365, + 308 + ], + [ + 1370, + 314 + ], + [ + 1365, + 314 + ], + [ + 1366, + 342 + ], + [ + 1371, + 342 + ], + [ + 1366, + 346 + ], + [ + 1365, + 351 + ], + [ + 1365, + 360 + ], + [ + 1368, + 366 + ], + [ + 1374, + 367 + ], + [ + 1378, + 364 + ], + [ + 1381, + 359 + ], + [ + 1383, + 353 + ], + [ + 1381, + 348 + ], + [ + 1376, + 341 + ], + [ + 1384, + 341 + ], + [ + 1384, + 314 + ], + [ + 1379, + 314 + ], + [ + 1383, + 311 + ], + [ + 1386, + 307 + ], + [ + 1387, + 299 + ], + [ + 1382, + 293 + ], + [ + 1376, + 291 + ], + [ + 1371, + 291 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1673, + 223 + ], + [ + 1617, + 223 + ], + [ + 1613, + 280 + ], + [ + 1621, + 283 + ], + [ + 1682, + 285 + ], + [ + 1682, + 275 + ], + [ + 1767, + 275 + ], + [ + 1767, + 185 + ], + [ + 1673, + 189 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1522, + 275 + ], + [ + 1537, + 518 + ], + [ + 1550, + 518 + ], + [ + 1529, + 281 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1438, + 29 + ], + [ + 1447, + 285 + ], + [ + 1448, + 289 + ], + [ + 1456, + 293 + ], + [ + 1605, + 285 + ], + [ + 1614, + 280 + ], + [ + 1619, + 150 + ], + [ + 1592, + 15 + ], + [ + 1581, + 14 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1596, + 505 + ], + [ + 1487, + 507 + ], + [ + 1489, + 534 + ], + [ + 1480, + 535 + ], + [ + 1482, + 569 + ], + [ + 1603, + 571 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1596, + 435 + ], + [ + 1599, + 573 + ], + [ + 1608, + 574 + ], + [ + 1607, + 434 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1609, + 129 + ], + [ + 1623, + 619 + ], + [ + 1644, + 618 + ], + [ + 1624, + 123 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1582, + 36 + ], + [ + 1586, + 138 + ], + [ + 1644, + 129 + ], + [ + 1646, + 0 + ], + [ + 1579, + 0 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1805, + 0 + ], + [ + 1808, + 63 + ], + [ + 1818, + 62 + ], + [ + 1821, + 165 + ], + [ + 1733, + 168 + ], + [ + 1724, + 75 + ], + [ + 1702, + 77 + ], + [ + 1697, + 0 + ] + ] + }, + { + "label": 
"dynamic", + "polygon": [ + [ + 1734, + 479 + ], + [ + 1725, + 559 + ], + [ + 1720, + 573 + ], + [ + 1706, + 578 + ], + [ + 1681, + 576 + ], + [ + 1668, + 567 + ], + [ + 1667, + 471 + ], + [ + 1676, + 463 + ], + [ + 1705, + 461 + ], + [ + 1735, + 464 + ], + [ + 1739, + 470 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1655, + 8 + ], + [ + 1664, + 433 + ], + [ + 1672, + 444 + ], + [ + 1675, + 630 + ], + [ + 1645, + 629 + ], + [ + 1640, + 442 + ], + [ + 1648, + 433 + ], + [ + 1641, + 0 + ], + [ + 1654, + 0 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1651, + 143 + ], + [ + 1637, + 149 + ], + [ + 1626, + 160 + ], + [ + 1623, + 186 + ], + [ + 1624, + 208 + ], + [ + 1629, + 221 + ], + [ + 1636, + 227 + ], + [ + 1648, + 225 + ], + [ + 1659, + 216 + ], + [ + 1670, + 198 + ], + [ + 1671, + 179 + ], + [ + 1670, + 162 + ], + [ + 1663, + 150 + ], + [ + 1658, + 146 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000155_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000155_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..6a6895fd061e9f26163b881a6708383e0404debd Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000155_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000155_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000155_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..7f07d9852dbf90fb82536a116994297ce8b54d1a Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000155_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000156_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000156_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ac17e168f54682c94d851f3c141af9fc2de8890a Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000156_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000156_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000156_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..1813e5eef87fdfa452daa9f69eb27b1e333b392d --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000156_000019_gtFine_polygons.json @@ -0,0 +1,4485 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 95, + 313 + ], + [ + 2048, + 327 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 269 + ] + ] + }, + { + 
"label": "sidewalk", + "polygon": [ + [ + 267, + 509 + ], + [ + 211, + 515 + ], + [ + 215, + 491 + ], + [ + 296, + 483 + ], + [ + 289, + 507 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 259, + 20 + ], + [ + 399, + 250 + ], + [ + 829, + 391 + ], + [ + 1041, + 437 + ], + [ + 1349, + 387 + ], + [ + 1600, + 1 + ], + [ + 1590, + 0 + ], + [ + 271, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2003, + 613 + ], + [ + 1760, + 615 + ], + [ + 1675, + 614 + ], + [ + 1655, + 615 + ], + [ + 1650, + 619 + ], + [ + 1646, + 629 + ], + [ + 1649, + 642 + ], + [ + 1670, + 647 + ], + [ + 1753, + 660 + ], + [ + 1891, + 660 + ], + [ + 2048, + 654 + ], + [ + 2048, + 624 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 2003, + 613 + ], + [ + 1760, + 615 + ], + [ + 1675, + 614 + ], + [ + 1655, + 615 + ], + [ + 1650, + 619 + ], + [ + 1646, + 629 + ], + [ + 1649, + 642 + ], + [ + 1670, + 647 + ], + [ + 1753, + 660 + ], + [ + 1891, + 660 + ], + [ + 2048, + 654 + ], + [ + 2048, + 624 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2050, + 593 + ], + [ + 1728, + 580 + ], + [ + 1542, + 576 + ], + [ + 1335, + 557 + ], + [ + 1269, + 548 + ], + [ + 1257, + 531 + ], + [ + 1256, + 490 + ], + [ + 1255, + 481 + ], + [ + 1247, + 477 + ], + [ + 1261, + 466 + ], + [ + 1495, + 455 + ], + [ + 2048, + 519 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 0, + 0 + ], + [ + 391, + 0 + ], + [ + 394, + 2 + ], + [ + 408, + 4 + ], + [ + 442, + 25 + ], + [ + 437, + 31 + ], + [ + 423, + 39 + ], + [ + 420, + 40 + ], + [ + 419, + 103 + ], + [ + 433, + 113 + ], + [ + 428, + 122 + ], + [ + 422, + 128 + ], + [ + 423, + 172 + ], + [ + 442, + 173 + ], + [ + 442, + 169 + ], + [ + 490, + 169 + ], + [ + 493, + 172 + ], + [ + 501, + 175 + ], + [ + 509, + 182 + ], + [ + 504, + 185 + ], + [ + 505, + 201 + ], + [ + 512, + 198 + ], + [ + 516, + 199 + ], + [ + 521, + 202 + ], + [ + 592, + 202 + ], + [ + 665, + 239 + ], + [ + 670, + 244 + ], + [ + 661, + 243 + ], + [ + 663, + 254 + ], + [ + 675, + 259 + ], + [ + 675, + 267 + ], + [ + 702, + 278 + ], + [ + 713, + 302 + ], + [ + 745, + 320 + ], + [ + 860, + 366 + ], + [ + 907, + 367 + ], + [ + 924, + 373 + ], + [ + 926, + 386 + ], + [ + 954, + 385 + ], + [ + 983, + 388 + ], + [ + 1037, + 422 + ], + [ + 1124, + 427 + ], + [ + 1177, + 386 + ], + [ + 1182, + 377 + ], + [ + 1182, + 351 + ], + [ + 1179, + 343 + ], + [ + 1185, + 344 + ], + [ + 1184, + 326 + ], + [ + 1180, + 326 + ], + [ + 1181, + 301 + ], + [ + 1191, + 301 + ], + [ + 1196, + 297 + ], + [ + 1195, + 287 + ], + [ + 1186, + 286 + ], + [ + 1191, + 260 + ], + [ + 1194, + 246 + ], + [ + 1205, + 230 + ], + [ + 1208, + 241 + ], + [ + 1210, + 241 + ], + [ + 1217, + 228 + ], + [ + 1258, + 203 + ], + [ + 1258, + 174 + ], + [ + 1282, + 173 + ], + [ + 1296, + 172 + ], + [ + 1296, + 176 + ], + [ + 1328, + 177 + ], + [ + 1327, + 172 + ], + [ + 1345, + 172 + ], + [ + 1346, + 176 + ], + [ + 1376, + 175 + ], + [ + 1375, + 170 + ], + [ + 1384, + 170 + ], + [ + 1386, + 174 + ], + [ + 1402, + 175 + ], + [ + 1426, + 202 + ], + [ + 1416, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 549 + ], + [ + 1935, + 543 + ], + [ + 1797, + 532 + ], + [ + 1648, + 528 + ], + [ + 1459, + 474 + ], + [ + 1414, + 473 + ], + [ + 1351, + 474 + ], + [ + 1283, + 475 + ], + [ + 1254, + 465 + ], + [ + 1229, + 473 + ], + [ + 1230, + 480 + ], + [ + 1212, + 480 + ], + [ + 1182, + 469 + ], + [ + 1025, + 477 + ], + [ + 954, + 474 + ], + [ + 809, + 477 + ], + [ + 754, + 478 + ], + [ + 324, + 471 + ], + [ + 276, + 507 + ], + [ + 284, + 490 + 
], + [ + 272, + 488 + ], + [ + 234, + 494 + ], + [ + 0, + 517 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1391, + 17 + ], + [ + 1392, + 218 + ], + [ + 1432, + 242 + ], + [ + 1430, + 0 + ], + [ + 1390, + 0 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1873, + 271 + ], + [ + 1905, + 272 + ], + [ + 1909, + 360 + ], + [ + 1872, + 360 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1957, + 274 + ], + [ + 1946, + 276 + ], + [ + 1932, + 268 + ], + [ + 1929, + 361 + ], + [ + 1959, + 353 + ], + [ + 1962, + 346 + ], + [ + 1978, + 343 + ], + [ + 1978, + 329 + ], + [ + 1954, + 331 + ], + [ + 1955, + 322 + ], + [ + 1978, + 320 + ], + [ + 1977, + 302 + ], + [ + 1958, + 303 + ], + [ + 1958, + 292 + ], + [ + 1978, + 290 + ], + [ + 1977, + 274 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1905, + 0 + ], + [ + 1893, + 574 + ], + [ + 1901, + 576 + ], + [ + 1936, + 579 + ], + [ + 1936, + 0 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1865, + 173 + ], + [ + 1807, + 187 + ], + [ + 1806, + 20 + ], + [ + 1862, + 22 + ], + [ + 1887, + 34 + ], + [ + 1884, + 52 + ], + [ + 1865, + 55 + ], + [ + 1866, + 79 + ], + [ + 1891, + 81 + ], + [ + 1892, + 102 + ], + [ + 1864, + 109 + ], + [ + 1864, + 134 + ], + [ + 1891, + 135 + ], + [ + 1891, + 156 + ], + [ + 1863, + 159 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1864, + 193 + ], + [ + 1759, + 216 + ], + [ + 1819, + 336 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1798, + 131 + ], + [ + 1734, + 135 + ], + [ + 1734, + 144 + ], + [ + 1798, + 139 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1956, + 19 + ], + [ + 1901, + 74 + ], + [ + 1840, + 21 + ], + [ + 1865, + 0 + ], + [ + 1935, + 0 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1791, + 181 + ], + [ + 1792, + 440 + ], + [ + 1787, + 448 + ], + [ + 1796, + 626 + ], + [ + 1828, + 626 + ], + [ + 1825, + 451 + ], + [ + 1817, + 435 + ], + [ + 1811, + 0 + ], + [ + 1783, + 0 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1800, + 348 + ], + [ + 1809, + 355 + ], + [ + 1816, + 368 + ], + [ + 1818, + 394 + ], + [ + 1818, + 414 + ], + [ + 1811, + 426 + ], + [ + 1801, + 433 + ], + [ + 1794, + 434 + ], + [ + 1787, + 432 + ], + [ + 1786, + 352 + ], + [ + 1791, + 348 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1724, + 86 + ], + [ + 1725, + 131 + ], + [ + 1729, + 178 + ], + [ + 1734, + 188 + ], + [ + 1745, + 164 + ], + [ + 1749, + 138 + ], + [ + 1735, + 99 + ], + [ + 1730, + 84 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1522, + 277 + ], + [ + 1523, + 309 + ], + [ + 1555, + 310 + ], + [ + 1555, + 279 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1644, + 500 + ], + [ + 1646, + 526 + ], + [ + 1604, + 526 + ], + [ + 1603, + 522 + ], + [ + 1602, + 497 + ], + [ + 1608, + 495 + ], + [ + 1630, + 494 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1567, + 483 + ], + [ + 1568, + 502 + ], + [ + 1535, + 505 + ], + [ + 1532, + 499 + ], + [ + 1531, + 480 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1480, + 488 + ], + [ + 1512, + 490 + ], + [ + 1510, + 474 + ], + [ + 1479, + 474 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1647, + 494 + ], + [ + 1647, + 485 + ], + [ + 1643, + 479 + ], + [ + 1637, + 481 + ], + [ + 1627, + 483 + ], + [ + 1622, + 483 + ], + [ + 1616, + 483 + ], + [ + 1611, + 479 + ], + [ + 1609, + 479 + ], + [ + 1606, + 481 + ], + [ + 1602, + 484 + ], + [ + 1600, + 491 + ], + [ + 1600, + 495 + 
], + [ + 1602, + 497 + ], + [ + 1604, + 499 + ], + [ + 1611, + 501 + ], + [ + 1627, + 501 + ], + [ + 1633, + 501 + ], + [ + 1641, + 502 + ], + [ + 1644, + 499 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1575, + 473 + ], + [ + 1572, + 469 + ], + [ + 1566, + 466 + ], + [ + 1556, + 466 + ], + [ + 1549, + 467 + ], + [ + 1537, + 468 + ], + [ + 1529, + 469 + ], + [ + 1528, + 476 + ], + [ + 1529, + 484 + ], + [ + 1534, + 485 + ], + [ + 1542, + 488 + ], + [ + 1551, + 488 + ], + [ + 1561, + 484 + ], + [ + 1567, + 480 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1513, + 469 + ], + [ + 1503, + 461 + ], + [ + 1496, + 462 + ], + [ + 1486, + 463 + ], + [ + 1481, + 464 + ], + [ + 1476, + 467 + ], + [ + 1472, + 472 + ], + [ + 1473, + 477 + ], + [ + 1484, + 476 + ], + [ + 1494, + 476 + ], + [ + 1507, + 476 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1346, + 278 + ], + [ + 1330, + 271 + ], + [ + 1319, + 271 + ], + [ + 1300, + 272 + ], + [ + 1285, + 283 + ], + [ + 1276, + 290 + ], + [ + 1262, + 295 + ], + [ + 1244, + 307 + ], + [ + 1242, + 328 + ], + [ + 1238, + 340 + ], + [ + 1233, + 353 + ], + [ + 1233, + 374 + ], + [ + 1232, + 387 + ], + [ + 1234, + 399 + ], + [ + 1242, + 413 + ], + [ + 1247, + 421 + ], + [ + 1250, + 431 + ], + [ + 1284, + 416 + ], + [ + 1328, + 414 + ], + [ + 1333, + 413 + ], + [ + 1336, + 395 + ], + [ + 1345, + 395 + ], + [ + 1363, + 395 + ], + [ + 1365, + 391 + ], + [ + 1362, + 374 + ], + [ + 1362, + 358 + ], + [ + 1365, + 350 + ], + [ + 1367, + 338 + ], + [ + 1368, + 319 + ], + [ + 1367, + 302 + ], + [ + 1365, + 290 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1332, + 299 + ], + [ + 1305, + 303 + ], + [ + 1306, + 313 + ], + [ + 1319, + 316 + ], + [ + 1322, + 322 + ], + [ + 1304, + 323 + ], + [ + 1307, + 336 + ], + [ + 1322, + 336 + ], + [ + 1322, + 344 + ], + [ + 1305, + 346 + ], + [ + 1307, + 357 + ], + [ + 1333, + 363 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1401, + 251 + ], + [ + 1332, + 252 + ], + [ + 1332, + 272 + ], + [ + 1403, + 271 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1373, + 292 + ], + [ + 1356, + 289 + ], + [ + 1345, + 296 + ], + [ + 1334, + 312 + ], + [ + 1335, + 316 + ], + [ + 1337, + 317 + ], + [ + 1347, + 333 + ], + [ + 1359, + 336 + ], + [ + 1370, + 335 + ], + [ + 1379, + 330 + ], + [ + 1384, + 318 + ], + [ + 1384, + 307 + ], + [ + 1377, + 294 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1328, + 230 + ], + [ + 1328, + 422 + ], + [ + 1338, + 429 + ], + [ + 1336, + 230 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1344, + 343 + ], + [ + 1350, + 351 + ], + [ + 1351, + 360 + ], + [ + 1349, + 367 + ], + [ + 1341, + 374 + ], + [ + 1334, + 375 + ], + [ + 1325, + 373 + ], + [ + 1322, + 369 + ], + [ + 1318, + 360 + ], + [ + 1317, + 351 + ], + [ + 1322, + 343 + ], + [ + 1329, + 339 + ], + [ + 1339, + 339 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1275, + 0 + ], + [ + 1282, + 485 + ], + [ + 1293, + 485 + ], + [ + 1286, + 0 + ] + ] + }, + { + "label": "polegroup", + "polygon": [ + [ + 1282, + 511 + ], + [ + 1270, + 481 + ], + [ + 1258, + 476 + ], + [ + 1224, + 477 + ], + [ + 1225, + 451 + ], + [ + 1256, + 451 + ], + [ + 1267, + 451 + ], + [ + 1284, + 461 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1301, + 252 + ], + [ + 1286, + 256 + ], + [ + 1279, + 267 + ], + [ + 1276, + 277 + ], + [ + 1276, + 289 + ], + [ + 1283, + 299 + ], + [ + 1293, + 302 + ], + [ + 1304, + 299 + ], + [ + 1313, + 291 
+ ], + [ + 1316, + 277 + ], + [ + 1316, + 268 + ], + [ + 1312, + 260 + ], + [ + 1307, + 253 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 666, + 211 + ], + [ + 666, + 221 + ], + [ + 672, + 222 + ], + [ + 690, + 222 + ], + [ + 692, + 218 + ], + [ + 688, + 211 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1071, + 274 + ], + [ + 1073, + 286 + ], + [ + 1096, + 286 + ], + [ + 1096, + 272 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1101, + 209 + ], + [ + 1098, + 213 + ], + [ + 1101, + 217 + ], + [ + 1117, + 218 + ], + [ + 1123, + 212 + ], + [ + 1120, + 208 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1086, + 296 + ], + [ + 1101, + 295 + ], + [ + 1102, + 303 + ], + [ + 1086, + 305 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1183, + 440 + ], + [ + 1183, + 378 + ], + [ + 1179, + 377 + ], + [ + 1180, + 375 + ], + [ + 1181, + 370 + ], + [ + 1180, + 365 + ], + [ + 1178, + 361 + ], + [ + 1175, + 360 + ], + [ + 1172, + 351 + ], + [ + 1170, + 345 + ], + [ + 1168, + 338 + ], + [ + 1164, + 329 + ], + [ + 1160, + 322 + ], + [ + 1153, + 318 + ], + [ + 1147, + 314 + ], + [ + 1142, + 311 + ], + [ + 1138, + 309 + ], + [ + 1135, + 309 + ], + [ + 1130, + 310 + ], + [ + 1126, + 310 + ], + [ + 1121, + 317 + ], + [ + 1118, + 322 + ], + [ + 1112, + 327 + ], + [ + 1109, + 331 + ], + [ + 1104, + 336 + ], + [ + 1098, + 341 + ], + [ + 1096, + 343 + ], + [ + 1093, + 341 + ], + [ + 1093, + 347 + ], + [ + 1093, + 348 + ], + [ + 1091, + 348 + ], + [ + 1089, + 353 + ], + [ + 1083, + 356 + ], + [ + 1079, + 360 + ], + [ + 1077, + 364 + ], + [ + 1075, + 369 + ], + [ + 1075, + 371 + ], + [ + 1071, + 373 + ], + [ + 1067, + 375 + ], + [ + 1067, + 381 + ], + [ + 1068, + 390 + ], + [ + 1066, + 396 + ], + [ + 1066, + 400 + ], + [ + 1061, + 401 + ], + [ + 1055, + 396 + ], + [ + 1056, + 388 + ], + [ + 1052, + 380 + ], + [ + 1053, + 372 + ], + [ + 1058, + 363 + ], + [ + 1061, + 358 + ], + [ + 1058, + 350 + ], + [ + 1051, + 347 + ], + [ + 1049, + 344 + ], + [ + 1046, + 337 + ], + [ + 1040, + 330 + ], + [ + 1036, + 326 + ], + [ + 1031, + 324 + ], + [ + 1025, + 325 + ], + [ + 1024, + 318 + ], + [ + 1020, + 315 + ], + [ + 1015, + 320 + ], + [ + 1012, + 322 + ], + [ + 1009, + 325 + ], + [ + 1003, + 327 + ], + [ + 1001, + 326 + ], + [ + 994, + 325 + ], + [ + 992, + 329 + ], + [ + 989, + 333 + ], + [ + 985, + 331 + ], + [ + 980, + 336 + ], + [ + 979, + 339 + ], + [ + 971, + 334 + ], + [ + 966, + 339 + ], + [ + 965, + 344 + ], + [ + 964, + 349 + ], + [ + 960, + 358 + ], + [ + 959, + 364 + ], + [ + 956, + 371 + ], + [ + 954, + 377 + ], + [ + 948, + 379 + ], + [ + 940, + 380 + ], + [ + 940, + 388 + ], + [ + 945, + 399 + ], + [ + 946, + 402 + ], + [ + 950, + 408 + ], + [ + 953, + 416 + ], + [ + 957, + 418 + ], + [ + 960, + 418 + ], + [ + 969, + 415 + ], + [ + 978, + 413 + ], + [ + 983, + 414 + ], + [ + 972, + 419 + ], + [ + 971, + 425 + ], + [ + 973, + 426 + ], + [ + 986, + 426 + ], + [ + 989, + 427 + ], + [ + 988, + 431 + ], + [ + 989, + 433 + ], + [ + 994, + 436 + ], + [ + 993, + 440 + ], + [ + 989, + 444 + ], + [ + 988, + 450 + ], + [ + 994, + 457 + ], + [ + 998, + 458 + ], + [ + 1006, + 462 + ], + [ + 1006, + 466 + ], + [ + 1009, + 476 + ], + [ + 1012, + 477 + ], + [ + 1012, + 465 + ], + [ + 1011, + 448 + ], + [ + 1016, + 446 + ], + [ + 1024, + 444 + ], + [ + 1029, + 448 + ], + [ + 1031, + 475 + ], + [ + 1031, + 477 + ], + [ + 1070, + 476 + ], + [ + 1141, + 478 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 695, + 402 + ], + [ + 701, + 391 + ], + [ + 703, 
+ 388 + ], + [ + 710, + 386 + ], + [ + 715, + 382 + ], + [ + 712, + 374 + ], + [ + 707, + 367 + ], + [ + 706, + 362 + ], + [ + 710, + 360 + ], + [ + 717, + 359 + ], + [ + 719, + 362 + ], + [ + 720, + 369 + ], + [ + 725, + 373 + ], + [ + 734, + 372 + ], + [ + 735, + 362 + ], + [ + 736, + 355 + ], + [ + 740, + 351 + ], + [ + 738, + 330 + ], + [ + 733, + 319 + ], + [ + 729, + 314 + ], + [ + 725, + 311 + ], + [ + 714, + 305 + ], + [ + 711, + 299 + ], + [ + 715, + 294 + ], + [ + 721, + 298 + ], + [ + 725, + 290 + ], + [ + 735, + 282 + ], + [ + 744, + 278 + ], + [ + 757, + 278 + ], + [ + 762, + 281 + ], + [ + 764, + 288 + ], + [ + 774, + 284 + ], + [ + 780, + 288 + ], + [ + 784, + 284 + ], + [ + 785, + 277 + ], + [ + 792, + 274 + ], + [ + 802, + 271 + ], + [ + 804, + 277 + ], + [ + 808, + 270 + ], + [ + 812, + 263 + ], + [ + 823, + 269 + ], + [ + 835, + 274 + ], + [ + 842, + 288 + ], + [ + 849, + 295 + ], + [ + 848, + 303 + ], + [ + 849, + 310 + ], + [ + 853, + 306 + ], + [ + 856, + 314 + ], + [ + 862, + 328 + ], + [ + 864, + 333 + ], + [ + 856, + 337 + ], + [ + 856, + 341 + ], + [ + 857, + 342 + ], + [ + 867, + 344 + ], + [ + 867, + 344 + ], + [ + 868, + 351 + ], + [ + 868, + 359 + ], + [ + 873, + 363 + ], + [ + 881, + 363 + ], + [ + 889, + 361 + ], + [ + 890, + 369 + ], + [ + 892, + 376 + ], + [ + 891, + 381 + ], + [ + 890, + 394 + ], + [ + 893, + 393 + ], + [ + 900, + 394 + ], + [ + 896, + 399 + ], + [ + 892, + 412 + ], + [ + 885, + 414 + ], + [ + 885, + 419 + ], + [ + 887, + 426 + ], + [ + 890, + 431 + ], + [ + 892, + 422 + ], + [ + 894, + 417 + ], + [ + 909, + 429 + ], + [ + 914, + 430 + ], + [ + 921, + 437 + ], + [ + 928, + 440 + ], + [ + 935, + 442 + ], + [ + 941, + 450 + ], + [ + 947, + 443 + ], + [ + 957, + 441 + ], + [ + 970, + 445 + ], + [ + 975, + 445 + ], + [ + 971, + 454 + ], + [ + 972, + 461 + ], + [ + 976, + 474 + ], + [ + 977, + 476 + ], + [ + 840, + 478 + ], + [ + 838, + 470 + ], + [ + 838, + 461 + ], + [ + 838, + 453 + ], + [ + 840, + 448 + ], + [ + 845, + 445 + ], + [ + 816, + 444 + ], + [ + 813, + 453 + ], + [ + 810, + 463 + ], + [ + 810, + 470 + ], + [ + 807, + 480 + ], + [ + 765, + 480 + ], + [ + 685, + 424 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 874, + 342 + ], + [ + 876, + 348 + ], + [ + 887, + 347 + ], + [ + 887, + 343 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 868, + 387 + ], + [ + 869, + 477 + ], + [ + 872, + 477 + ], + [ + 870, + 386 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 931, + 423 + ], + [ + 932, + 475 + ], + [ + 935, + 475 + ], + [ + 933, + 422 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 964, + 447 + ], + [ + 965, + 476 + ], + [ + 966, + 476 + ], + [ + 966, + 442 + ], + [ + 964, + 441 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1042, + 448 + ], + [ + 1041, + 462 + ], + [ + 1049, + 462 + ], + [ + 1048, + 449 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1103, + 378 + ], + [ + 1137, + 383 + ], + [ + 1159, + 388 + ], + [ + 1162, + 393 + ], + [ + 1162, + 440 + ], + [ + 1164, + 440 + ], + [ + 1164, + 405 + ], + [ + 1164, + 389 + ], + [ + 1158, + 384 + ], + [ + 1137, + 380 + ], + [ + 1100, + 375 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1164, + 318 + ], + [ + 1164, + 432 + ], + [ + 1167, + 432 + ], + [ + 1167, + 318 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1095, + 370 + ], + [ + 1095, + 392 + ], + [ + 1105, + 393 + ], + [ + 1104, + 369 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1014, + 361 + ], + [ + 1015, + 477 + ], + [ 
+ 1018, + 477 + ], + [ + 1016, + 361 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1020, + 425 + ], + [ + 1014, + 425 + ], + [ + 1014, + 443 + ], + [ + 1020, + 443 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 823, + 404 + ], + [ + 850, + 404 + ], + [ + 851, + 419 + ], + [ + 824, + 420 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 838, + 426 + ], + [ + 831, + 426 + ], + [ + 833, + 442 + ], + [ + 838, + 441 + ], + [ + 846, + 442 + ], + [ + 846, + 429 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 836, + 388 + ], + [ + 838, + 479 + ], + [ + 840, + 479 + ], + [ + 838, + 387 + ], + [ + 837, + 380 + ], + [ + 815, + 373 + ], + [ + 775, + 368 + ], + [ + 774, + 371 + ], + [ + 827, + 380 + ], + [ + 835, + 383 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 768, + 365 + ], + [ + 769, + 386 + ], + [ + 778, + 386 + ], + [ + 777, + 365 + ] + ] + }, + { + "label": "persongroup", + "polygon": [ + [ + 48, + 434 + ], + [ + 48, + 423 + ], + [ + 41, + 421 + ], + [ + 36, + 422 + ], + [ + 30, + 428 + ], + [ + 28, + 426 + ], + [ + 21, + 432 + ], + [ + 18, + 433 + ], + [ + 18, + 425 + ], + [ + 11, + 420 + ], + [ + 6, + 419 + ], + [ + 1, + 426 + ], + [ + 7, + 439 + ], + [ + 19, + 448 + ], + [ + 42, + 449 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 280, + 430 + ], + [ + 275, + 478 + ], + [ + 294, + 473 + ], + [ + 292, + 449 + ], + [ + 289, + 435 + ], + [ + 289, + 429 + ], + [ + 285, + 428 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 454, + 418 + ], + [ + 445, + 409 + ], + [ + 422, + 407 + ], + [ + 408, + 410 + ], + [ + 380, + 409 + ], + [ + 355, + 408 + ], + [ + 322, + 421 + ], + [ + 299, + 442 + ], + [ + 281, + 463 + ], + [ + 270, + 476 + ], + [ + 266, + 489 + ], + [ + 269, + 505 + ], + [ + 275, + 517 + ], + [ + 279, + 525 + ], + [ + 285, + 528 + ], + [ + 292, + 528 + ], + [ + 297, + 525 + ], + [ + 302, + 514 + ], + [ + 358, + 515 + ], + [ + 361, + 521 + ], + [ + 365, + 522 + ], + [ + 374, + 520 + ], + [ + 407, + 492 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 201, + 420 + ], + [ + 155, + 418 + ], + [ + 134, + 423 + ], + [ + 113, + 422 + ], + [ + 71, + 422 + ], + [ + 60, + 429 + ], + [ + 42, + 434 + ], + [ + 21, + 446 + ], + [ + 41, + 526 + ], + [ + 51, + 540 + ], + [ + 73, + 536 + ], + [ + 77, + 543 + ], + [ + 81, + 548 + ], + [ + 92, + 550 + ], + [ + 106, + 551 + ], + [ + 119, + 547 + ], + [ + 129, + 531 + ], + [ + 189, + 524 + ], + [ + 193, + 533 + ], + [ + 202, + 542 + ], + [ + 215, + 542 + ], + [ + 223, + 536 + ], + [ + 227, + 518 + ], + [ + 240, + 513 + ], + [ + 241, + 503 + ], + [ + 240, + 481 + ], + [ + 236, + 459 + ], + [ + 222, + 432 + ], + [ + 207, + 422 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 22, + 558 + ], + [ + 24, + 568 + ], + [ + 28, + 572 + ], + [ + 38, + 573 + ], + [ + 44, + 571 + ], + [ + 49, + 566 + ], + [ + 53, + 555 + ], + [ + 55, + 532 + ], + [ + 52, + 496 + ], + [ + 46, + 471 + ], + [ + 28, + 443 + ], + [ + 15, + 429 + ], + [ + 0, + 422 + ], + [ + 0, + 563 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 574, + 394 + ], + [ + 573, + 202 + ], + [ + 580, + 202 + ], + [ + 580, + 395 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 724, + 325 + ], + [ + 725, + 411 + ], + [ + 730, + 412 + ], + [ + 728, + 325 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 685, + 390 + ], + [ + 550, + 391 + ], + [ + 550, + 385 + ], + [ + 547, + 368 + ], + [ + 534, + 364 + ], + [ + 510, + 365 + ], + [ + 492, + 367 + ], + [ + 488, + 369 + ], + [ + 485, 
+ 378 + ], + [ + 486, + 383 + ], + [ + 489, + 388 + ], + [ + 487, + 395 + ], + [ + 467, + 402 + ], + [ + 435, + 430 + ], + [ + 397, + 468 + ], + [ + 375, + 499 + ], + [ + 369, + 512 + ], + [ + 365, + 559 + ], + [ + 372, + 589 + ], + [ + 384, + 612 + ], + [ + 392, + 621 + ], + [ + 394, + 634 + ], + [ + 400, + 645 + ], + [ + 410, + 652 + ], + [ + 422, + 655 + ], + [ + 436, + 655 + ], + [ + 447, + 649 + ], + [ + 454, + 636 + ], + [ + 456, + 626 + ], + [ + 518, + 624 + ], + [ + 685, + 625 + ], + [ + 690, + 638 + ], + [ + 697, + 645 + ], + [ + 711, + 649 + ], + [ + 723, + 649 + ], + [ + 736, + 645 + ], + [ + 740, + 638 + ], + [ + 742, + 624 + ], + [ + 743, + 617 + ], + [ + 744, + 601 + ], + [ + 767, + 592 + ], + [ + 771, + 606 + ], + [ + 775, + 611 + ], + [ + 786, + 612 + ], + [ + 793, + 612 + ], + [ + 801, + 608 + ], + [ + 805, + 601 + ], + [ + 809, + 575 + ], + [ + 809, + 542 + ], + [ + 805, + 501 + ], + [ + 780, + 473 + ], + [ + 797, + 472 + ], + [ + 800, + 467 + ], + [ + 797, + 453 + ], + [ + 783, + 450 + ], + [ + 771, + 450 + ], + [ + 768, + 451 + ], + [ + 770, + 461 + ], + [ + 770, + 467 + ], + [ + 747, + 429 + ], + [ + 730, + 409 + ], + [ + 713, + 397 + ], + [ + 695, + 391 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 468, + 504 + ], + [ + 469, + 528 + ], + [ + 570, + 528 + ], + [ + 572, + 504 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1109, + 449 + ], + [ + 1093, + 448 + ], + [ + 1093, + 445 + ], + [ + 1082, + 445 + ], + [ + 1082, + 448 + ], + [ + 1078, + 450 + ], + [ + 1073, + 459 + ], + [ + 1071, + 457 + ], + [ + 1070, + 458 + ], + [ + 1071, + 462 + ], + [ + 1071, + 471 + ], + [ + 1070, + 477 + ], + [ + 1079, + 476 + ], + [ + 1103, + 478 + ], + [ + 1110, + 478 + ], + [ + 1114, + 481 + ], + [ + 1115, + 470 + ], + [ + 1114, + 461 + ], + [ + 1112, + 455 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1345, + 419 + ], + [ + 1353, + 432 + ], + [ + 1363, + 442 + ], + [ + 1361, + 456 + ], + [ + 1358, + 470 + ], + [ + 1337, + 478 + ], + [ + 1341, + 462 + ], + [ + 1341, + 441 + ], + [ + 1336, + 423 + ], + [ + 1335, + 414 + ], + [ + 1340, + 414 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1321, + 417 + ], + [ + 1317, + 416 + ], + [ + 1313, + 418 + ], + [ + 1311, + 427 + ], + [ + 1310, + 439 + ], + [ + 1307, + 447 + ], + [ + 1308, + 455 + ], + [ + 1310, + 470 + ], + [ + 1313, + 486 + ], + [ + 1322, + 507 + ], + [ + 1331, + 525 + ], + [ + 1333, + 530 + ], + [ + 1334, + 540 + ], + [ + 1337, + 544 + ], + [ + 1346, + 540 + ], + [ + 1349, + 533 + ], + [ + 1347, + 523 + ], + [ + 1337, + 509 + ], + [ + 1329, + 486 + ], + [ + 1329, + 459 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1362, + 480 + ], + [ + 1362, + 502 + ], + [ + 1357, + 516 + ], + [ + 1350, + 519 + ], + [ + 1344, + 514 + ], + [ + 1343, + 503 + ], + [ + 1341, + 483 + ], + [ + 1339, + 471 + ], + [ + 1345, + 470 + ], + [ + 1357, + 470 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1318, + 436 + ], + [ + 1311, + 454 + ], + [ + 1311, + 473 + ], + [ + 1315, + 493 + ], + [ + 1317, + 509 + ], + [ + 1317, + 520 + ], + [ + 1316, + 529 + ], + [ + 1310, + 537 + ], + [ + 1305, + 543 + ], + [ + 1311, + 547 + ], + [ + 1320, + 545 + ], + [ + 1331, + 544 + ], + [ + 1334, + 543 + ], + [ + 1338, + 537 + ], + [ + 1335, + 523 + ], + [ + 1337, + 509 + ], + [ + 1342, + 479 + ], + [ + 1343, + 464 + ], + [ + 1343, + 446 + ], + [ + 1342, + 434 + ], + [ + 1338, + 420 + ], + [ + 1334, + 413 + ], + [ + 1334, + 405 + ], + [ + 1330, + 400 + ], + [ + 1322, + 399 + ], + [ + 1317, + 405 + 
], + [ + 1317, + 411 + ], + [ + 1318, + 417 + ], + [ + 1319, + 422 + ], + [ + 1319, + 428 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1145, + 453 + ], + [ + 1149, + 449 + ], + [ + 1154, + 438 + ], + [ + 1159, + 432 + ], + [ + 1206, + 434 + ], + [ + 1210, + 443 + ], + [ + 1218, + 453 + ], + [ + 1222, + 464 + ], + [ + 1222, + 473 + ], + [ + 1222, + 474 + ], + [ + 1219, + 477 + ], + [ + 1220, + 486 + ], + [ + 1220, + 492 + ], + [ + 1217, + 497 + ], + [ + 1215, + 497 + ], + [ + 1209, + 497 + ], + [ + 1208, + 497 + ], + [ + 1207, + 493 + ], + [ + 1206, + 488 + ], + [ + 1157, + 486 + ], + [ + 1157, + 490 + ], + [ + 1156, + 493 + ], + [ + 1156, + 493 + ], + [ + 1154, + 496 + ], + [ + 1151, + 496 + ], + [ + 1148, + 496 + ], + [ + 1144, + 496 + ], + [ + 1142, + 496 + ], + [ + 1142, + 492 + ], + [ + 1142, + 490 + ], + [ + 1142, + 486 + ], + [ + 1143, + 478 + ], + [ + 1143, + 465 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000157_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000157_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..8cb0982e632f0b951749560da75560930149854d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000157_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000157_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000157_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d0af04260882a23a4fef58b07db9dfdcf7287054 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000157_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000158_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000158_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..a202e2fb2862315ddd41475712e3a3f6a3b3102b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000158_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000158_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000158_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..4cf2b75faf783141fa62d56ea48a4bcdca73d148 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000158_000019_gtFine_polygons.json @@ -0,0 +1,6217 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 25, + 307 + ], + [ + 2048, + 239 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 293 + ] + ] + }, + { + "label": "terrain", + 
"polygon": [ + [ + 1262, + 436 + ], + [ + 1238, + 435 + ], + [ + 1206, + 432 + ], + [ + 1202, + 428 + ], + [ + 1207, + 425 + ], + [ + 1226, + 424 + ], + [ + 1242, + 422 + ], + [ + 1258, + 422 + ], + [ + 1267, + 425 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1107, + 407 + ], + [ + 1101, + 410 + ], + [ + 1099, + 420 + ], + [ + 1099, + 424 + ], + [ + 1099, + 426 + ], + [ + 1100, + 428 + ], + [ + 1101, + 429 + ], + [ + 1109, + 429 + ], + [ + 1114, + 428 + ], + [ + 1116, + 419 + ], + [ + 1116, + 405 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 531, + 29 + ], + [ + 604, + 151 + ], + [ + 754, + 243 + ], + [ + 918, + 280 + ], + [ + 1027, + 291 + ], + [ + 1118, + 253 + ], + [ + 1182, + 211 + ], + [ + 1236, + 161 + ], + [ + 1274, + 86 + ], + [ + 1334, + 0 + ], + [ + 518, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 0, + 694 + ], + [ + 809, + 515 + ], + [ + 817, + 497 + ], + [ + 807, + 489 + ], + [ + 729, + 496 + ], + [ + 0, + 587 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 15, + 505 + ], + [ + 818, + 459 + ], + [ + 972, + 426 + ], + [ + 1096, + 380 + ], + [ + 1013, + 255 + ], + [ + 971, + 251 + ], + [ + 970, + 259 + ], + [ + 956, + 250 + ], + [ + 954, + 173 + ], + [ + 937, + 166 + ], + [ + 912, + 168 + ], + [ + 913, + 154 + ], + [ + 898, + 142 + ], + [ + 836, + 147 + ], + [ + 839, + 172 + ], + [ + 794, + 175 + ], + [ + 794, + 185 + ], + [ + 766, + 185 + ], + [ + 764, + 195 + ], + [ + 765, + 170 + ], + [ + 761, + 155 + ], + [ + 742, + 152 + ], + [ + 712, + 158 + ], + [ + 707, + 136 + ], + [ + 691, + 136 + ], + [ + 685, + 137 + ], + [ + 656, + 140 + ], + [ + 656, + 122 + ], + [ + 609, + 81 + ], + [ + 278, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 505 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 649, + 217 + ], + [ + 648, + 235 + ], + [ + 643, + 241 + ], + [ + 644, + 264 + ], + [ + 647, + 271 + ], + [ + 651, + 279 + ], + [ + 653, + 285 + ], + [ + 654, + 297 + ], + [ + 654, + 314 + ], + [ + 654, + 320 + ], + [ + 657, + 332 + ], + [ + 670, + 339 + ], + [ + 693, + 356 + ], + [ + 705, + 368 + ], + [ + 735, + 373 + ], + [ + 763, + 379 + ], + [ + 778, + 382 + ], + [ + 786, + 378 + ], + [ + 787, + 377 + ], + [ + 788, + 431 + ], + [ + 796, + 431 + ], + [ + 792, + 375 + ], + [ + 800, + 373 + ], + [ + 808, + 371 + ], + [ + 818, + 375 + ], + [ + 831, + 373 + ], + [ + 838, + 372 + ], + [ + 850, + 368 + ], + [ + 853, + 367 + ], + [ + 860, + 375 + ], + [ + 871, + 377 + ], + [ + 877, + 379 + ], + [ + 878, + 404 + ], + [ + 880, + 404 + ], + [ + 881, + 381 + ], + [ + 885, + 380 + ], + [ + 896, + 376 + ], + [ + 905, + 377 + ], + [ + 909, + 382 + ], + [ + 918, + 384 + ], + [ + 922, + 406 + ], + [ + 933, + 407 + ], + [ + 936, + 393 + ], + [ + 944, + 390 + ], + [ + 950, + 386 + ], + [ + 956, + 380 + ], + [ + 956, + 372 + ], + [ + 964, + 369 + ], + [ + 972, + 368 + ], + [ + 984, + 368 + ], + [ + 994, + 361 + ], + [ + 997, + 348 + ], + [ + 992, + 333 + ], + [ + 990, + 313 + ], + [ + 989, + 287 + ], + [ + 988, + 280 + ], + [ + 974, + 280 + ], + [ + 967, + 273 + ], + [ + 945, + 266 + ], + [ + 945, + 266 + ], + [ + 933, + 272 + ], + [ + 924, + 270 + ], + [ + 918, + 275 + ], + [ + 910, + 286 + ], + [ + 901, + 287 + ], + [ + 873, + 283 + ], + [ + 853, + 280 + ], + [ + 838, + 272 + ], + [ + 840, + 260 + ], + [ + 837, + 250 + ], + [ + 809, + 241 + ], + [ + 797, + 235 + ], + [ + 806, + 220 + ], + [ + 785, + 213 + ], + [ + 766, + 207 + ], + [ + 741, + 199 + ], + [ + 727, + 187 + ], + [ + 720, + 182 + ], + [ + 713, + 191 + ], + [ + 708, + 203 + ], + [ + 685, + 
210 + ], + [ + 676, + 208 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 790, + 338 + ], + [ + 774, + 332 + ], + [ + 775, + 388 + ], + [ + 782, + 372 + ], + [ + 790, + 370 + ], + [ + 790, + 364 + ], + [ + 780, + 363 + ], + [ + 780, + 359 + ], + [ + 790, + 357 + ], + [ + 790, + 351 + ], + [ + 778, + 350 + ], + [ + 778, + 345 + ], + [ + 789, + 345 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 661, + 145 + ], + [ + 672, + 335 + ], + [ + 685, + 335 + ], + [ + 671, + 143 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 720, + 327 + ], + [ + 701, + 327 + ], + [ + 590, + 330 + ], + [ + 595, + 334 + ], + [ + 639, + 339 + ], + [ + 638, + 342 + ], + [ + 641, + 435 + ], + [ + 717, + 516 + ], + [ + 788, + 506 + ], + [ + 775, + 330 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 2014, + 541 + ], + [ + 1518, + 453 + ], + [ + 1355, + 406 + ], + [ + 1195, + 415 + ], + [ + 1205, + 378 + ], + [ + 1217, + 378 + ], + [ + 1438, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 550 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1342, + 394 + ], + [ + 1324, + 393 + ], + [ + 1321, + 397 + ], + [ + 1317, + 399 + ], + [ + 1309, + 402 + ], + [ + 1303, + 399 + ], + [ + 1292, + 403 + ], + [ + 1278, + 403 + ], + [ + 1272, + 403 + ], + [ + 1267, + 407 + ], + [ + 1261, + 407 + ], + [ + 1251, + 406 + ], + [ + 1244, + 405 + ], + [ + 1238, + 407 + ], + [ + 1235, + 408 + ], + [ + 1229, + 419 + ], + [ + 1229, + 425 + ], + [ + 1234, + 425 + ], + [ + 1241, + 425 + ], + [ + 1251, + 426 + ], + [ + 1262, + 426 + ], + [ + 1281, + 427 + ], + [ + 1307, + 430 + ], + [ + 1312, + 430 + ], + [ + 1320, + 430 + ], + [ + 1328, + 429 + ], + [ + 1333, + 419 + ], + [ + 1336, + 411 + ], + [ + 1341, + 405 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 195, + 444 + ], + [ + 197, + 343 + ], + [ + 193, + 332 + ], + [ + 187, + 325 + ], + [ + 185, + 318 + ], + [ + 176, + 319 + ], + [ + 160, + 325 + ], + [ + 152, + 334 + ], + [ + 141, + 331 + ], + [ + 152, + 311 + ], + [ + 150, + 290 + ], + [ + 156, + 270 + ], + [ + 132, + 260 + ], + [ + 120, + 268 + ], + [ + 97, + 270 + ], + [ + 94, + 262 + ], + [ + 105, + 248 + ], + [ + 98, + 236 + ], + [ + 73, + 241 + ], + [ + 44, + 235 + ], + [ + 30, + 235 + ], + [ + 22, + 246 + ], + [ + 9, + 255 + ], + [ + 0, + 254 + ], + [ + 0, + 0 + ], + [ + 622, + 0 + ], + [ + 617, + 7 + ], + [ + 615, + 16 + ], + [ + 622, + 16 + ], + [ + 628, + 16 + ], + [ + 635, + 22 + ], + [ + 631, + 27 + ], + [ + 624, + 32 + ], + [ + 620, + 32 + ], + [ + 618, + 36 + ], + [ + 622, + 56 + ], + [ + 628, + 65 + ], + [ + 624, + 83 + ], + [ + 631, + 107 + ], + [ + 637, + 120 + ], + [ + 634, + 128 + ], + [ + 633, + 150 + ], + [ + 642, + 443 + ], + [ + 590, + 443 + ], + [ + 595, + 356 + ], + [ + 595, + 252 + ], + [ + 590, + 252 + ], + [ + 583, + 265 + ], + [ + 577, + 287 + ], + [ + 579, + 304 + ], + [ + 575, + 317 + ], + [ + 562, + 323 + ], + [ + 553, + 342 + ], + [ + 547, + 344 + ], + [ + 534, + 322 + ], + [ + 527, + 305 + ], + [ + 522, + 282 + ], + [ + 502, + 254 + ], + [ + 498, + 230 + ], + [ + 481, + 247 + ], + [ + 457, + 257 + ], + [ + 436, + 272 + ], + [ + 413, + 284 + ], + [ + 395, + 293 + ], + [ + 384, + 334 + ], + [ + 381, + 356 + ], + [ + 382, + 385 + ], + [ + 383, + 425 + ], + [ + 379, + 453 + ], + [ + 352, + 452 + ], + [ + 343, + 438 + ], + [ + 341, + 413 + ], + [ + 338, + 349 + ], + [ + 330, + 309 + ], + [ + 269, + 233 + ], + [ + 251, + 270 + ], + [ + 261, + 277 + ], + [ + 275, + 286 + ], + [ + 275, + 289 + ], + [ + 256, + 289 + ], + [ + 248, + 288 + ], + [ 
+ 247, + 288 + ], + [ + 236, + 285 + ], + [ + 231, + 296 + ], + [ + 245, + 297 + ], + [ + 259, + 299 + ], + [ + 274, + 302 + ], + [ + 286, + 303 + ], + [ + 298, + 312 + ], + [ + 299, + 315 + ], + [ + 285, + 316 + ], + [ + 279, + 321 + ], + [ + 282, + 327 + ], + [ + 281, + 337 + ], + [ + 270, + 341 + ], + [ + 257, + 341 + ], + [ + 242, + 336 + ], + [ + 229, + 336 + ], + [ + 220, + 333 + ], + [ + 206, + 334 + ], + [ + 205, + 439 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 119, + 350 + ], + [ + 108, + 354 + ], + [ + 102, + 365 + ], + [ + 102, + 376 + ], + [ + 99, + 413 + ], + [ + 104, + 435 + ], + [ + 133, + 439 + ], + [ + 157, + 431 + ], + [ + 171, + 424 + ], + [ + 170, + 412 + ], + [ + 158, + 386 + ], + [ + 148, + 367 + ], + [ + 131, + 354 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 329, + 358 + ], + [ + 305, + 361 + ], + [ + 305, + 366 + ], + [ + 313, + 366 + ], + [ + 313, + 375 + ], + [ + 306, + 376 + ], + [ + 307, + 383 + ], + [ + 317, + 383 + ], + [ + 318, + 390 + ], + [ + 310, + 389 + ], + [ + 311, + 396 + ], + [ + 324, + 398 + ], + [ + 331, + 400 + ], + [ + 334, + 390 + ], + [ + 332, + 370 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 331, + 357 + ], + [ + 341, + 357 + ], + [ + 341, + 361 + ], + [ + 353, + 359 + ], + [ + 353, + 364 + ], + [ + 344, + 366 + ], + [ + 345, + 374 + ], + [ + 355, + 372 + ], + [ + 356, + 378 + ], + [ + 349, + 379 + ], + [ + 349, + 386 + ], + [ + 359, + 384 + ], + [ + 359, + 397 + ], + [ + 336, + 400 + ], + [ + 333, + 386 + ], + [ + 331, + 368 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 325, + 344 + ], + [ + 327, + 381 + ], + [ + 335, + 380 + ], + [ + 334, + 343 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 367, + 417 + ], + [ + 363, + 400 + ], + [ + 359, + 393 + ], + [ + 348, + 390 + ], + [ + 348, + 381 + ], + [ + 345, + 373 + ], + [ + 341, + 371 + ], + [ + 337, + 372 + ], + [ + 330, + 375 + ], + [ + 327, + 378 + ], + [ + 325, + 382 + ], + [ + 325, + 387 + ], + [ + 325, + 391 + ], + [ + 327, + 398 + ], + [ + 325, + 405 + ], + [ + 320, + 418 + ], + [ + 322, + 430 + ], + [ + 327, + 445 + ], + [ + 357, + 445 + ], + [ + 366, + 436 + ], + [ + 368, + 429 + ], + [ + 368, + 424 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 553, + 395 + ], + [ + 559, + 396 + ], + [ + 568, + 402 + ], + [ + 570, + 411 + ], + [ + 574, + 421 + ], + [ + 579, + 429 + ], + [ + 579, + 431 + ], + [ + 558, + 435 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 498, + 349 + ], + [ + 500, + 368 + ], + [ + 520, + 370 + ], + [ + 519, + 347 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 535, + 365 + ], + [ + 532, + 374 + ], + [ + 518, + 371 + ], + [ + 513, + 366 + ], + [ + 507, + 364 + ], + [ + 498, + 364 + ], + [ + 493, + 364 + ], + [ + 490, + 370 + ], + [ + 490, + 376 + ], + [ + 495, + 382 + ], + [ + 504, + 387 + ], + [ + 505, + 394 + ], + [ + 498, + 416 + ], + [ + 498, + 427 + ], + [ + 508, + 445 + ], + [ + 529, + 445 + ], + [ + 543, + 441 + ], + [ + 565, + 438 + ], + [ + 575, + 431 + ], + [ + 570, + 428 + ], + [ + 568, + 419 + ], + [ + 565, + 409 + ], + [ + 561, + 394 + ], + [ + 561, + 385 + ], + [ + 563, + 379 + ], + [ + 561, + 370 + ], + [ + 545, + 365 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 247, + 100 + ], + [ + 0, + 119 + ], + [ + 0, + 113 + ], + [ + 243, + 95 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 182, + 41 + ], + [ + 182, + 54 + ], + [ + 144, + 49 + ], + [ + 146, + 40 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 422, + 337 + 
], + [ + 427, + 381 + ], + [ + 428, + 381 + ], + [ + 425, + 331 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 428, + 376 + ], + [ + 421, + 380 + ], + [ + 422, + 387 + ], + [ + 425, + 393 + ], + [ + 434, + 393 + ], + [ + 436, + 389 + ], + [ + 436, + 384 + ], + [ + 433, + 377 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 428, + 438 + ], + [ + 428, + 384 + ], + [ + 423, + 383 + ], + [ + 424, + 429 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 424, + 316 + ], + [ + 426, + 342 + ], + [ + 419, + 342 + ], + [ + 419, + 337 + ], + [ + 415, + 337 + ], + [ + 415, + 334 + ], + [ + 419, + 333 + ], + [ + 419, + 330 + ], + [ + 413, + 330 + ], + [ + 413, + 327 + ], + [ + 418, + 326 + ], + [ + 418, + 324 + ], + [ + 413, + 323 + ], + [ + 413, + 318 + ], + [ + 417, + 317 + ], + [ + 418, + 316 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 883, + 234 + ], + [ + 892, + 406 + ], + [ + 899, + 406 + ], + [ + 890, + 235 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 967, + 180 + ], + [ + 901, + 184 + ], + [ + 775, + 215 + ], + [ + 762, + 229 + ], + [ + 757, + 245 + ], + [ + 758, + 282 + ], + [ + 761, + 283 + ], + [ + 763, + 277 + ], + [ + 763, + 238 + ], + [ + 778, + 218 + ], + [ + 840, + 201 + ], + [ + 899, + 188 + ], + [ + 972, + 184 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 965, + 168 + ], + [ + 968, + 216 + ], + [ + 968, + 218 + ], + [ + 975, + 218 + ], + [ + 988, + 217 + ], + [ + 989, + 166 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 822, + 380 + ], + [ + 822, + 399 + ], + [ + 823, + 404 + ], + [ + 824, + 406 + ], + [ + 829, + 406 + ], + [ + 842, + 408 + ], + [ + 843, + 405 + ], + [ + 838, + 381 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 829, + 379 + ], + [ + 832, + 414 + ], + [ + 835, + 414 + ], + [ + 831, + 379 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 932, + 358 + ], + [ + 934, + 409 + ], + [ + 937, + 410 + ], + [ + 935, + 357 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 922, + 373 + ], + [ + 926, + 395 + ], + [ + 933, + 395 + ], + [ + 933, + 367 + ], + [ + 923, + 367 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 943, + 366 + ], + [ + 945, + 394 + ], + [ + 933, + 394 + ], + [ + 932, + 367 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 942, + 404 + ], + [ + 947, + 394 + ], + [ + 925, + 393 + ], + [ + 930, + 402 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 968, + 344 + ], + [ + 974, + 426 + ], + [ + 977, + 425 + ], + [ + 972, + 343 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 987, + 443 + ], + [ + 1025, + 435 + ], + [ + 1047, + 431 + ], + [ + 1072, + 427 + ], + [ + 1074, + 422 + ], + [ + 1060, + 419 + ], + [ + 1028, + 427 + ], + [ + 964, + 436 + ], + [ + 964, + 447 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 972, + 453 + ], + [ + 991, + 454 + ], + [ + 1074, + 450 + ], + [ + 1117, + 448 + ], + [ + 1166, + 448 + ], + [ + 1172, + 446 + ], + [ + 1174, + 443 + ], + [ + 1121, + 433 + ], + [ + 1077, + 438 + ], + [ + 1009, + 447 + ], + [ + 990, + 449 + ], + [ + 972, + 451 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 959, + 384 + ], + [ + 964, + 427 + ], + [ + 974, + 425 + ], + [ + 972, + 384 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1608, + 13 + ], + [ + 1618, + 0 + ], + [ + 1196, + 0 + ], + [ + 1195, + 2 + ], + [ + 1199, + 10 + ], + [ + 1205, + 15 + ], + [ + 1206, + 24 + ], + [ + 1197, + 31 + ], + [ + 1194, + 39 + ], + [ + 1188, + 38 + ], + [ + 
1174, + 44 + ], + [ + 1173, + 61 + ], + [ + 1171, + 73 + ], + [ + 1167, + 77 + ], + [ + 1173, + 80 + ], + [ + 1177, + 89 + ], + [ + 1179, + 91 + ], + [ + 1188, + 91 + ], + [ + 1194, + 99 + ], + [ + 1194, + 106 + ], + [ + 1198, + 114 + ], + [ + 1202, + 122 + ], + [ + 1195, + 121 + ], + [ + 1187, + 127 + ], + [ + 1178, + 128 + ], + [ + 1171, + 138 + ], + [ + 1158, + 154 + ], + [ + 1154, + 150 + ], + [ + 1152, + 156 + ], + [ + 1146, + 163 + ], + [ + 1144, + 171 + ], + [ + 1144, + 175 + ], + [ + 1141, + 175 + ], + [ + 1140, + 167 + ], + [ + 1136, + 155 + ], + [ + 1120, + 167 + ], + [ + 1121, + 162 + ], + [ + 1121, + 158 + ], + [ + 1122, + 152 + ], + [ + 1115, + 152 + ], + [ + 1115, + 143 + ], + [ + 1115, + 139 + ], + [ + 1107, + 142 + ], + [ + 1103, + 133 + ], + [ + 1106, + 126 + ], + [ + 1108, + 121 + ], + [ + 1100, + 117 + ], + [ + 1092, + 102 + ], + [ + 1087, + 92 + ], + [ + 1084, + 87 + ], + [ + 1075, + 93 + ], + [ + 1068, + 101 + ], + [ + 1056, + 103 + ], + [ + 1052, + 107 + ], + [ + 1052, + 117 + ], + [ + 1049, + 120 + ], + [ + 1044, + 123 + ], + [ + 1048, + 131 + ], + [ + 1047, + 136 + ], + [ + 1046, + 140 + ], + [ + 1049, + 147 + ], + [ + 1053, + 155 + ], + [ + 1046, + 151 + ], + [ + 1038, + 146 + ], + [ + 1041, + 152 + ], + [ + 1044, + 157 + ], + [ + 1046, + 170 + ], + [ + 1039, + 169 + ], + [ + 1036, + 175 + ], + [ + 1037, + 180 + ], + [ + 1043, + 181 + ], + [ + 1050, + 185 + ], + [ + 1051, + 191 + ], + [ + 1045, + 193 + ], + [ + 1040, + 186 + ], + [ + 1037, + 191 + ], + [ + 1042, + 196 + ], + [ + 1034, + 204 + ], + [ + 1026, + 209 + ], + [ + 1025, + 213 + ], + [ + 1028, + 216 + ], + [ + 1027, + 221 + ], + [ + 1024, + 228 + ], + [ + 1019, + 232 + ], + [ + 1013, + 234 + ], + [ + 1020, + 238 + ], + [ + 1015, + 246 + ], + [ + 1012, + 249 + ], + [ + 1008, + 243 + ], + [ + 1006, + 249 + ], + [ + 1005, + 257 + ], + [ + 1005, + 260 + ], + [ + 1000, + 263 + ], + [ + 998, + 269 + ], + [ + 995, + 278 + ], + [ + 999, + 285 + ], + [ + 1000, + 288 + ], + [ + 1008, + 304 + ], + [ + 1012, + 304 + ], + [ + 1025, + 307 + ], + [ + 1037, + 310 + ], + [ + 1046, + 313 + ], + [ + 993, + 287 + ], + [ + 991, + 303 + ], + [ + 993, + 321 + ], + [ + 994, + 335 + ], + [ + 999, + 340 + ], + [ + 1003, + 345 + ], + [ + 1008, + 349 + ], + [ + 1017, + 358 + ], + [ + 1019, + 366 + ], + [ + 1021, + 373 + ], + [ + 1024, + 380 + ], + [ + 1025, + 379 + ], + [ + 1036, + 367 + ], + [ + 1040, + 361 + ], + [ + 1050, + 353 + ], + [ + 1058, + 350 + ], + [ + 1069, + 351 + ], + [ + 1071, + 360 + ], + [ + 1065, + 368 + ], + [ + 1061, + 370 + ], + [ + 1057, + 373 + ], + [ + 1048, + 366 + ], + [ + 1040, + 372 + ], + [ + 1033, + 377 + ], + [ + 1025, + 381 + ], + [ + 1028, + 395 + ], + [ + 1030, + 403 + ], + [ + 1041, + 400 + ], + [ + 1049, + 400 + ], + [ + 1055, + 400 + ], + [ + 1063, + 400 + ], + [ + 1081, + 391 + ], + [ + 1085, + 394 + ], + [ + 1087, + 439 + ], + [ + 1096, + 438 + ], + [ + 1091, + 391 + ], + [ + 1096, + 391 + ], + [ + 1100, + 390 + ], + [ + 1107, + 390 + ], + [ + 1108, + 390 + ], + [ + 1110, + 436 + ], + [ + 1123, + 436 + ], + [ + 1123, + 432 + ], + [ + 1123, + 427 + ], + [ + 1122, + 391 + ], + [ + 1133, + 388 + ], + [ + 1153, + 381 + ], + [ + 1167, + 377 + ], + [ + 1179, + 376 + ], + [ + 1185, + 374 + ], + [ + 1188, + 382 + ], + [ + 1187, + 416 + ], + [ + 1194, + 418 + ], + [ + 1203, + 417 + ], + [ + 1203, + 410 + ], + [ + 1199, + 384 + ], + [ + 1199, + 377 + ], + [ + 1200, + 372 + ], + [ + 1208, + 369 + ], + [ + 1215, + 368 + ], + [ + 1216, + 373 + ], + [ + 1228, + 366 + ], + [ + 1239, + 367 + ], + [ + 1244, + 
429 + ], + [ + 1254, + 428 + ], + [ + 1252, + 383 + ], + [ + 1251, + 369 + ], + [ + 1267, + 370 + ], + [ + 1271, + 404 + ], + [ + 1311, + 402 + ], + [ + 1311, + 384 + ], + [ + 1345, + 383 + ], + [ + 1340, + 283 + ], + [ + 1344, + 279 + ], + [ + 1349, + 278 + ], + [ + 1350, + 291 + ], + [ + 1351, + 311 + ], + [ + 1353, + 337 + ], + [ + 1357, + 350 + ], + [ + 1358, + 366 + ], + [ + 1356, + 379 + ], + [ + 1355, + 392 + ], + [ + 1360, + 404 + ], + [ + 1375, + 406 + ], + [ + 1386, + 404 + ], + [ + 1393, + 401 + ], + [ + 1400, + 402 + ], + [ + 1396, + 400 + ], + [ + 1396, + 389 + ], + [ + 1389, + 375 + ], + [ + 1377, + 370 + ], + [ + 1369, + 344 + ], + [ + 1365, + 329 + ], + [ + 1371, + 330 + ], + [ + 1382, + 332 + ], + [ + 1391, + 334 + ], + [ + 1396, + 357 + ], + [ + 1396, + 374 + ], + [ + 1396, + 388 + ], + [ + 1396, + 399 + ], + [ + 1399, + 402 + ], + [ + 1414, + 403 + ], + [ + 1411, + 397 + ], + [ + 1409, + 374 + ], + [ + 1403, + 345 + ], + [ + 1400, + 330 + ], + [ + 1401, + 322 + ], + [ + 1409, + 332 + ], + [ + 1412, + 356 + ], + [ + 1415, + 375 + ], + [ + 1417, + 389 + ], + [ + 1418, + 399 + ], + [ + 1419, + 417 + ], + [ + 1418, + 432 + ], + [ + 1421, + 435 + ], + [ + 1427, + 438 + ], + [ + 1447, + 437 + ], + [ + 1452, + 424 + ], + [ + 1453, + 409 + ], + [ + 1447, + 314 + ], + [ + 1444, + 305 + ], + [ + 1444, + 295 + ], + [ + 1462, + 300 + ], + [ + 1465, + 296 + ], + [ + 1464, + 281 + ], + [ + 1461, + 277 + ], + [ + 1464, + 275 + ], + [ + 1462, + 201 + ], + [ + 1462, + 190 + ], + [ + 1462, + 178 + ], + [ + 1464, + 170 + ], + [ + 1476, + 167 + ], + [ + 1479, + 166 + ], + [ + 1485, + 202 + ], + [ + 1490, + 228 + ], + [ + 1495, + 263 + ], + [ + 1500, + 293 + ], + [ + 1507, + 317 + ], + [ + 1511, + 350 + ], + [ + 1515, + 396 + ], + [ + 1530, + 399 + ], + [ + 1559, + 395 + ], + [ + 1553, + 381 + ], + [ + 1547, + 340 + ], + [ + 1534, + 300 + ], + [ + 1528, + 271 + ], + [ + 1519, + 236 + ], + [ + 1514, + 218 + ], + [ + 1519, + 194 + ], + [ + 1511, + 166 + ], + [ + 1499, + 132 + ], + [ + 1503, + 130 + ], + [ + 1501, + 119 + ], + [ + 1500, + 114 + ], + [ + 1508, + 114 + ], + [ + 1522, + 105 + ], + [ + 1524, + 104 + ], + [ + 1528, + 98 + ], + [ + 1532, + 94 + ], + [ + 1538, + 90 + ], + [ + 1537, + 85 + ], + [ + 1527, + 88 + ], + [ + 1512, + 92 + ], + [ + 1503, + 90 + ], + [ + 1509, + 84 + ], + [ + 1521, + 82 + ], + [ + 1526, + 77 + ], + [ + 1532, + 72 + ], + [ + 1523, + 65 + ], + [ + 1523, + 58 + ], + [ + 1524, + 49 + ], + [ + 1529, + 37 + ], + [ + 1537, + 16 + ], + [ + 1541, + 5 + ], + [ + 1556, + 51 + ], + [ + 1561, + 120 + ], + [ + 1582, + 233 + ], + [ + 1590, + 339 + ], + [ + 1589, + 403 + ], + [ + 1591, + 409 + ], + [ + 1612, + 412 + ], + [ + 1629, + 405 + ], + [ + 1632, + 393 + ], + [ + 1624, + 296 + ], + [ + 1609, + 213 + ], + [ + 1606, + 150 + ], + [ + 1597, + 98 + ], + [ + 1597, + 54 + ], + [ + 1600, + 30 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 994, + 274 + ], + [ + 1003, + 433 + ], + [ + 1008, + 433 + ], + [ + 998, + 273 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1126, + 382 + ], + [ + 1138, + 379 + ], + [ + 1151, + 378 + ], + [ + 1163, + 374 + ], + [ + 1172, + 373 + ], + [ + 1177, + 371 + ], + [ + 1179, + 371 + ], + [ + 1187, + 372 + ], + [ + 1188, + 375 + ], + [ + 1188, + 377 + ], + [ + 1188, + 380 + ], + [ + 1187, + 382 + ], + [ + 1181, + 382 + ], + [ + 1177, + 382 + ], + [ + 1174, + 382 + ], + [ + 1165, + 383 + ], + [ + 1158, + 386 + ], + [ + 1156, + 387 + ], + [ + 1152, + 388 + ], + [ + 1144, + 388 + ], + [ + 1137, + 388 + ], + [ + 1130, + 388 + ], 
+ [ + 1126, + 385 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1020, + 344 + ], + [ + 1022, + 447 + ], + [ + 1027, + 446 + ], + [ + 1022, + 342 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1014, + 318 + ], + [ + 1017, + 348 + ], + [ + 1029, + 347 + ], + [ + 1027, + 319 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1036, + 388 + ], + [ + 1046, + 370 + ], + [ + 1022, + 370 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1029, + 400 + ], + [ + 1024, + 400 + ], + [ + 1023, + 376 + ], + [ + 1028, + 376 + ], + [ + 1029, + 376 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1065, + 411 + ], + [ + 1047, + 412 + ], + [ + 1040, + 410 + ], + [ + 1033, + 411 + ], + [ + 1026, + 407 + ], + [ + 1027, + 403 + ], + [ + 1032, + 397 + ], + [ + 1041, + 396 + ], + [ + 1045, + 395 + ], + [ + 1054, + 394 + ], + [ + 1059, + 395 + ], + [ + 1062, + 400 + ], + [ + 1065, + 407 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1018, + 380 + ], + [ + 1012, + 381 + ], + [ + 1014, + 383 + ], + [ + 1012, + 386 + ], + [ + 1011, + 388 + ], + [ + 1011, + 391 + ], + [ + 1014, + 393 + ], + [ + 1011, + 396 + ], + [ + 1011, + 397 + ], + [ + 1013, + 398 + ], + [ + 1015, + 402 + ], + [ + 1020, + 402 + ], + [ + 1022, + 402 + ], + [ + 1021, + 379 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1139, + 322 + ], + [ + 1139, + 338 + ], + [ + 1148, + 338 + ], + [ + 1147, + 323 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1152, + 350 + ], + [ + 1152, + 363 + ], + [ + 1159, + 363 + ], + [ + 1158, + 351 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1158, + 392 + ], + [ + 1151, + 390 + ], + [ + 1146, + 390 + ], + [ + 1143, + 396 + ], + [ + 1142, + 401 + ], + [ + 1148, + 404 + ], + [ + 1154, + 402 + ], + [ + 1161, + 401 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1169, + 402 + ], + [ + 1167, + 392 + ], + [ + 1157, + 392 + ], + [ + 1155, + 394 + ], + [ + 1153, + 400 + ], + [ + 1153, + 403 + ], + [ + 1159, + 410 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1176, + 421 + ], + [ + 1175, + 412 + ], + [ + 1172, + 404 + ], + [ + 1170, + 402 + ], + [ + 1161, + 402 + ], + [ + 1156, + 402 + ], + [ + 1152, + 405 + ], + [ + 1150, + 411 + ], + [ + 1152, + 419 + ], + [ + 1152, + 422 + ], + [ + 1154, + 424 + ], + [ + 1156, + 423 + ], + [ + 1158, + 421 + ], + [ + 1170, + 421 + ], + [ + 1171, + 422 + ], + [ + 1175, + 423 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1174, + 388 + ], + [ + 1185, + 388 + ], + [ + 1187, + 395 + ], + [ + 1188, + 398 + ], + [ + 1187, + 400 + ], + [ + 1182, + 402 + ], + [ + 1174, + 402 + ], + [ + 1171, + 401 + ], + [ + 1170, + 393 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1146, + 387 + ], + [ + 1148, + 440 + ], + [ + 1150, + 439 + ], + [ + 1148, + 387 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1145, + 374 + ], + [ + 1146, + 391 + ], + [ + 1149, + 392 + ], + [ + 1151, + 391 + ], + [ + 1151, + 391 + ], + [ + 1152, + 389 + ], + [ + 1152, + 386 + ], + [ + 1152, + 384 + ], + [ + 1153, + 382 + ], + [ + 1154, + 381 + ], + [ + 1154, + 379 + ], + [ + 1154, + 373 + ], + [ + 1153, + 370 + ], + [ + 1150, + 368 + ], + [ + 1147, + 370 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1133, + 419 + ], + [ + 1135, + 440 + ], + [ + 1149, + 439 + ], + [ + 1148, + 418 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1196, + 411 + ], + [ + 1182, + 416 + ], + [ + 1181, + 420 + ], + [ + 1180, + 427 + ], + [ + 
1184, + 427 + ], + [ + 1197, + 429 + ], + [ + 1197, + 425 + ], + [ + 1201, + 420 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1227, + 410 + ], + [ + 1204, + 410 + ], + [ + 1202, + 412 + ], + [ + 1199, + 418 + ], + [ + 1197, + 424 + ], + [ + 1196, + 428 + ], + [ + 1196, + 435 + ], + [ + 1197, + 440 + ], + [ + 1198, + 441 + ], + [ + 1203, + 443 + ], + [ + 1204, + 443 + ], + [ + 1205, + 440 + ], + [ + 1230, + 439 + ], + [ + 1232, + 442 + ], + [ + 1237, + 443 + ], + [ + 1238, + 440 + ], + [ + 1239, + 427 + ], + [ + 1237, + 419 + ], + [ + 1233, + 411 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1251, + 432 + ], + [ + 1252, + 441 + ], + [ + 1252, + 447 + ], + [ + 1252, + 452 + ], + [ + 1256, + 454 + ], + [ + 1264, + 454 + ], + [ + 1264, + 450 + ], + [ + 1305, + 449 + ], + [ + 1305, + 451 + ], + [ + 1306, + 453 + ], + [ + 1311, + 454 + ], + [ + 1314, + 450 + ], + [ + 1314, + 438 + ], + [ + 1311, + 421 + ], + [ + 1303, + 407 + ], + [ + 1283, + 405 + ], + [ + 1271, + 405 + ], + [ + 1263, + 408 + ], + [ + 1258, + 415 + ], + [ + 1254, + 421 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1288, + 384 + ], + [ + 1286, + 384 + ], + [ + 1285, + 387 + ], + [ + 1285, + 390 + ], + [ + 1288, + 392 + ], + [ + 1292, + 392 + ], + [ + 1294, + 390 + ], + [ + 1294, + 387 + ], + [ + 1292, + 384 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1559, + 531 + ], + [ + 1531, + 531 + ], + [ + 1514, + 526 + ], + [ + 1502, + 520 + ], + [ + 1487, + 513 + ], + [ + 2047, + 773 + ], + [ + 2047, + 599 + ], + [ + 1614, + 500 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1559, + 531 + ], + [ + 1531, + 531 + ], + [ + 1514, + 526 + ], + [ + 1502, + 520 + ], + [ + 1487, + 513 + ], + [ + 2047, + 773 + ], + [ + 2047, + 599 + ], + [ + 1614, + 500 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1277, + 294 + ], + [ + 1295, + 294 + ], + [ + 1342, + 302 + ], + [ + 1342, + 306 + ], + [ + 1295, + 297 + ], + [ + 1273, + 297 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1266, + 288 + ], + [ + 1268, + 315 + ], + [ + 1280, + 315 + ], + [ + 1278, + 287 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1355, + 366 + ], + [ + 1351, + 367 + ], + [ + 1350, + 370 + ], + [ + 1355, + 371 + ], + [ + 1355, + 375 + ], + [ + 1351, + 374 + ], + [ + 1350, + 377 + ], + [ + 1354, + 378 + ], + [ + 1354, + 381 + ], + [ + 1351, + 381 + ], + [ + 1351, + 386 + ], + [ + 1356, + 387 + ], + [ + 1358, + 365 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1567, + 271 + ], + [ + 1559, + 275 + ], + [ + 1556, + 281 + ], + [ + 1557, + 290 + ], + [ + 1560, + 293 + ], + [ + 1562, + 297 + ], + [ + 1561, + 309 + ], + [ + 1566, + 310 + ], + [ + 1561, + 313 + ], + [ + 1559, + 319 + ], + [ + 1561, + 327 + ], + [ + 1562, + 329 + ], + [ + 1565, + 332 + ], + [ + 1561, + 332 + ], + [ + 1563, + 345 + ], + [ + 1575, + 346 + ], + [ + 1573, + 331 + ], + [ + 1570, + 331 + ], + [ + 1574, + 328 + ], + [ + 1575, + 322 + ], + [ + 1575, + 314 + ], + [ + 1573, + 309 + ], + [ + 1570, + 308 + ], + [ + 1572, + 308 + ], + [ + 1573, + 294 + ], + [ + 1569, + 294 + ], + [ + 1573, + 291 + ], + [ + 1576, + 283 + ], + [ + 1574, + 278 + ], + [ + 1569, + 274 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1210, + 171 + ], + [ + 1222, + 173 + ], + [ + 1311, + 172 + ], + [ + 1417, + 188 + ], + [ + 1429, + 194 + ], + [ + 1438, + 206 + ], + [ + 1441, + 222 + ], + [ + 1444, + 313 + ], + [ + 1445, + 432 + ], + [ + 1455, + 434 + ], + [ + 1449, + 290 + ], + [ + 1447, + 228 + ], + [ 
+ 1445, + 206 + ], + [ + 1430, + 187 + ], + [ + 1388, + 178 + ], + [ + 1302, + 166 + ], + [ + 1219, + 169 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1201, + 160 + ], + [ + 1202, + 203 + ], + [ + 1205, + 207 + ], + [ + 1212, + 208 + ], + [ + 1224, + 207 + ], + [ + 1228, + 202 + ], + [ + 1226, + 164 + ], + [ + 1225, + 157 + ], + [ + 1221, + 155 + ], + [ + 1204, + 156 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1453, + 312 + ], + [ + 1439, + 312 + ], + [ + 1428, + 315 + ], + [ + 1429, + 320 + ], + [ + 1438, + 321 + ], + [ + 1439, + 326 + ], + [ + 1430, + 329 + ], + [ + 1431, + 334 + ], + [ + 1437, + 334 + ], + [ + 1438, + 341 + ], + [ + 1430, + 343 + ], + [ + 1431, + 349 + ], + [ + 1440, + 351 + ], + [ + 1440, + 355 + ], + [ + 1452, + 354 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1506, + 394 + ], + [ + 1468, + 394 + ], + [ + 1456, + 408 + ], + [ + 1448, + 423 + ], + [ + 1443, + 431 + ], + [ + 1436, + 450 + ], + [ + 1436, + 465 + ], + [ + 1437, + 476 + ], + [ + 1442, + 481 + ], + [ + 1445, + 481 + ], + [ + 1450, + 481 + ], + [ + 1452, + 484 + ], + [ + 1453, + 490 + ], + [ + 1458, + 491 + ], + [ + 1461, + 490 + ], + [ + 1470, + 484 + ], + [ + 1488, + 458 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1320, + 444 + ], + [ + 1315, + 472 + ], + [ + 1315, + 485 + ], + [ + 1315, + 490 + ], + [ + 1318, + 495 + ], + [ + 1322, + 500 + ], + [ + 1332, + 500 + ], + [ + 1335, + 490 + ], + [ + 1375, + 487 + ], + [ + 1415, + 489 + ], + [ + 1415, + 492 + ], + [ + 1421, + 493 + ], + [ + 1425, + 499 + ], + [ + 1431, + 500 + ], + [ + 1438, + 499 + ], + [ + 1440, + 489 + ], + [ + 1441, + 475 + ], + [ + 1441, + 459 + ], + [ + 1440, + 437 + ], + [ + 1419, + 402 + ], + [ + 1405, + 397 + ], + [ + 1375, + 397 + ], + [ + 1352, + 398 + ], + [ + 1345, + 398 + ], + [ + 1334, + 412 + ], + [ + 1330, + 420 + ], + [ + 1325, + 431 + ], + [ + 1320, + 437 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1573, + 388 + ], + [ + 1531, + 389 + ], + [ + 1506, + 394 + ], + [ + 1489, + 398 + ], + [ + 1481, + 413 + ], + [ + 1472, + 429 + ], + [ + 1466, + 444 + ], + [ + 1465, + 476 + ], + [ + 1467, + 494 + ], + [ + 1467, + 501 + ], + [ + 1467, + 502 + ], + [ + 1473, + 505 + ], + [ + 1476, + 509 + ], + [ + 1478, + 511 + ], + [ + 1480, + 514 + ], + [ + 1489, + 515 + ], + [ + 1493, + 512 + ], + [ + 1494, + 506 + ], + [ + 1494, + 499 + ], + [ + 1500, + 496 + ], + [ + 1504, + 494 + ], + [ + 1513, + 493 + ], + [ + 1570, + 493 + ], + [ + 1594, + 423 + ], + [ + 1593, + 403 + ], + [ + 1586, + 394 + ], + [ + 1578, + 390 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1714, + 393 + ], + [ + 1684, + 391 + ], + [ + 1642, + 391 + ], + [ + 1616, + 395 + ], + [ + 1592, + 400 + ], + [ + 1580, + 414 + ], + [ + 1570, + 441 + ], + [ + 1568, + 438 + ], + [ + 1549, + 439 + ], + [ + 1548, + 442 + ], + [ + 1549, + 448 + ], + [ + 1554, + 452 + ], + [ + 1564, + 452 + ], + [ + 1560, + 462 + ], + [ + 1557, + 481 + ], + [ + 1557, + 499 + ], + [ + 1559, + 513 + ], + [ + 1562, + 529 + ], + [ + 1564, + 534 + ], + [ + 1570, + 537 + ], + [ + 1574, + 536 + ], + [ + 1577, + 546 + ], + [ + 1583, + 549 + ], + [ + 1591, + 549 + ], + [ + 1597, + 548 + ], + [ + 1599, + 542 + ], + [ + 1601, + 531 + ], + [ + 1603, + 527 + ], + [ + 1616, + 527 + ], + [ + 1634, + 527 + ], + [ + 1655, + 522 + ], + [ + 1693, + 490 + ], + [ + 1727, + 435 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1884, + 380 + ], + [ + 1820, + 378 + ], + [ + 1761, + 382 + ], + [ + 1714, + 390 + ], + [ + 1684, + 412 + ], + [ 
+ 1669, + 443 + ], + [ + 1655, + 444 + ], + [ + 1647, + 450 + ], + [ + 1649, + 458 + ], + [ + 1657, + 460 + ], + [ + 1661, + 462 + ], + [ + 1644, + 479 + ], + [ + 1642, + 492 + ], + [ + 1643, + 505 + ], + [ + 1643, + 514 + ], + [ + 1643, + 528 + ], + [ + 1642, + 540 + ], + [ + 1646, + 550 + ], + [ + 1648, + 563 + ], + [ + 1658, + 566 + ], + [ + 1669, + 565 + ], + [ + 1671, + 558 + ], + [ + 1673, + 550 + ], + [ + 1710, + 552 + ], + [ + 1709, + 566 + ], + [ + 1712, + 577 + ], + [ + 1718, + 587 + ], + [ + 1733, + 592 + ], + [ + 1742, + 591 + ], + [ + 1749, + 584 + ], + [ + 1752, + 570 + ], + [ + 1755, + 554 + ], + [ + 1822, + 552 + ], + [ + 1897, + 402 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 2039, + 342 + ], + [ + 1963, + 350 + ], + [ + 1913, + 360 + ], + [ + 1880, + 374 + ], + [ + 1862, + 398 + ], + [ + 1835, + 439 + ], + [ + 1816, + 441 + ], + [ + 1813, + 448 + ], + [ + 1809, + 459 + ], + [ + 1815, + 468 + ], + [ + 1821, + 470 + ], + [ + 1805, + 515 + ], + [ + 1803, + 533 + ], + [ + 1806, + 544 + ], + [ + 1808, + 558 + ], + [ + 1809, + 584 + ], + [ + 1812, + 609 + ], + [ + 1815, + 627 + ], + [ + 1826, + 634 + ], + [ + 1841, + 638 + ], + [ + 1853, + 634 + ], + [ + 1859, + 623 + ], + [ + 1861, + 612 + ], + [ + 1919, + 631 + ], + [ + 1920, + 643 + ], + [ + 1924, + 665 + ], + [ + 1933, + 683 + ], + [ + 1939, + 688 + ], + [ + 1958, + 691 + ], + [ + 1966, + 689 + ], + [ + 1980, + 683 + ], + [ + 1987, + 675 + ], + [ + 1992, + 665 + ], + [ + 2004, + 648 + ], + [ + 2011, + 640 + ], + [ + 2034, + 639 + ], + [ + 2048, + 642 + ], + [ + 2048, + 342 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 0, + 424 + ], + [ + 804, + 425 + ], + [ + 811, + 502 + ], + [ + 0, + 651 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 739, + 341 + ], + [ + 738, + 504 + ], + [ + 742, + 504 + ], + [ + 744, + 337 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 718, + 275 + ], + [ + 719, + 347 + ], + [ + 765, + 346 + ], + [ + 765, + 275 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 716, + 502 + ], + [ + 716, + 520 + ], + [ + 752, + 518 + ], + [ + 763, + 516 + ], + [ + 762, + 499 + ], + [ + 734, + 498 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 949, + 406 + ], + [ + 960, + 424 + ], + [ + 972, + 444 + ], + [ + 975, + 438 + ], + [ + 976, + 437 + ], + [ + 982, + 437 + ], + [ + 987, + 438 + ], + [ + 990, + 441 + ], + [ + 990, + 444 + ], + [ + 988, + 447 + ], + [ + 984, + 451 + ], + [ + 975, + 455 + ], + [ + 981, + 468 + ], + [ + 985, + 478 + ], + [ + 986, + 488 + ], + [ + 986, + 505 + ], + [ + 986, + 523 + ], + [ + 986, + 531 + ], + [ + 983, + 535 + ], + [ + 973, + 536 + ], + [ + 971, + 540 + ], + [ + 965, + 545 + ], + [ + 959, + 546 + ], + [ + 950, + 544 + ], + [ + 947, + 540 + ], + [ + 947, + 534 + ], + [ + 945, + 531 + ], + [ + 853, + 534 + ], + [ + 853, + 539 + ], + [ + 849, + 542 + ], + [ + 845, + 542 + ], + [ + 840, + 540 + ], + [ + 837, + 535 + ], + [ + 827, + 537 + ], + [ + 826, + 543 + ], + [ + 825, + 550 + ], + [ + 821, + 552 + ], + [ + 813, + 553 + ], + [ + 807, + 549 + ], + [ + 805, + 534 + ], + [ + 801, + 516 + ], + [ + 800, + 493 + ], + [ + 802, + 483 + ], + [ + 805, + 476 + ], + [ + 805, + 466 + ], + [ + 805, + 459 + ], + [ + 807, + 452 + ], + [ + 810, + 448 + ], + [ + 822, + 421 + ], + [ + 827, + 412 + ], + [ + 831, + 408 + ], + [ + 841, + 404 + ], + [ + 871, + 403 + ], + [ + 890, + 401 + ], + [ + 910, + 401 + ], + [ + 933, + 401 + ], + [ + 943, + 401 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 853, + 481 + ], + [ + 853, 
+ 495 + ], + [ + 907, + 493 + ], + [ + 908, + 480 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 2007, + 203 + ], + [ + 2008, + 244 + ], + [ + 2048, + 243 + ], + [ + 2048, + 202 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 852, + 29 + ], + [ + 851, + 43 + ], + [ + 882, + 43 + ], + [ + 884, + 28 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000160_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000160_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..8a6f517aa8d4c18bcf303852d0acbed8644c3cde Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000160_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000160_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000160_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..88820092ab48692a1f81cc264ace87f9834c350a Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000160_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000161_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000161_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..75dbd0cd499ba73128e13e0601851ac38b35d9d7 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000161_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000163_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000163_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..2f2b16898683c52df95667161136f5d6f2f1c5ad Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000163_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000166_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000166_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..aa50a1a317a57ee908e826f1988fc4d9586ee109 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000166_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000167_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000167_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..eda1ff2fdfbda838591b322227a18151fe88cac5 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000167_000019_gtFine_color.png differ diff --git 
a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000168_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000168_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..0ec60badd7404e08aef213e464d49c08227b7285 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000168_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000168_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000168_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..e86c4a102153a358b3efdf209c70d7de72fd0fd3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000168_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000168_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000168_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..4017c0bc65ebb25192983cb6e7b932d4a60755e9 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000168_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000169_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000169_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..9cd600bc882522efe01a6279a400f92bf0a819ae Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000169_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000169_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000169_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ce43a58ef3cc3e8cefd8576088fe2dbca25c3320 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000169_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000170_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000170_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..e0fe36b59a18057fda46c2049babfa32329572c8 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000170_000019_gtFine_polygons.json @@ -0,0 +1,4814 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 257, + 338 + ], + [ + 2048, + 288 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 314 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1918, + 1007 + ], + [ + 1484, + 727 + ], + [ + 1142, + 505 + ], + [ + 1095, + 472 + ], + [ + 1108, + 454 + ], + [ + 1361, + 489 + ], + [ + 2048, + 642 + ], + [ + 2048, + 1024 + ], + [ + 2041, + 1022 + ], + [ + 1951, + 1010 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1135, + 456 + ], + [ + 1095, + 474 + ], + [ + 1129, + 505 + ], + [ + 1942, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 662 + ], + [ + 1477, + 507 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 11, + 653 + ], + [ + 198, + 630 + ], + [ + 368, + 612 + ], + [ + 410, + 606 + ], + [ + 516, + 589 + ], + [ + 686, + 539 + ], + [ + 825, + 496 + ], + [ + 895, + 479 + ], + [ + 912, + 469 + ], + [ + 962, + 468 + ], + [ + 1019, + 464 + ], + [ + 1076, + 460 + ], + [ + 1081, + 436 + ], + [ + 905, + 454 + ], + [ + 795, + 469 + ], + [ + 600, + 497 + ], + [ + 384, + 510 + ], + [ 
+ 0, + 514 + ], + [ + 0, + 655 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 837, + 471 + ], + [ + 376, + 549 + ], + [ + 306, + 584 + ], + [ + 405, + 587 + ], + [ + 412, + 601 + ], + [ + 442, + 600 + ], + [ + 563, + 577 + ], + [ + 767, + 515 + ], + [ + 836, + 492 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 12, + 559 + ], + [ + 176, + 551 + ], + [ + 323, + 530 + ], + [ + 676, + 490 + ], + [ + 791, + 461 + ], + [ + 883, + 454 + ], + [ + 934, + 456 + ], + [ + 996, + 456 + ], + [ + 1051, + 456 + ], + [ + 1127, + 451 + ], + [ + 1564, + 495 + ], + [ + 2048, + 590 + ], + [ + 2048, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 559 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 469, + 10 + ], + [ + 425, + 48 + ], + [ + 436, + 54 + ], + [ + 463, + 49 + ], + [ + 454, + 66 + ], + [ + 452, + 93 + ], + [ + 463, + 110 + ], + [ + 471, + 121 + ], + [ + 473, + 124 + ], + [ + 469, + 133 + ], + [ + 477, + 128 + ], + [ + 483, + 109 + ], + [ + 488, + 100 + ], + [ + 491, + 99 + ], + [ + 509, + 102 + ], + [ + 512, + 113 + ], + [ + 502, + 130 + ], + [ + 496, + 144 + ], + [ + 479, + 149 + ], + [ + 461, + 155 + ], + [ + 445, + 158 + ], + [ + 427, + 159 + ], + [ + 433, + 167 + ], + [ + 456, + 172 + ], + [ + 484, + 181 + ], + [ + 491, + 193 + ], + [ + 514, + 211 + ], + [ + 522, + 224 + ], + [ + 538, + 231 + ], + [ + 548, + 223 + ], + [ + 565, + 217 + ], + [ + 574, + 216 + ], + [ + 581, + 222 + ], + [ + 589, + 220 + ], + [ + 597, + 217 + ], + [ + 600, + 208 + ], + [ + 610, + 206 + ], + [ + 611, + 228 + ], + [ + 613, + 257 + ], + [ + 612, + 292 + ], + [ + 608, + 333 + ], + [ + 611, + 389 + ], + [ + 610, + 425 + ], + [ + 608, + 462 + ], + [ + 624, + 480 + ], + [ + 627, + 475 + ], + [ + 628, + 404 + ], + [ + 628, + 388 + ], + [ + 628, + 375 + ], + [ + 628, + 365 + ], + [ + 630, + 347 + ], + [ + 627, + 330 + ], + [ + 630, + 316 + ], + [ + 628, + 299 + ], + [ + 628, + 279 + ], + [ + 630, + 256 + ], + [ + 636, + 242 + ], + [ + 636, + 230 + ], + [ + 634, + 221 + ], + [ + 635, + 207 + ], + [ + 637, + 203 + ], + [ + 647, + 212 + ], + [ + 661, + 218 + ], + [ + 681, + 220 + ], + [ + 710, + 221 + ], + [ + 722, + 218 + ], + [ + 723, + 219 + ], + [ + 725, + 226 + ], + [ + 736, + 235 + ], + [ + 734, + 244 + ], + [ + 733, + 257 + ], + [ + 733, + 263 + ], + [ + 739, + 269 + ], + [ + 743, + 399 + ], + [ + 745, + 393 + ], + [ + 748, + 380 + ], + [ + 752, + 374 + ], + [ + 760, + 377 + ], + [ + 761, + 383 + ], + [ + 757, + 407 + ], + [ + 761, + 422 + ], + [ + 766, + 430 + ], + [ + 768, + 441 + ], + [ + 774, + 441 + ], + [ + 789, + 447 + ], + [ + 795, + 444 + ], + [ + 794, + 436 + ], + [ + 791, + 393 + ], + [ + 808, + 394 + ], + [ + 818, + 393 + ], + [ + 835, + 386 + ], + [ + 845, + 389 + ], + [ + 846, + 409 + ], + [ + 846, + 428 + ], + [ + 853, + 429 + ], + [ + 854, + 397 + ], + [ + 866, + 384 + ], + [ + 875, + 386 + ], + [ + 883, + 383 + ], + [ + 891, + 381 + ], + [ + 897, + 388 + ], + [ + 903, + 399 + ], + [ + 916, + 408 + ], + [ + 919, + 408 + ], + [ + 927, + 404 + ], + [ + 933, + 402 + ], + [ + 938, + 396 + ], + [ + 941, + 392 + ], + [ + 940, + 390 + ], + [ + 937, + 388 + ], + [ + 941, + 378 + ], + [ + 944, + 385 + ], + [ + 946, + 385 + ], + [ + 950, + 377 + ], + [ + 956, + 372 + ], + [ + 969, + 372 + ], + [ + 981, + 377 + ], + [ + 997, + 380 + ], + [ + 998, + 382 + ], + [ + 982, + 386 + ], + [ + 982, + 395 + ], + [ + 999, + 402 + ], + [ + 1008, + 400 + ], + [ + 1011, + 400 + ], + [ + 1010, + 453 + ], + [ + 1019, + 453 + ], + [ + 1016, + 396 + ], + [ + 1029, + 395 + ], + [ + 1043, + 391 + ], + [ + 
1057, + 392 + ], + [ + 1078, + 390 + ], + [ + 1080, + 390 + ], + [ + 1091, + 385 + ], + [ + 1097, + 390 + ], + [ + 1101, + 393 + ], + [ + 1092, + 413 + ], + [ + 1080, + 423 + ], + [ + 1076, + 425 + ], + [ + 1067, + 435 + ], + [ + 1067, + 448 + ], + [ + 1074, + 460 + ], + [ + 1083, + 465 + ], + [ + 1102, + 459 + ], + [ + 1098, + 465 + ], + [ + 1096, + 470 + ], + [ + 1129, + 456 + ], + [ + 1176, + 449 + ], + [ + 1173, + 400 + ], + [ + 1183, + 385 + ], + [ + 1195, + 368 + ], + [ + 1201, + 343 + ], + [ + 1207, + 336 + ], + [ + 1222, + 344 + ], + [ + 1232, + 342 + ], + [ + 1226, + 338 + ], + [ + 1250, + 331 + ], + [ + 1223, + 321 + ], + [ + 1239, + 321 + ], + [ + 1257, + 325 + ], + [ + 1259, + 423 + ], + [ + 1274, + 427 + ], + [ + 1270, + 330 + ], + [ + 1277, + 324 + ], + [ + 1303, + 322 + ], + [ + 1311, + 314 + ], + [ + 1316, + 301 + ], + [ + 1316, + 295 + ], + [ + 1300, + 301 + ], + [ + 1289, + 299 + ], + [ + 1293, + 290 + ], + [ + 1315, + 291 + ], + [ + 1324, + 295 + ], + [ + 1337, + 293 + ], + [ + 1350, + 297 + ], + [ + 1350, + 371 + ], + [ + 1356, + 413 + ], + [ + 1378, + 410 + ], + [ + 1370, + 314 + ], + [ + 1364, + 265 + ], + [ + 1358, + 207 + ], + [ + 1374, + 218 + ], + [ + 1381, + 219 + ], + [ + 1381, + 214 + ], + [ + 1367, + 204 + ], + [ + 1378, + 204 + ], + [ + 1376, + 181 + ], + [ + 1378, + 169 + ], + [ + 1394, + 174 + ], + [ + 1389, + 185 + ], + [ + 1393, + 194 + ], + [ + 1409, + 197 + ], + [ + 1414, + 193 + ], + [ + 1414, + 184 + ], + [ + 1423, + 180 + ], + [ + 1437, + 180 + ], + [ + 1437, + 172 + ], + [ + 1447, + 168 + ], + [ + 1460, + 180 + ], + [ + 1471, + 183 + ], + [ + 1473, + 196 + ], + [ + 1476, + 206 + ], + [ + 1472, + 216 + ], + [ + 1464, + 222 + ], + [ + 1463, + 244 + ], + [ + 1476, + 237 + ], + [ + 1486, + 237 + ], + [ + 1507, + 242 + ], + [ + 1523, + 238 + ], + [ + 1550, + 241 + ], + [ + 1559, + 241 + ], + [ + 1548, + 231 + ], + [ + 1510, + 215 + ], + [ + 1498, + 206 + ], + [ + 1496, + 189 + ], + [ + 1497, + 166 + ], + [ + 1498, + 154 + ], + [ + 1496, + 148 + ], + [ + 1500, + 149 + ], + [ + 1510, + 149 + ], + [ + 1521, + 140 + ], + [ + 1534, + 135 + ], + [ + 1553, + 138 + ], + [ + 1555, + 147 + ], + [ + 1573, + 144 + ], + [ + 1592, + 143 + ], + [ + 1586, + 133 + ], + [ + 1583, + 121 + ], + [ + 1580, + 117 + ], + [ + 1587, + 112 + ], + [ + 1603, + 112 + ], + [ + 1613, + 104 + ], + [ + 1623, + 99 + ], + [ + 1637, + 94 + ], + [ + 1622, + 87 + ], + [ + 1604, + 88 + ], + [ + 1601, + 74 + ], + [ + 1611, + 73 + ], + [ + 1623, + 62 + ], + [ + 1606, + 60 + ], + [ + 1617, + 41 + ], + [ + 1595, + 50 + ], + [ + 1595, + 31 + ], + [ + 1567, + 38 + ], + [ + 1571, + 24 + ], + [ + 1571, + 10 + ], + [ + 1585, + 0 + ], + [ + 1101, + 0 + ], + [ + 397, + 0 + ], + [ + 407, + 7 + ], + [ + 425, + 5 + ], + [ + 436, + 14 + ], + [ + 450, + 20 + ], + [ + 458, + 9 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 911, + 12 + ], + [ + 909, + 35 + ], + [ + 907, + 48 + ], + [ + 906, + 51 + ], + [ + 918, + 42 + ], + [ + 920, + 35 + ], + [ + 930, + 29 + ], + [ + 948, + 33 + ], + [ + 964, + 39 + ], + [ + 963, + 48 + ], + [ + 968, + 63 + ], + [ + 959, + 69 + ], + [ + 957, + 71 + ], + [ + 967, + 89 + ], + [ + 964, + 98 + ], + [ + 953, + 109 + ], + [ + 950, + 123 + ], + [ + 955, + 137 + ], + [ + 967, + 136 + ], + [ + 970, + 152 + ], + [ + 959, + 156 + ], + [ + 955, + 162 + ], + [ + 966, + 170 + ], + [ + 978, + 185 + ], + [ + 973, + 196 + ], + [ + 965, + 209 + ], + [ + 977, + 219 + ], + [ + 978, + 224 + ], + [ + 982, + 212 + ], + [ + 989, + 199 + ], + [ + 997, + 197 + ], + [ + 1000, + 191 + ], + [ + 
1006, + 186 + ], + [ + 1019, + 183 + ], + [ + 1034, + 196 + ], + [ + 1046, + 212 + ], + [ + 1049, + 219 + ], + [ + 1054, + 226 + ], + [ + 1058, + 239 + ], + [ + 1073, + 230 + ], + [ + 1087, + 215 + ], + [ + 1092, + 212 + ], + [ + 1081, + 203 + ], + [ + 1064, + 194 + ], + [ + 1059, + 180 + ], + [ + 1067, + 177 + ], + [ + 1064, + 168 + ], + [ + 1085, + 168 + ], + [ + 1080, + 154 + ], + [ + 1102, + 143 + ], + [ + 1087, + 134 + ], + [ + 1075, + 145 + ], + [ + 1067, + 144 + ], + [ + 1044, + 115 + ], + [ + 1044, + 109 + ], + [ + 1052, + 94 + ], + [ + 1034, + 89 + ], + [ + 1022, + 95 + ], + [ + 1013, + 96 + ], + [ + 1002, + 99 + ], + [ + 990, + 95 + ], + [ + 1002, + 79 + ], + [ + 1006, + 62 + ], + [ + 978, + 57 + ], + [ + 989, + 39 + ], + [ + 1012, + 24 + ], + [ + 1019, + 5 + ], + [ + 1023, + 6 + ], + [ + 1041, + 20 + ], + [ + 1056, + 9 + ], + [ + 1063, + 6 + ], + [ + 1069, + 18 + ], + [ + 1113, + 0 + ], + [ + 903, + 0 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 963, + 168 + ], + [ + 1015, + 180 + ], + [ + 1019, + 176 + ], + [ + 1076, + 228 + ], + [ + 1063, + 236 + ], + [ + 1056, + 241 + ], + [ + 1047, + 238 + ], + [ + 1044, + 226 + ], + [ + 1042, + 213 + ], + [ + 1039, + 205 + ], + [ + 1019, + 188 + ], + [ + 1013, + 185 + ], + [ + 1000, + 197 + ], + [ + 993, + 199 + ], + [ + 986, + 218 + ], + [ + 983, + 230 + ], + [ + 979, + 228 + ], + [ + 976, + 221 + ], + [ + 956, + 219 + ], + [ + 958, + 228 + ], + [ + 947, + 224 + ], + [ + 963, + 210 + ], + [ + 970, + 196 + ], + [ + 973, + 186 + ], + [ + 964, + 174 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1064, + 448 + ], + [ + 1057, + 441 + ], + [ + 1038, + 441 + ], + [ + 1027, + 445 + ], + [ + 1020, + 451 + ], + [ + 1016, + 456 + ], + [ + 1016, + 463 + ], + [ + 1019, + 464 + ], + [ + 1043, + 463 + ], + [ + 1063, + 463 + ], + [ + 1066, + 460 + ], + [ + 1066, + 453 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1004, + 442 + ], + [ + 985, + 443 + ], + [ + 979, + 451 + ], + [ + 976, + 457 + ], + [ + 980, + 461 + ], + [ + 996, + 462 + ], + [ + 1013, + 462 + ], + [ + 1017, + 461 + ], + [ + 1016, + 452 + ], + [ + 1015, + 446 + ], + [ + 1006, + 444 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 952, + 440 + ], + [ + 965, + 440 + ], + [ + 969, + 437 + ], + [ + 977, + 432 + ], + [ + 975, + 442 + ], + [ + 979, + 449 + ], + [ + 979, + 457 + ], + [ + 978, + 462 + ], + [ + 969, + 464 + ], + [ + 960, + 465 + ], + [ + 951, + 454 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 930, + 443 + ], + [ + 934, + 438 + ], + [ + 951, + 438 + ], + [ + 956, + 446 + ], + [ + 959, + 458 + ], + [ + 959, + 464 + ], + [ + 959, + 465 + ], + [ + 952, + 466 + ], + [ + 948, + 464 + ], + [ + 933, + 451 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 926, + 439 + ], + [ + 923, + 447 + ], + [ + 917, + 448 + ], + [ + 910, + 453 + ], + [ + 908, + 461 + ], + [ + 909, + 465 + ], + [ + 916, + 469 + ], + [ + 930, + 469 + ], + [ + 951, + 469 + ], + [ + 950, + 464 + ], + [ + 949, + 458 + ], + [ + 941, + 449 + ], + [ + 934, + 444 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 863, + 400 + ], + [ + 858, + 401 + ], + [ + 857, + 406 + ], + [ + 858, + 410 + ], + [ + 860, + 412 + ], + [ + 862, + 412 + ], + [ + 868, + 411 + ], + [ + 869, + 407 + ], + [ + 869, + 403 + ], + [ + 866, + 400 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 862, + 431 + ], + [ + 861, + 400 + ], + [ + 863, + 400 + ], + [ + 863, + 434 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 874, + 427 + ], + [ + 854, + 428 + ], + [ + 
856, + 448 + ], + [ + 878, + 473 + ], + [ + 892, + 476 + ], + [ + 894, + 474 + ], + [ + 899, + 470 + ], + [ + 901, + 464 + ], + [ + 899, + 453 + ], + [ + 898, + 443 + ], + [ + 887, + 428 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 971, + 348 + ], + [ + 945, + 347 + ], + [ + 908, + 366 + ], + [ + 909, + 367 + ], + [ + 945, + 350 + ], + [ + 970, + 350 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 955, + 369 + ], + [ + 974, + 369 + ], + [ + 974, + 347 + ], + [ + 954, + 347 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 900, + 403 + ], + [ + 900, + 423 + ], + [ + 920, + 422 + ], + [ + 918, + 402 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 934, + 412 + ], + [ + 927, + 406 + ], + [ + 924, + 412 + ], + [ + 926, + 416 + ], + [ + 925, + 417 + ], + [ + 924, + 419 + ], + [ + 925, + 422 + ], + [ + 927, + 425 + ], + [ + 930, + 425 + ], + [ + 933, + 423 + ], + [ + 933, + 419 + ], + [ + 930, + 417 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 870, + 448 + ], + [ + 881, + 454 + ], + [ + 889, + 463 + ], + [ + 894, + 471 + ], + [ + 894, + 479 + ], + [ + 879, + 481 + ], + [ + 870, + 483 + ], + [ + 869, + 474 + ], + [ + 863, + 460 + ], + [ + 856, + 446 + ], + [ + 859, + 438 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 806, + 437 + ], + [ + 807, + 428 + ], + [ + 813, + 423 + ], + [ + 831, + 423 + ], + [ + 849, + 423 + ], + [ + 854, + 425 + ], + [ + 860, + 434 + ], + [ + 863, + 445 + ], + [ + 866, + 448 + ], + [ + 867, + 450 + ], + [ + 870, + 460 + ], + [ + 871, + 473 + ], + [ + 872, + 484 + ], + [ + 862, + 487 + ], + [ + 857, + 485 + ], + [ + 859, + 480 + ], + [ + 824, + 456 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 817, + 436 + ], + [ + 845, + 437 + ], + [ + 854, + 452 + ], + [ + 857, + 453 + ], + [ + 857, + 455 + ], + [ + 860, + 464 + ], + [ + 863, + 478 + ], + [ + 863, + 484 + ], + [ + 863, + 486 + ], + [ + 862, + 487 + ], + [ + 858, + 489 + ], + [ + 852, + 489 + ], + [ + 834, + 477 + ], + [ + 802, + 451 + ], + [ + 796, + 444 + ], + [ + 798, + 437 + ], + [ + 812, + 436 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 835, + 444 + ], + [ + 826, + 443 + ], + [ + 822, + 440 + ], + [ + 812, + 438 + ], + [ + 800, + 436 + ], + [ + 793, + 436 + ], + [ + 788, + 445 + ], + [ + 793, + 462 + ], + [ + 800, + 479 + ], + [ + 802, + 486 + ], + [ + 808, + 491 + ], + [ + 813, + 493 + ], + [ + 821, + 496 + ], + [ + 830, + 497 + ], + [ + 848, + 495 + ], + [ + 853, + 488 + ], + [ + 852, + 474 + ], + [ + 848, + 466 + ], + [ + 842, + 460 + ], + [ + 833, + 454 + ], + [ + 835, + 450 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 753, + 434 + ], + [ + 788, + 435 + ], + [ + 795, + 449 + ], + [ + 797, + 453 + ], + [ + 801, + 454 + ], + [ + 801, + 456 + ], + [ + 804, + 466 + ], + [ + 805, + 478 + ], + [ + 804, + 490 + ], + [ + 804, + 497 + ], + [ + 803, + 501 + ], + [ + 800, + 502 + ], + [ + 794, + 503 + ], + [ + 784, + 504 + ], + [ + 766, + 477 + ], + [ + 748, + 445 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 719, + 435 + ], + [ + 745, + 434 + ], + [ + 762, + 436 + ], + [ + 773, + 441 + ], + [ + 781, + 452 + ], + [ + 785, + 462 + ], + [ + 787, + 476 + ], + [ + 789, + 486 + ], + [ + 791, + 506 + ], + [ + 790, + 511 + ], + [ + 781, + 513 + ], + [ + 777, + 513 + ], + [ + 775, + 512 + ], + [ + 769, + 515 + ], + [ + 760, + 518 + ], + [ + 757, + 517 + ], + [ + 748, + 513 + ], + [ + 738, + 512 + ], + [ + 720, + 477 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 654, + 431 + ], + [ + 690, + 
429 + ], + [ + 715, + 431 + ], + [ + 724, + 440 + ], + [ + 732, + 460 + ], + [ + 735, + 477 + ], + [ + 739, + 481 + ], + [ + 740, + 494 + ], + [ + 740, + 512 + ], + [ + 740, + 520 + ], + [ + 738, + 524 + ], + [ + 733, + 526 + ], + [ + 726, + 524 + ], + [ + 719, + 524 + ], + [ + 694, + 515 + ], + [ + 630, + 479 + ], + [ + 627, + 471 + ], + [ + 643, + 438 + ], + [ + 647, + 433 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 668, + 466 + ], + [ + 680, + 469 + ], + [ + 682, + 475 + ], + [ + 691, + 476 + ], + [ + 695, + 477 + ], + [ + 707, + 488 + ], + [ + 710, + 498 + ], + [ + 714, + 510 + ], + [ + 720, + 523 + ], + [ + 722, + 530 + ], + [ + 713, + 532 + ], + [ + 694, + 535 + ], + [ + 680, + 537 + ], + [ + 666, + 529 + ], + [ + 648, + 527 + ], + [ + 624, + 507 + ], + [ + 620, + 492 + ], + [ + 623, + 478 + ], + [ + 626, + 469 + ], + [ + 633, + 466 + ], + [ + 640, + 459 + ], + [ + 648, + 460 + ], + [ + 658, + 464 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 491, + 440 + ], + [ + 539, + 435 + ], + [ + 600, + 436 + ], + [ + 618, + 453 + ], + [ + 628, + 471 + ], + [ + 636, + 468 + ], + [ + 644, + 471 + ], + [ + 646, + 475 + ], + [ + 637, + 479 + ], + [ + 637, + 482 + ], + [ + 650, + 498 + ], + [ + 653, + 516 + ], + [ + 653, + 529 + ], + [ + 646, + 530 + ], + [ + 646, + 542 + ], + [ + 644, + 548 + ], + [ + 639, + 549 + ], + [ + 627, + 547 + ], + [ + 622, + 537 + ], + [ + 613, + 537 + ], + [ + 612, + 546 + ], + [ + 609, + 555 + ], + [ + 605, + 558 + ], + [ + 600, + 558 + ], + [ + 588, + 533 + ], + [ + 550, + 484 + ], + [ + 493, + 457 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 457, + 404 + ], + [ + 451, + 401 + ], + [ + 446, + 404 + ], + [ + 445, + 410 + ], + [ + 447, + 414 + ], + [ + 449, + 416 + ], + [ + 447, + 419 + ], + [ + 437, + 422 + ], + [ + 435, + 431 + ], + [ + 432, + 441 + ], + [ + 459, + 448 + ], + [ + 471, + 441 + ], + [ + 473, + 439 + ], + [ + 471, + 428 + ], + [ + 468, + 424 + ], + [ + 465, + 419 + ], + [ + 460, + 412 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 484, + 362 + ], + [ + 457, + 336 + ], + [ + 439, + 361 + ], + [ + 465, + 382 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 405, + 325 + ], + [ + 407, + 339 + ], + [ + 433, + 339 + ], + [ + 432, + 319 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 454, + 307 + ], + [ + 390, + 307 + ], + [ + 387, + 310 + ], + [ + 387, + 325 + ], + [ + 392, + 328 + ], + [ + 461, + 327 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 456, + 279 + ], + [ + 457, + 293 + ], + [ + 471, + 296 + ], + [ + 470, + 281 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 454, + 266 + ], + [ + 475, + 268 + ], + [ + 475, + 287 + ], + [ + 456, + 283 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 565, + 60 + ], + [ + 523, + 81 + ], + [ + 493, + 111 + ], + [ + 474, + 136 + ], + [ + 459, + 166 + ], + [ + 450, + 206 + ], + [ + 450, + 262 + ], + [ + 450, + 318 + ], + [ + 453, + 359 + ], + [ + 455, + 442 + ], + [ + 467, + 442 + ], + [ + 458, + 262 + ], + [ + 457, + 208 + ], + [ + 465, + 170 + ], + [ + 479, + 141 + ], + [ + 497, + 115 + ], + [ + 526, + 89 + ], + [ + 558, + 71 + ], + [ + 597, + 59 + ], + [ + 588, + 51 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 612, + 35 + ], + [ + 582, + 50 + ], + [ + 584, + 58 + ], + [ + 588, + 63 + ], + [ + 603, + 69 + ], + [ + 616, + 65 + ], + [ + 620, + 56 + ], + [ + 618, + 44 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 543, + 442 + ], + [ + 498, + 437 + ], + [ + 458, + 438 + 
], + [ + 426, + 440 + ], + [ + 401, + 446 + ], + [ + 381, + 461 + ], + [ + 366, + 478 + ], + [ + 362, + 474 + ], + [ + 354, + 475 + ], + [ + 348, + 479 + ], + [ + 346, + 484 + ], + [ + 351, + 487 + ], + [ + 355, + 488 + ], + [ + 334, + 501 + ], + [ + 315, + 517 + ], + [ + 311, + 531 + ], + [ + 311, + 558 + ], + [ + 315, + 582 + ], + [ + 320, + 588 + ], + [ + 356, + 588 + ], + [ + 358, + 581 + ], + [ + 453, + 582 + ], + [ + 486, + 579 + ], + [ + 489, + 591 + ], + [ + 492, + 594 + ], + [ + 508, + 593 + ], + [ + 520, + 592 + ], + [ + 527, + 582 + ], + [ + 530, + 567 + ], + [ + 567, + 559 + ], + [ + 568, + 568 + ], + [ + 571, + 573 + ], + [ + 576, + 575 + ], + [ + 588, + 575 + ], + [ + 595, + 571 + ], + [ + 599, + 562 + ], + [ + 601, + 544 + ], + [ + 601, + 515 + ], + [ + 600, + 495 + ], + [ + 590, + 481 + ], + [ + 567, + 459 + ], + [ + 549, + 443 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 101, + 496 + ], + [ + 107, + 610 + ], + [ + 121, + 607 + ], + [ + 118, + 496 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1141, + 379 + ], + [ + 1118, + 379 + ], + [ + 1119, + 395 + ], + [ + 1142, + 395 + ], + [ + 1147, + 390 + ], + [ + 1146, + 378 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 188, + 487 + ], + [ + 191, + 591 + ], + [ + 204, + 589 + ], + [ + 202, + 486 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 25, + 604 + ], + [ + 27, + 573 + ], + [ + 22, + 540 + ], + [ + 12, + 502 + ], + [ + 0, + 483 + ], + [ + 0, + 623 + ], + [ + 5, + 624 + ], + [ + 20, + 617 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1091, + 393 + ], + [ + 1087, + 395 + ], + [ + 1086, + 401 + ], + [ + 1088, + 404 + ], + [ + 1092, + 405 + ], + [ + 1097, + 405 + ], + [ + 1098, + 403 + ], + [ + 1100, + 400 + ], + [ + 1099, + 396 + ], + [ + 1096, + 393 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1117, + 366 + ], + [ + 1117, + 429 + ], + [ + 1122, + 429 + ], + [ + 1122, + 367 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1108, + 395 + ], + [ + 1117, + 387 + ], + [ + 1128, + 394 + ], + [ + 1118, + 404 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1143, + 337 + ], + [ + 1146, + 414 + ], + [ + 1151, + 414 + ], + [ + 1146, + 336 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1158, + 383 + ], + [ + 1135, + 384 + ], + [ + 1135, + 406 + ], + [ + 1159, + 406 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1135, + 426 + ], + [ + 1115, + 439 + ], + [ + 1112, + 450 + ], + [ + 1112, + 460 + ], + [ + 1115, + 468 + ], + [ + 1118, + 469 + ], + [ + 1136, + 469 + ], + [ + 1144, + 442 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1181, + 408 + ], + [ + 1139, + 410 + ], + [ + 1134, + 415 + ], + [ + 1132, + 439 + ], + [ + 1127, + 468 + ], + [ + 1138, + 471 + ], + [ + 1143, + 483 + ], + [ + 1147, + 486 + ], + [ + 1164, + 465 + ], + [ + 1179, + 428 + ], + [ + 1181, + 418 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1189, + 416 + ], + [ + 1161, + 419 + ], + [ + 1146, + 447 + ], + [ + 1144, + 460 + ], + [ + 1145, + 473 + ], + [ + 1148, + 482 + ], + [ + 1163, + 478 + ], + [ + 1181, + 464 + ], + [ + 1187, + 457 + ], + [ + 1194, + 437 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1350, + 354 + ], + [ + 1270, + 354 + ], + [ + 1273, + 420 + ], + [ + 1260, + 421 + ], + [ + 1257, + 354 + ], + [ + 1178, + 356 + ], + [ + 1182, + 466 + ], + [ + 1336, + 439 + ], + [ + 1355, + 410 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1201, + 473 + ], + [ + 1187, + 456 + ], + [ + 1174, 
+ 458 + ], + [ + 1165, + 462 + ], + [ + 1155, + 473 + ], + [ + 1146, + 483 + ], + [ + 1140, + 490 + ], + [ + 1138, + 497 + ], + [ + 1143, + 503 + ], + [ + 1164, + 500 + ], + [ + 1185, + 495 + ], + [ + 1201, + 489 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1203, + 369 + ], + [ + 1205, + 425 + ], + [ + 1209, + 425 + ], + [ + 1208, + 366 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1229, + 370 + ], + [ + 1206, + 332 + ], + [ + 1184, + 374 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1241, + 415 + ], + [ + 1206, + 418 + ], + [ + 1195, + 428 + ], + [ + 1184, + 451 + ], + [ + 1180, + 469 + ], + [ + 1180, + 479 + ], + [ + 1186, + 488 + ], + [ + 1198, + 503 + ], + [ + 1207, + 505 + ], + [ + 1218, + 498 + ], + [ + 1235, + 475 + ], + [ + 1252, + 444 + ], + [ + 1262, + 428 + ], + [ + 1268, + 417 + ], + [ + 1251, + 414 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1331, + 413 + ], + [ + 1282, + 414 + ], + [ + 1267, + 415 + ], + [ + 1245, + 425 + ], + [ + 1227, + 449 + ], + [ + 1223, + 463 + ], + [ + 1223, + 477 + ], + [ + 1228, + 490 + ], + [ + 1239, + 485 + ], + [ + 1273, + 470 + ], + [ + 1307, + 442 + ], + [ + 1321, + 423 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1260, + 457 + ], + [ + 1246, + 457 + ], + [ + 1227, + 469 + ], + [ + 1224, + 482 + ], + [ + 1220, + 494 + ], + [ + 1207, + 510 + ], + [ + 1200, + 518 + ], + [ + 1204, + 528 + ], + [ + 1216, + 532 + ], + [ + 1247, + 534 + ], + [ + 1269, + 533 + ], + [ + 1277, + 515 + ], + [ + 1271, + 486 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1297, + 418 + ], + [ + 1270, + 433 + ], + [ + 1253, + 455 + ], + [ + 1247, + 480 + ], + [ + 1253, + 510 + ], + [ + 1256, + 518 + ], + [ + 1271, + 518 + ], + [ + 1275, + 526 + ], + [ + 1283, + 534 + ], + [ + 1296, + 540 + ], + [ + 1307, + 539 + ], + [ + 1320, + 512 + ], + [ + 1329, + 474 + ], + [ + 1340, + 441 + ], + [ + 1350, + 421 + ], + [ + 1341, + 414 + ], + [ + 1332, + 413 + ], + [ + 1307, + 415 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1505, + 395 + ], + [ + 1398, + 398 + ], + [ + 1341, + 407 + ], + [ + 1303, + 442 + ], + [ + 1285, + 473 + ], + [ + 1281, + 501 + ], + [ + 1278, + 510 + ], + [ + 1279, + 521 + ], + [ + 1286, + 537 + ], + [ + 1332, + 539 + ], + [ + 1345, + 560 + ], + [ + 1364, + 569 + ], + [ + 1382, + 565 + ], + [ + 1391, + 552 + ], + [ + 1403, + 545 + ], + [ + 1456, + 462 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1541, + 413 + ], + [ + 1524, + 409 + ], + [ + 1506, + 412 + ], + [ + 1499, + 397 + ], + [ + 1442, + 413 + ], + [ + 1401, + 469 + ], + [ + 1396, + 507 + ], + [ + 1402, + 543 + ], + [ + 1414, + 567 + ], + [ + 1429, + 584 + ], + [ + 1457, + 590 + ], + [ + 1490, + 585 + ], + [ + 1508, + 472 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1623, + 383 + ], + [ + 1583, + 384 + ], + [ + 1538, + 387 + ], + [ + 1511, + 394 + ], + [ + 1500, + 396 + ], + [ + 1471, + 410 + ], + [ + 1431, + 464 + ], + [ + 1428, + 503 + ], + [ + 1425, + 526 + ], + [ + 1424, + 541 + ], + [ + 1434, + 564 + ], + [ + 1447, + 585 + ], + [ + 1462, + 593 + ], + [ + 1484, + 594 + ], + [ + 1498, + 612 + ], + [ + 1523, + 619 + ], + [ + 1541, + 617 + ], + [ + 1567, + 594 + ], + [ + 1592, + 570 + ], + [ + 1665, + 449 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1916, + 376 + ], + [ + 1890, + 363 + ], + [ + 1727, + 365 + ], + [ + 1718, + 372 + ], + [ + 1596, + 374 + ], + [ + 1584, + 378 + ], + [ + 1567, + 396 + ], + [ + 1525, + 461 + ], + [ + 1523, + 500 + ], + [ + 1516, + 539 + ], + [ + 1515, 
+ 568 + ], + [ + 1523, + 587 + ], + [ + 1529, + 595 + ], + [ + 1572, + 603 + ], + [ + 1585, + 608 + ], + [ + 1614, + 609 + ], + [ + 1702, + 596 + ], + [ + 1843, + 515 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1745, + 647 + ], + [ + 1714, + 561 + ], + [ + 1695, + 556 + ], + [ + 1661, + 564 + ], + [ + 1639, + 570 + ], + [ + 1615, + 592 + ], + [ + 1601, + 608 + ], + [ + 1583, + 626 + ], + [ + 1555, + 643 + ], + [ + 1530, + 669 + ], + [ + 1525, + 697 + ], + [ + 1552, + 706 + ], + [ + 1597, + 708 + ], + [ + 1653, + 708 + ], + [ + 1686, + 703 + ], + [ + 1721, + 694 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1968, + 8 + ], + [ + 1990, + 342 + ], + [ + 2048, + 331 + ], + [ + 2037, + 120 + ], + [ + 2023, + 40 + ], + [ + 2021, + 0 + ], + [ + 1967, + 0 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 2014, + 310 + ], + [ + 1955, + 309 + ], + [ + 1888, + 324 + ], + [ + 1901, + 351 + ], + [ + 1887, + 357 + ], + [ + 1906, + 370 + ], + [ + 1844, + 382 + ], + [ + 1802, + 407 + ], + [ + 1781, + 443 + ], + [ + 1746, + 493 + ], + [ + 1725, + 520 + ], + [ + 1694, + 564 + ], + [ + 1682, + 595 + ], + [ + 1687, + 625 + ], + [ + 1680, + 632 + ], + [ + 1678, + 640 + ], + [ + 1684, + 653 + ], + [ + 1701, + 669 + ], + [ + 1741, + 692 + ], + [ + 1756, + 722 + ], + [ + 1785, + 738 + ], + [ + 1827, + 755 + ], + [ + 1856, + 749 + ], + [ + 1875, + 746 + ], + [ + 1901, + 771 + ], + [ + 1946, + 798 + ], + [ + 1994, + 800 + ], + [ + 2033, + 790 + ], + [ + 2048, + 769 + ], + [ + 2048, + 307 + ], + [ + 2025, + 309 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000171_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000171_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..6ba373d3ba48dbd3a2f654fe9cee4c252bd06beb Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000171_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000172_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000172_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..a706282bdd4a4f84d1b173761a3fd122b9763d53 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000172_000019_gtFine_polygons.json @@ -0,0 +1,7726 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 70, + 293 + ], + [ + 2048, + 369 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 276 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 493, + 480 + ], + [ + 734, + 484 + ], + [ + 924, + 481 + ], + [ + 1041, + 479 + ], + [ + 1092, + 476 + ], + [ + 1127, + 476 + ], + 
[ + 1162, + 473 + ], + [ + 1229, + 468 + ], + [ + 1339, + 464 + ], + [ + 1332, + 446 + ], + [ + 1102, + 445 + ], + [ + 936, + 448 + ], + [ + 464, + 459 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2039, + 681 + ], + [ + 1903, + 675 + ], + [ + 1816, + 654 + ], + [ + 1789, + 533 + ], + [ + 1840, + 491 + ], + [ + 2048, + 518 + ], + [ + 2048, + 680 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 1157, + 65 + ], + [ + 1712, + 304 + ], + [ + 1680, + 0 + ], + [ + 1163, + 0 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 81, + 478 + ], + [ + 555, + 465 + ], + [ + 712, + 472 + ], + [ + 923, + 465 + ], + [ + 1034, + 459 + ], + [ + 1250, + 443 + ], + [ + 1334, + 441 + ], + [ + 1431, + 440 + ], + [ + 1582, + 437 + ], + [ + 1923, + 510 + ], + [ + 2048, + 518 + ], + [ + 2048, + 0 + ], + [ + 1642, + 0 + ], + [ + 1644, + 189 + ], + [ + 1632, + 201 + ], + [ + 1629, + 252 + ], + [ + 1390, + 200 + ], + [ + 1247, + 93 + ], + [ + 1214, + 62 + ], + [ + 1216, + 36 + ], + [ + 1198, + 36 + ], + [ + 1197, + 47 + ], + [ + 1195, + 44 + ], + [ + 1194, + 24 + ], + [ + 1190, + 19 + ], + [ + 1189, + 0 + ], + [ + 1, + 0 + ], + [ + 0, + 484 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1343, + 26 + ], + [ + 1330, + 33 + ], + [ + 1330, + 26 + ], + [ + 1323, + 27 + ], + [ + 1322, + 33 + ], + [ + 1316, + 37 + ], + [ + 1311, + 39 + ], + [ + 1311, + 46 + ], + [ + 1298, + 49 + ], + [ + 1296, + 37 + ], + [ + 1277, + 39 + ], + [ + 1256, + 53 + ], + [ + 1232, + 68 + ], + [ + 1243, + 80 + ], + [ + 1272, + 71 + ], + [ + 1268, + 79 + ], + [ + 1258, + 83 + ], + [ + 1248, + 88 + ], + [ + 1235, + 94 + ], + [ + 1231, + 104 + ], + [ + 1233, + 111 + ], + [ + 1240, + 113 + ], + [ + 1247, + 116 + ], + [ + 1259, + 111 + ], + [ + 1270, + 112 + ], + [ + 1256, + 121 + ], + [ + 1245, + 128 + ], + [ + 1236, + 130 + ], + [ + 1238, + 138 + ], + [ + 1241, + 138 + ], + [ + 1236, + 148 + ], + [ + 1231, + 162 + ], + [ + 1245, + 185 + ], + [ + 1255, + 186 + ], + [ + 1268, + 186 + ], + [ + 1285, + 182 + ], + [ + 1304, + 182 + ], + [ + 1299, + 191 + ], + [ + 1290, + 197 + ], + [ + 1286, + 210 + ], + [ + 1281, + 220 + ], + [ + 1276, + 225 + ], + [ + 1277, + 231 + ], + [ + 1291, + 229 + ], + [ + 1311, + 222 + ], + [ + 1332, + 210 + ], + [ + 1343, + 205 + ], + [ + 1351, + 206 + ], + [ + 1345, + 223 + ], + [ + 1348, + 236 + ], + [ + 1347, + 240 + ], + [ + 1339, + 238 + ], + [ + 1340, + 249 + ], + [ + 1343, + 251 + ], + [ + 1374, + 238 + ], + [ + 1379, + 243 + ], + [ + 1379, + 250 + ], + [ + 1376, + 258 + ], + [ + 1383, + 262 + ], + [ + 1391, + 267 + ], + [ + 1401, + 263 + ], + [ + 1403, + 267 + ], + [ + 1384, + 275 + ], + [ + 1386, + 288 + ], + [ + 1380, + 289 + ], + [ + 1376, + 302 + ], + [ + 1361, + 314 + ], + [ + 1366, + 318 + ], + [ + 1377, + 322 + ], + [ + 1390, + 314 + ], + [ + 1405, + 299 + ], + [ + 1412, + 296 + ], + [ + 1416, + 296 + ], + [ + 1413, + 301 + ], + [ + 1417, + 306 + ], + [ + 1421, + 311 + ], + [ + 1433, + 316 + ], + [ + 1444, + 321 + ], + [ + 1458, + 327 + ], + [ + 1470, + 331 + ], + [ + 1473, + 342 + ], + [ + 1472, + 425 + ], + [ + 1490, + 425 + ], + [ + 1488, + 410 + ], + [ + 1485, + 373 + ], + [ + 1489, + 349 + ], + [ + 1486, + 329 + ], + [ + 1494, + 328 + ], + [ + 1504, + 330 + ], + [ + 1507, + 328 + ], + [ + 1505, + 324 + ], + [ + 1499, + 320 + ], + [ + 1504, + 316 + ], + [ + 1505, + 313 + ], + [ + 1493, + 307 + ], + [ + 1494, + 298 + ], + [ + 1497, + 301 + ], + [ + 1505, + 302 + ], + [ + 1513, + 297 + ], + [ + 1517, + 291 + ], + [ + 1525, + 290 + ], + [ + 1534, + 291 + ], + [ + 
1534, + 295 + ], + [ + 1531, + 308 + ], + [ + 1531, + 324 + ], + [ + 1529, + 337 + ], + [ + 1530, + 350 + ], + [ + 1538, + 358 + ], + [ + 1546, + 363 + ], + [ + 1547, + 383 + ], + [ + 1565, + 394 + ], + [ + 1576, + 380 + ], + [ + 1577, + 370 + ], + [ + 1580, + 357 + ], + [ + 1588, + 352 + ], + [ + 1598, + 345 + ], + [ + 1605, + 338 + ], + [ + 1604, + 326 + ], + [ + 1600, + 322 + ], + [ + 1601, + 316 + ], + [ + 1630, + 315 + ], + [ + 1630, + 245 + ], + [ + 1632, + 202 + ], + [ + 1627, + 204 + ], + [ + 1622, + 197 + ], + [ + 1620, + 187 + ], + [ + 1617, + 160 + ], + [ + 1630, + 161 + ], + [ + 1638, + 158 + ], + [ + 1638, + 151 + ], + [ + 1645, + 149 + ], + [ + 1646, + 143 + ], + [ + 1644, + 140 + ], + [ + 1634, + 139 + ], + [ + 1638, + 136 + ], + [ + 1636, + 130 + ], + [ + 1632, + 128 + ], + [ + 1630, + 125 + ], + [ + 1630, + 119 + ], + [ + 1626, + 110 + ], + [ + 1617, + 97 + ], + [ + 1604, + 95 + ], + [ + 1594, + 86 + ], + [ + 1590, + 75 + ], + [ + 1576, + 76 + ], + [ + 1565, + 77 + ], + [ + 1564, + 68 + ], + [ + 1570, + 63 + ], + [ + 1572, + 61 + ], + [ + 1581, + 58 + ], + [ + 1589, + 53 + ], + [ + 1589, + 48 + ], + [ + 1587, + 40 + ], + [ + 1595, + 37 + ], + [ + 1601, + 34 + ], + [ + 1605, + 26 + ], + [ + 1598, + 22 + ], + [ + 1588, + 18 + ], + [ + 1579, + 8 + ], + [ + 1567, + 14 + ], + [ + 1553, + 4 + ], + [ + 1547, + 9 + ], + [ + 1532, + 0 + ], + [ + 1346, + 0 + ], + [ + 1348, + 7 + ], + [ + 1338, + 14 + ], + [ + 1355, + 17 + ], + [ + 1354, + 25 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1469, + 343 + ], + [ + 1468, + 419 + ], + [ + 1472, + 422 + ], + [ + 1473, + 343 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1465, + 369 + ], + [ + 1462, + 374 + ], + [ + 1462, + 380 + ], + [ + 1464, + 384 + ], + [ + 1468, + 386 + ], + [ + 1471, + 387 + ], + [ + 1476, + 386 + ], + [ + 1479, + 382 + ], + [ + 1479, + 375 + ], + [ + 1476, + 370 + ], + [ + 1470, + 368 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1466, + 356 + ], + [ + 1460, + 359 + ], + [ + 1460, + 363 + ], + [ + 1462, + 365 + ], + [ + 1464, + 365 + ], + [ + 1465, + 367 + ], + [ + 1468, + 369 + ], + [ + 1470, + 369 + ], + [ + 1472, + 367 + ], + [ + 1474, + 365 + ], + [ + 1474, + 357 + ], + [ + 1471, + 354 + ], + [ + 1469, + 354 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1602, + 366 + ], + [ + 1602, + 388 + ], + [ + 1623, + 388 + ], + [ + 1624, + 365 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1615, + 194 + ], + [ + 1615, + 200 + ], + [ + 1588, + 201 + ], + [ + 1589, + 185 + ], + [ + 1615, + 187 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1568, + 163 + ], + [ + 1636, + 166 + ], + [ + 1636, + 189 + ], + [ + 1568, + 189 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1457, + 10 + ], + [ + 1503, + 23 + ], + [ + 1535, + 39 + ], + [ + 1551, + 55 + ], + [ + 1562, + 74 + ], + [ + 1564, + 98 + ], + [ + 1565, + 142 + ], + [ + 1563, + 230 + ], + [ + 1559, + 239 + ], + [ + 1559, + 263 + ], + [ + 1562, + 364 + ], + [ + 1561, + 373 + ], + [ + 1562, + 389 + ], + [ + 1578, + 388 + ], + [ + 1578, + 373 + ], + [ + 1575, + 362 + ], + [ + 1575, + 324 + ], + [ + 1572, + 186 + ], + [ + 1572, + 96 + ], + [ + 1569, + 72 + ], + [ + 1558, + 51 + ], + [ + 1535, + 30 + ], + [ + 1499, + 15 + ], + [ + 1457, + 3 + ], + [ + 1438, + 0 + ], + [ + 1409, + 0 + ], + [ + 1426, + 3 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1582, + 228 + ], + [ + 1581, + 216 + ], + [ + 1569, + 221 + ], + [ + 1571, + 236 + ], + [ + 1582, + 232 + ] 
+ ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1584, + 195 + ], + [ + 1566, + 200 + ], + [ + 1567, + 224 + ], + [ + 1584, + 218 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1562, + 260 + ], + [ + 1558, + 261 + ], + [ + 1531, + 266 + ], + [ + 1533, + 279 + ], + [ + 1549, + 284 + ], + [ + 1550, + 290 + ], + [ + 1532, + 291 + ], + [ + 1534, + 304 + ], + [ + 1549, + 308 + ], + [ + 1550, + 314 + ], + [ + 1532, + 316 + ], + [ + 1536, + 329 + ], + [ + 1549, + 332 + ], + [ + 1550, + 337 + ], + [ + 1561, + 337 + ], + [ + 1568, + 322 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1583, + 260 + ], + [ + 1557, + 259 + ], + [ + 1561, + 337 + ], + [ + 1575, + 338 + ], + [ + 1586, + 333 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1658, + 246 + ], + [ + 1572, + 242 + ], + [ + 1611, + 315 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1742, + 281 + ], + [ + 1707, + 282 + ], + [ + 1708, + 294 + ], + [ + 1744, + 292 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1719, + 301 + ], + [ + 1735, + 300 + ], + [ + 1734, + 290 + ], + [ + 1717, + 291 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1705, + 372 + ], + [ + 1702, + 280 + ], + [ + 1709, + 281 + ], + [ + 1715, + 369 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1723, + 334 + ], + [ + 1714, + 332 + ], + [ + 1710, + 326 + ], + [ + 1706, + 327 + ], + [ + 1707, + 358 + ], + [ + 1714, + 359 + ], + [ + 1714, + 353 + ], + [ + 1723, + 351 + ], + [ + 1722, + 345 + ], + [ + 1713, + 344 + ], + [ + 1713, + 341 + ], + [ + 1723, + 338 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1906, + 441 + ], + [ + 1865, + 442 + ], + [ + 1854, + 464 + ], + [ + 1851, + 463 + ], + [ + 1850, + 451 + ], + [ + 1818, + 447 + ], + [ + 1795, + 450 + ], + [ + 1817, + 526 + ], + [ + 1966, + 531 + ], + [ + 1963, + 449 + ], + [ + 1950, + 431 + ], + [ + 1929, + 431 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1803, + 362 + ], + [ + 1803, + 446 + ], + [ + 1673, + 392 + ], + [ + 1674, + 369 + ], + [ + 1756, + 363 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1821, + 315 + ], + [ + 1800, + 311 + ], + [ + 1794, + 353 + ], + [ + 1821, + 359 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1802, + 201 + ], + [ + 1697, + 258 + ], + [ + 1697, + 269 + ], + [ + 1746, + 258 + ], + [ + 1820, + 266 + ], + [ + 2048, + 221 + ], + [ + 2048, + 129 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 2040, + 349 + ], + [ + 2031, + 363 + ], + [ + 2030, + 376 + ], + [ + 2008, + 384 + ], + [ + 2003, + 395 + ], + [ + 2007, + 439 + ], + [ + 2009, + 450 + ], + [ + 2048, + 452 + ], + [ + 2048, + 347 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1972, + 442 + ], + [ + 2048, + 443 + ], + [ + 2048, + 626 + ], + [ + 1974, + 609 + ], + [ + 1967, + 591 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1857, + 602 + ], + [ + 1878, + 581 + ], + [ + 1886, + 548 + ], + [ + 1893, + 502 + ], + [ + 1895, + 480 + ], + [ + 1894, + 360 + ], + [ + 1896, + 329 + ], + [ + 1903, + 270 + ], + [ + 1902, + 233 + ], + [ + 1902, + 204 + ], + [ + 1894, + 146 + ], + [ + 1888, + 100 + ], + [ + 1855, + 66 + ], + [ + 1827, + 22 + ], + [ + 1804, + 0 + ], + [ + 1834, + 0 + ], + [ + 1852, + 27 + ], + [ + 1869, + 51 + ], + [ + 1873, + 56 + ], + [ + 1882, + 52 + ], + [ + 1883, + 41 + ], + [ + 1878, + 0 + ], + [ + 1906, + 0 + ], + [ + 1907, + 11 + ], + [ + 1905, + 45 + ], + [ + 1909, + 76 + ], + [ + 1917, + 92 + ], + [ + 1922, + 121 + 
], + [ + 1924, + 146 + ], + [ + 1928, + 166 + ], + [ + 1936, + 220 + ], + [ + 1936, + 249 + ], + [ + 1932, + 296 + ], + [ + 1933, + 368 + ], + [ + 1937, + 415 + ], + [ + 1936, + 477 + ], + [ + 1935, + 531 + ], + [ + 1939, + 559 + ], + [ + 1954, + 584 + ], + [ + 1972, + 604 + ], + [ + 1967, + 608 + ], + [ + 1941, + 608 + ], + [ + 1914, + 609 + ], + [ + 1885, + 603 + ], + [ + 1869, + 604 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1955, + 441 + ], + [ + 1961, + 605 + ], + [ + 1979, + 605 + ], + [ + 1972, + 440 + ], + [ + 1963, + 438 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1430, + 416 + ], + [ + 1429, + 406 + ], + [ + 1423, + 399 + ], + [ + 1419, + 383 + ], + [ + 1420, + 382 + ], + [ + 1425, + 380 + ], + [ + 1423, + 370 + ], + [ + 1419, + 362 + ], + [ + 1423, + 358 + ], + [ + 1431, + 354 + ], + [ + 1430, + 361 + ], + [ + 1433, + 375 + ], + [ + 1436, + 381 + ], + [ + 1439, + 392 + ], + [ + 1434, + 398 + ], + [ + 1432, + 402 + ], + [ + 1433, + 420 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1366, + 426 + ], + [ + 1332, + 426 + ], + [ + 1313, + 429 + ], + [ + 1310, + 432 + ], + [ + 1353, + 460 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1274, + 368 + ], + [ + 1277, + 410 + ], + [ + 1295, + 410 + ], + [ + 1292, + 368 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1306, + 344 + ], + [ + 1291, + 344 + ], + [ + 1285, + 349 + ], + [ + 1282, + 355 + ], + [ + 1282, + 364 + ], + [ + 1285, + 440 + ], + [ + 1287, + 440 + ], + [ + 1283, + 364 + ], + [ + 1284, + 354 + ], + [ + 1290, + 349 + ], + [ + 1296, + 346 + ], + [ + 1309, + 344 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1307, + 337 + ], + [ + 1303, + 344 + ], + [ + 1310, + 346 + ], + [ + 1315, + 345 + ], + [ + 1318, + 342 + ], + [ + 1315, + 338 + ], + [ + 1314, + 335 + ], + [ + 1310, + 333 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1335, + 436 + ], + [ + 1312, + 431 + ], + [ + 1296, + 431 + ], + [ + 1282, + 438 + ], + [ + 1282, + 462 + ], + [ + 1320, + 462 + ], + [ + 1322, + 466 + ], + [ + 1327, + 467 + ], + [ + 1332, + 466 + ], + [ + 1337, + 463 + ], + [ + 1351, + 449 + ], + [ + 1345, + 440 + ], + [ + 1338, + 437 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1480, + 415 + ], + [ + 1432, + 412 + ], + [ + 1376, + 413 + ], + [ + 1348, + 438 + ], + [ + 1335, + 463 + ], + [ + 1322, + 471 + ], + [ + 1313, + 497 + ], + [ + 1313, + 519 + ], + [ + 1314, + 537 + ], + [ + 1313, + 558 + ], + [ + 1315, + 575 + ], + [ + 1319, + 580 + ], + [ + 1330, + 582 + ], + [ + 1341, + 582 + ], + [ + 1344, + 576 + ], + [ + 1346, + 565 + ], + [ + 1355, + 564 + ], + [ + 1365, + 571 + ], + [ + 1365, + 587 + ], + [ + 1366, + 597 + ], + [ + 1373, + 603 + ], + [ + 1381, + 605 + ], + [ + 1388, + 603 + ], + [ + 1392, + 600 + ], + [ + 1397, + 593 + ], + [ + 1410, + 587 + ], + [ + 1441, + 570 + ], + [ + 1501, + 469 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1743, + 392 + ], + [ + 1652, + 384 + ], + [ + 1571, + 383 + ], + [ + 1522, + 388 + ], + [ + 1477, + 416 + ], + [ + 1459, + 438 + ], + [ + 1448, + 459 + ], + [ + 1445, + 455 + ], + [ + 1433, + 458 + ], + [ + 1429, + 465 + ], + [ + 1431, + 476 + ], + [ + 1436, + 482 + ], + [ + 1424, + 504 + ], + [ + 1415, + 526 + ], + [ + 1412, + 546 + ], + [ + 1406, + 574 + ], + [ + 1406, + 594 + ], + [ + 1410, + 615 + ], + [ + 1416, + 628 + ], + [ + 1430, + 632 + ], + [ + 1443, + 632 + ], + [ + 1453, + 629 + ], + [ + 1457, + 617 + ], + [ + 1491, + 633 + ], + [ + 1490, + 648 + ], + [ + 1491, + 660 + ], + [ + 1498, + 671 + ], + [ 
+ 1513, + 674 + ], + [ + 1527, + 675 + ], + [ + 1533, + 672 + ], + [ + 1539, + 664 + ], + [ + 1547, + 639 + ], + [ + 1726, + 639 + ], + [ + 1742, + 647 + ], + [ + 1751, + 650 + ], + [ + 1769, + 652 + ], + [ + 1784, + 680 + ], + [ + 1808, + 683 + ], + [ + 1823, + 678 + ], + [ + 1834, + 662 + ], + [ + 1836, + 643 + ], + [ + 1843, + 632 + ], + [ + 1847, + 583 + ], + [ + 1845, + 557 + ], + [ + 1836, + 524 + ], + [ + 1828, + 486 + ], + [ + 1797, + 436 + ], + [ + 1767, + 403 + ], + [ + 1750, + 393 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1134, + 379 + ], + [ + 1182, + 396 + ], + [ + 1182, + 403 + ], + [ + 1238, + 403 + ], + [ + 1239, + 395 + ], + [ + 1192, + 380 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1229, + 333 + ], + [ + 1229, + 339 + ], + [ + 1226, + 343 + ], + [ + 1225, + 348 + ], + [ + 1227, + 354 + ], + [ + 1227, + 362 + ], + [ + 1226, + 368 + ], + [ + 1225, + 373 + ], + [ + 1222, + 378 + ], + [ + 1223, + 384 + ], + [ + 1225, + 389 + ], + [ + 1228, + 391 + ], + [ + 1233, + 392 + ], + [ + 1234, + 394 + ], + [ + 1235, + 415 + ], + [ + 1239, + 415 + ], + [ + 1239, + 398 + ], + [ + 1244, + 395 + ], + [ + 1250, + 395 + ], + [ + 1252, + 395 + ], + [ + 1257, + 395 + ], + [ + 1260, + 392 + ], + [ + 1260, + 387 + ], + [ + 1250, + 386 + ], + [ + 1249, + 382 + ], + [ + 1253, + 379 + ], + [ + 1251, + 376 + ], + [ + 1251, + 366 + ], + [ + 1246, + 363 + ], + [ + 1240, + 356 + ], + [ + 1240, + 348 + ], + [ + 1245, + 346 + ], + [ + 1246, + 341 + ], + [ + 1236, + 338 + ], + [ + 1233, + 329 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1268, + 422 + ], + [ + 1279, + 423 + ], + [ + 1285, + 434 + ], + [ + 1286, + 446 + ], + [ + 1288, + 457 + ], + [ + 1286, + 461 + ], + [ + 1278, + 463 + ], + [ + 1275, + 468 + ], + [ + 1268, + 470 + ], + [ + 1261, + 467 + ], + [ + 1255, + 450 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1267, + 414 + ], + [ + 1245, + 412 + ], + [ + 1212, + 410 + ], + [ + 1177, + 409 + ], + [ + 1159, + 413 + ], + [ + 1132, + 430 + ], + [ + 1180, + 440 + ], + [ + 1194, + 459 + ], + [ + 1205, + 465 + ], + [ + 1219, + 465 + ], + [ + 1221, + 468 + ], + [ + 1225, + 471 + ], + [ + 1229, + 472 + ], + [ + 1233, + 471 + ], + [ + 1236, + 471 + ], + [ + 1244, + 472 + ], + [ + 1249, + 472 + ], + [ + 1253, + 468 + ], + [ + 1254, + 464 + ], + [ + 1272, + 463 + ], + [ + 1275, + 443 + ], + [ + 1271, + 425 + ], + [ + 1270, + 415 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1204, + 427 + ], + [ + 1174, + 425 + ], + [ + 1152, + 424 + ], + [ + 1134, + 431 + ], + [ + 1143, + 444 + ], + [ + 1159, + 465 + ], + [ + 1165, + 467 + ], + [ + 1169, + 468 + ], + [ + 1172, + 470 + ], + [ + 1176, + 473 + ], + [ + 1180, + 473 + ], + [ + 1183, + 472 + ], + [ + 1188, + 472 + ], + [ + 1195, + 473 + ], + [ + 1199, + 473 + ], + [ + 1204, + 470 + ], + [ + 1208, + 463 + ], + [ + 1213, + 458 + ], + [ + 1217, + 453 + ], + [ + 1215, + 439 + ], + [ + 1207, + 428 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1114, + 423 + ], + [ + 1088, + 422 + ], + [ + 1070, + 428 + ], + [ + 1058, + 437 + ], + [ + 1033, + 440 + ], + [ + 1016, + 444 + ], + [ + 1013, + 451 + ], + [ + 1016, + 463 + ], + [ + 1020, + 466 + ], + [ + 1040, + 465 + ], + [ + 1111, + 466 + ], + [ + 1115, + 468 + ], + [ + 1121, + 468 + ], + [ + 1126, + 468 + ], + [ + 1131, + 467 + ], + [ + 1140, + 464 + ], + [ + 1145, + 461 + ], + [ + 1153, + 461 + ], + [ + 1160, + 458 + ], + [ + 1164, + 450 + ], + [ + 1165, + 442 + ], + [ + 1165, + 437 + ], + [ + 1159, + 437 + ], + [ + 1149, + 436 + ], + [ + 1133, 
+ 426 + ], + [ + 1119, + 423 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1033, + 418 + ], + [ + 1036, + 372 + ], + [ + 1040, + 358 + ], + [ + 1040, + 332 + ], + [ + 1035, + 317 + ], + [ + 1033, + 313 + ], + [ + 1030, + 309 + ], + [ + 1022, + 305 + ], + [ + 1010, + 296 + ], + [ + 1012, + 287 + ], + [ + 1008, + 270 + ], + [ + 1012, + 270 + ], + [ + 1016, + 272 + ], + [ + 1017, + 265 + ], + [ + 1015, + 256 + ], + [ + 1008, + 250 + ], + [ + 1011, + 237 + ], + [ + 1014, + 232 + ], + [ + 1021, + 228 + ], + [ + 1017, + 223 + ], + [ + 1016, + 216 + ], + [ + 1010, + 202 + ], + [ + 1012, + 197 + ], + [ + 1017, + 189 + ], + [ + 1026, + 182 + ], + [ + 1032, + 177 + ], + [ + 1025, + 161 + ], + [ + 1022, + 153 + ], + [ + 1022, + 143 + ], + [ + 1025, + 141 + ], + [ + 1022, + 131 + ], + [ + 1028, + 127 + ], + [ + 1037, + 119 + ], + [ + 1033, + 107 + ], + [ + 1027, + 118 + ], + [ + 1024, + 122 + ], + [ + 1017, + 121 + ], + [ + 1008, + 121 + ], + [ + 1002, + 119 + ], + [ + 992, + 124 + ], + [ + 989, + 131 + ], + [ + 983, + 127 + ], + [ + 977, + 129 + ], + [ + 970, + 128 + ], + [ + 991, + 99 + ], + [ + 991, + 86 + ], + [ + 995, + 86 + ], + [ + 1001, + 90 + ], + [ + 1005, + 89 + ], + [ + 1007, + 77 + ], + [ + 1010, + 71 + ], + [ + 1011, + 65 + ], + [ + 1010, + 64 + ], + [ + 1001, + 58 + ], + [ + 988, + 45 + ], + [ + 986, + 35 + ], + [ + 1001, + 29 + ], + [ + 1003, + 19 + ], + [ + 1007, + 8 + ], + [ + 1004, + 4 + ], + [ + 1000, + 10 + ], + [ + 990, + 9 + ], + [ + 982, + 0 + ], + [ + 1109, + 0 + ], + [ + 1115, + 1 + ], + [ + 1119, + 6 + ], + [ + 1125, + 5 + ], + [ + 1126, + 3 + ], + [ + 1129, + 11 + ], + [ + 1132, + 19 + ], + [ + 1138, + 18 + ], + [ + 1145, + 14 + ], + [ + 1153, + 14 + ], + [ + 1158, + 18 + ], + [ + 1168, + 25 + ], + [ + 1171, + 34 + ], + [ + 1163, + 44 + ], + [ + 1157, + 50 + ], + [ + 1168, + 52 + ], + [ + 1175, + 55 + ], + [ + 1176, + 62 + ], + [ + 1174, + 64 + ], + [ + 1161, + 66 + ], + [ + 1155, + 70 + ], + [ + 1162, + 75 + ], + [ + 1175, + 86 + ], + [ + 1186, + 93 + ], + [ + 1191, + 96 + ], + [ + 1197, + 106 + ], + [ + 1198, + 114 + ], + [ + 1189, + 118 + ], + [ + 1185, + 121 + ], + [ + 1192, + 131 + ], + [ + 1208, + 138 + ], + [ + 1226, + 145 + ], + [ + 1238, + 147 + ], + [ + 1240, + 159 + ], + [ + 1245, + 175 + ], + [ + 1249, + 188 + ], + [ + 1252, + 194 + ], + [ + 1260, + 206 + ], + [ + 1265, + 212 + ], + [ + 1264, + 218 + ], + [ + 1259, + 224 + ], + [ + 1254, + 235 + ], + [ + 1257, + 238 + ], + [ + 1251, + 241 + ], + [ + 1247, + 238 + ], + [ + 1246, + 245 + ], + [ + 1244, + 258 + ], + [ + 1239, + 265 + ], + [ + 1236, + 262 + ], + [ + 1230, + 258 + ], + [ + 1225, + 256 + ], + [ + 1219, + 251 + ], + [ + 1210, + 254 + ], + [ + 1203, + 256 + ], + [ + 1202, + 251 + ], + [ + 1199, + 247 + ], + [ + 1192, + 246 + ], + [ + 1185, + 254 + ], + [ + 1179, + 258 + ], + [ + 1172, + 259 + ], + [ + 1163, + 259 + ], + [ + 1156, + 255 + ], + [ + 1151, + 250 + ], + [ + 1151, + 241 + ], + [ + 1145, + 245 + ], + [ + 1141, + 251 + ], + [ + 1135, + 256 + ], + [ + 1133, + 262 + ], + [ + 1129, + 269 + ], + [ + 1122, + 273 + ], + [ + 1113, + 276 + ], + [ + 1110, + 279 + ], + [ + 1104, + 281 + ], + [ + 1096, + 285 + ], + [ + 1095, + 289 + ], + [ + 1089, + 294 + ], + [ + 1085, + 296 + ], + [ + 1087, + 304 + ], + [ + 1085, + 309 + ], + [ + 1081, + 313 + ], + [ + 1076, + 317 + ], + [ + 1069, + 319 + ], + [ + 1063, + 323 + ], + [ + 1053, + 332 + ], + [ + 1049, + 391 + ], + [ + 1048, + 427 + ], + [ + 1045, + 443 + ], + [ + 1049, + 464 + ], + [ + 1029, + 464 + ], + [ + 1032, + 447 + ], + [ + 1034, + 
423 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1155, + 470 + ], + [ + 1121, + 471 + ], + [ + 1090, + 472 + ], + [ + 1075, + 469 + ], + [ + 1058, + 468 + ], + [ + 1047, + 469 + ], + [ + 1034, + 468 + ], + [ + 1021, + 467 + ], + [ + 1023, + 464 + ], + [ + 1031, + 460 + ], + [ + 1040, + 462 + ], + [ + 1052, + 462 + ], + [ + 1068, + 462 + ], + [ + 1083, + 464 + ], + [ + 1099, + 463 + ], + [ + 1108, + 464 + ], + [ + 1116, + 466 + ], + [ + 1125, + 467 + ], + [ + 1133, + 466 + ], + [ + 1145, + 466 + ], + [ + 1150, + 466 + ], + [ + 1156, + 469 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1124, + 443 + ], + [ + 1128, + 468 + ], + [ + 1145, + 467 + ], + [ + 1138, + 440 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1132, + 448 + ], + [ + 1137, + 468 + ], + [ + 1140, + 468 + ], + [ + 1134, + 441 + ], + [ + 1130, + 442 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1112, + 439 + ], + [ + 1113, + 471 + ], + [ + 1116, + 471 + ], + [ + 1116, + 439 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1087, + 412 + ], + [ + 1093, + 420 + ], + [ + 1097, + 428 + ], + [ + 1098, + 438 + ], + [ + 1095, + 445 + ], + [ + 1097, + 458 + ], + [ + 1097, + 464 + ], + [ + 1097, + 466 + ], + [ + 1093, + 467 + ], + [ + 1092, + 466 + ], + [ + 1081, + 454 + ], + [ + 1078, + 453 + ], + [ + 1078, + 446 + ], + [ + 1084, + 432 + ], + [ + 1082, + 429 + ], + [ + 1082, + 426 + ], + [ + 1084, + 422 + ], + [ + 1082, + 419 + ], + [ + 1081, + 414 + ], + [ + 1083, + 412 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1089, + 443 + ], + [ + 1089, + 473 + ], + [ + 1093, + 473 + ], + [ + 1093, + 443 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1003, + 439 + ], + [ + 1003, + 472 + ], + [ + 1007, + 472 + ], + [ + 1007, + 440 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1038, + 443 + ], + [ + 1039, + 474 + ], + [ + 1035, + 474 + ], + [ + 1035, + 441 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1192, + 224 + ], + [ + 1191, + 259 + ], + [ + 1189, + 262 + ], + [ + 1176, + 262 + ], + [ + 1174, + 261 + ], + [ + 1173, + 257 + ], + [ + 1173, + 234 + ], + [ + 1173, + 226 + ], + [ + 1175, + 224 + ], + [ + 1183, + 223 + ], + [ + 1189, + 223 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1032, + 327 + ], + [ + 1033, + 335 + ], + [ + 1047, + 334 + ], + [ + 1047, + 326 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1055, + 318 + ], + [ + 1027, + 317 + ], + [ + 1027, + 329 + ], + [ + 1055, + 329 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1066, + 350 + ], + [ + 1058, + 350 + ], + [ + 1058, + 341 + ], + [ + 1066, + 342 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1055, + 334 + ], + [ + 1068, + 334 + ], + [ + 1069, + 345 + ], + [ + 1056, + 344 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1052, + 475 + ], + [ + 1059, + 475 + ], + [ + 1058, + 403 + ], + [ + 1058, + 354 + ], + [ + 1057, + 292 + ], + [ + 1058, + 280 + ], + [ + 1063, + 270 + ], + [ + 1072, + 263 + ], + [ + 1085, + 258 + ], + [ + 1110, + 249 + ], + [ + 1141, + 240 + ], + [ + 1175, + 232 + ], + [ + 1180, + 232 + ], + [ + 1180, + 229 + ], + [ + 1173, + 229 + ], + [ + 1074, + 258 + ], + [ + 1061, + 268 + ], + [ + 1054, + 283 + ], + [ + 1053, + 299 + ], + [ + 1054, + 319 + ], + [ + 1053, + 355 + ], + [ + 1053, + 416 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1027, + 358 + ], + [ + 1043, + 386 + ], + [ + 1054, + 356 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1070, + 
362 + ], + [ + 1058, + 355 + ], + [ + 1055, + 362 + ], + [ + 1054, + 377 + ], + [ + 1057, + 386 + ], + [ + 1058, + 389 + ], + [ + 1065, + 389 + ], + [ + 1065, + 386 + ], + [ + 1070, + 382 + ], + [ + 1070, + 378 + ], + [ + 1065, + 378 + ], + [ + 1065, + 375 + ], + [ + 1070, + 375 + ], + [ + 1070, + 369 + ], + [ + 1065, + 368 + ], + [ + 1065, + 364 + ], + [ + 1069, + 364 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1064, + 357 + ], + [ + 1051, + 353 + ], + [ + 1050, + 355 + ], + [ + 1049, + 389 + ], + [ + 1055, + 389 + ], + [ + 1058, + 385 + ], + [ + 1064, + 382 + ], + [ + 1064, + 378 + ], + [ + 1056, + 378 + ], + [ + 1057, + 375 + ], + [ + 1064, + 373 + ], + [ + 1064, + 368 + ], + [ + 1056, + 368 + ], + [ + 1057, + 364 + ], + [ + 1064, + 362 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 991, + 412 + ], + [ + 989, + 420 + ], + [ + 985, + 423 + ], + [ + 983, + 429 + ], + [ + 983, + 435 + ], + [ + 987, + 442 + ], + [ + 988, + 451 + ], + [ + 986, + 461 + ], + [ + 986, + 465 + ], + [ + 988, + 467 + ], + [ + 994, + 468 + ], + [ + 999, + 467 + ], + [ + 999, + 464 + ], + [ + 995, + 456 + ], + [ + 997, + 443 + ], + [ + 997, + 430 + ], + [ + 1002, + 430 + ], + [ + 1003, + 424 + ], + [ + 1000, + 419 + ], + [ + 996, + 419 + ], + [ + 1000, + 424 + ], + [ + 996, + 422 + ], + [ + 996, + 418 + ], + [ + 996, + 414 + ], + [ + 994, + 412 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 950, + 437 + ], + [ + 951, + 473 + ], + [ + 954, + 473 + ], + [ + 954, + 437 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1241, + 31 + ], + [ + 1278, + 33 + ], + [ + 1282, + 27 + ], + [ + 1283, + 0 + ], + [ + 1237, + 0 + ], + [ + 1237, + 19 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 996, + 368 + ], + [ + 960, + 335 + ], + [ + 889, + 330 + ], + [ + 889, + 376 + ], + [ + 925, + 376 + ], + [ + 995, + 377 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 879, + 366 + ], + [ + 842, + 320 + ], + [ + 576, + 312 + ], + [ + 577, + 319 + ], + [ + 584, + 320 + ], + [ + 611, + 364 + ], + [ + 609, + 364 + ], + [ + 610, + 369 + ], + [ + 611, + 370 + ], + [ + 612, + 384 + ], + [ + 882, + 384 + ], + [ + 881, + 371 + ], + [ + 884, + 371 + ], + [ + 884, + 366 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 882, + 293 + ], + [ + 905, + 293 + ], + [ + 905, + 297 + ], + [ + 896, + 297 + ], + [ + 897, + 313 + ], + [ + 888, + 314 + ], + [ + 894, + 316 + ], + [ + 896, + 321 + ], + [ + 897, + 328 + ], + [ + 894, + 332 + ], + [ + 890, + 336 + ], + [ + 883, + 337 + ], + [ + 873, + 335 + ], + [ + 868, + 332 + ], + [ + 867, + 322 + ], + [ + 873, + 315 + ], + [ + 877, + 310 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 917, + 256 + ], + [ + 918, + 475 + ], + [ + 927, + 475 + ], + [ + 934, + 475 + ], + [ + 932, + 250 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 905, + 437 + ], + [ + 867, + 434 + ], + [ + 867, + 439 + ], + [ + 848, + 441 + ], + [ + 845, + 437 + ], + [ + 810, + 435 + ], + [ + 809, + 439 + ], + [ + 773, + 437 + ], + [ + 741, + 434 + ], + [ + 743, + 471 + ], + [ + 905, + 469 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 812, + 437 + ], + [ + 810, + 479 + ], + [ + 815, + 479 + ], + [ + 817, + 437 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 894, + 438 + ], + [ + 893, + 477 + ], + [ + 899, + 477 + ], + [ + 898, + 438 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 701, + 415 + ], + [ + 703, + 410 + ], + [ + 702, + 403 + ], + [ + 704, + 401 + ], + [ + 708, + 400 + ], + [ + 712, + 397 + ], + 
[ + 717, + 396 + ], + [ + 720, + 400 + ], + [ + 724, + 406 + ], + [ + 725, + 412 + ], + [ + 723, + 418 + ], + [ + 723, + 425 + ], + [ + 723, + 432 + ], + [ + 721, + 441 + ], + [ + 709, + 439 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 735, + 471 + ], + [ + 734, + 381 + ], + [ + 734, + 369 + ], + [ + 735, + 355 + ], + [ + 732, + 343 + ], + [ + 728, + 335 + ], + [ + 719, + 338 + ], + [ + 713, + 348 + ], + [ + 704, + 358 + ], + [ + 700, + 364 + ], + [ + 691, + 367 + ], + [ + 692, + 357 + ], + [ + 703, + 343 + ], + [ + 703, + 332 + ], + [ + 699, + 325 + ], + [ + 689, + 331 + ], + [ + 679, + 332 + ], + [ + 680, + 339 + ], + [ + 669, + 342 + ], + [ + 665, + 339 + ], + [ + 657, + 333 + ], + [ + 649, + 326 + ], + [ + 650, + 321 + ], + [ + 666, + 326 + ], + [ + 677, + 327 + ], + [ + 670, + 319 + ], + [ + 664, + 311 + ], + [ + 678, + 308 + ], + [ + 680, + 310 + ], + [ + 686, + 320 + ], + [ + 696, + 319 + ], + [ + 699, + 307 + ], + [ + 706, + 305 + ], + [ + 723, + 306 + ], + [ + 706, + 295 + ], + [ + 686, + 283 + ], + [ + 670, + 275 + ], + [ + 662, + 278 + ], + [ + 658, + 285 + ], + [ + 649, + 290 + ], + [ + 634, + 296 + ], + [ + 631, + 304 + ], + [ + 623, + 307 + ], + [ + 613, + 309 + ], + [ + 602, + 303 + ], + [ + 585, + 301 + ], + [ + 581, + 286 + ], + [ + 574, + 270 + ], + [ + 576, + 266 + ], + [ + 590, + 268 + ], + [ + 590, + 263 + ], + [ + 581, + 261 + ], + [ + 576, + 253 + ], + [ + 581, + 248 + ], + [ + 596, + 247 + ], + [ + 606, + 241 + ], + [ + 597, + 235 + ], + [ + 607, + 233 + ], + [ + 614, + 235 + ], + [ + 613, + 227 + ], + [ + 600, + 223 + ], + [ + 595, + 227 + ], + [ + 587, + 226 + ], + [ + 584, + 222 + ], + [ + 581, + 218 + ], + [ + 568, + 215 + ], + [ + 572, + 208 + ], + [ + 577, + 206 + ], + [ + 592, + 215 + ], + [ + 599, + 215 + ], + [ + 591, + 201 + ], + [ + 600, + 199 + ], + [ + 608, + 194 + ], + [ + 613, + 197 + ], + [ + 615, + 197 + ], + [ + 612, + 185 + ], + [ + 611, + 179 + ], + [ + 582, + 176 + ], + [ + 588, + 166 + ], + [ + 582, + 159 + ], + [ + 575, + 153 + ], + [ + 580, + 150 + ], + [ + 591, + 157 + ], + [ + 603, + 161 + ], + [ + 609, + 163 + ], + [ + 629, + 174 + ], + [ + 638, + 173 + ], + [ + 647, + 166 + ], + [ + 657, + 165 + ], + [ + 664, + 167 + ], + [ + 662, + 150 + ], + [ + 652, + 145 + ], + [ + 648, + 146 + ], + [ + 637, + 142 + ], + [ + 627, + 134 + ], + [ + 615, + 126 + ], + [ + 606, + 118 + ], + [ + 601, + 113 + ], + [ + 607, + 110 + ], + [ + 616, + 117 + ], + [ + 617, + 112 + ], + [ + 617, + 102 + ], + [ + 619, + 88 + ], + [ + 618, + 79 + ], + [ + 627, + 85 + ], + [ + 634, + 93 + ], + [ + 638, + 92 + ], + [ + 636, + 87 + ], + [ + 645, + 90 + ], + [ + 652, + 94 + ], + [ + 660, + 96 + ], + [ + 671, + 95 + ], + [ + 667, + 91 + ], + [ + 656, + 85 + ], + [ + 660, + 78 + ], + [ + 661, + 71 + ], + [ + 654, + 53 + ], + [ + 656, + 40 + ], + [ + 659, + 16 + ], + [ + 669, + 16 + ], + [ + 666, + 25 + ], + [ + 670, + 43 + ], + [ + 675, + 58 + ], + [ + 681, + 69 + ], + [ + 683, + 73 + ], + [ + 691, + 63 + ], + [ + 689, + 49 + ], + [ + 685, + 38 + ], + [ + 690, + 32 + ], + [ + 697, + 30 + ], + [ + 705, + 26 + ], + [ + 714, + 27 + ], + [ + 718, + 31 + ], + [ + 726, + 28 + ], + [ + 736, + 30 + ], + [ + 736, + 11 + ], + [ + 734, + 0 + ], + [ + 744, + 0 + ], + [ + 745, + 10 + ], + [ + 753, + 12 + ], + [ + 758, + 14 + ], + [ + 764, + 20 + ], + [ + 767, + 27 + ], + [ + 770, + 19 + ], + [ + 776, + 7 + ], + [ + 782, + 0 + ], + [ + 801, + 0 + ], + [ + 801, + 5 + ], + [ + 804, + 8 + ], + [ + 808, + 11 + ], + [ + 808, + 14 + ], + [ + 815, + 12 + ], + [ + 817, + 6 + ], 
+ [ + 822, + 2 + ], + [ + 826, + 5 + ], + [ + 827, + 15 + ], + [ + 836, + 21 + ], + [ + 835, + 26 + ], + [ + 827, + 30 + ], + [ + 820, + 38 + ], + [ + 824, + 44 + ], + [ + 820, + 50 + ], + [ + 815, + 59 + ], + [ + 824, + 60 + ], + [ + 826, + 57 + ], + [ + 831, + 51 + ], + [ + 834, + 46 + ], + [ + 844, + 37 + ], + [ + 843, + 52 + ], + [ + 839, + 59 + ], + [ + 835, + 66 + ], + [ + 838, + 73 + ], + [ + 840, + 81 + ], + [ + 840, + 87 + ], + [ + 834, + 89 + ], + [ + 822, + 93 + ], + [ + 821, + 100 + ], + [ + 828, + 103 + ], + [ + 843, + 100 + ], + [ + 852, + 95 + ], + [ + 860, + 90 + ], + [ + 863, + 88 + ], + [ + 867, + 82 + ], + [ + 875, + 73 + ], + [ + 883, + 65 + ], + [ + 884, + 70 + ], + [ + 879, + 82 + ], + [ + 880, + 88 + ], + [ + 883, + 91 + ], + [ + 883, + 102 + ], + [ + 878, + 105 + ], + [ + 893, + 100 + ], + [ + 894, + 109 + ], + [ + 890, + 121 + ], + [ + 884, + 129 + ], + [ + 887, + 131 + ], + [ + 899, + 127 + ], + [ + 900, + 132 + ], + [ + 886, + 143 + ], + [ + 889, + 153 + ], + [ + 903, + 147 + ], + [ + 912, + 135 + ], + [ + 918, + 140 + ], + [ + 921, + 156 + ], + [ + 917, + 167 + ], + [ + 910, + 174 + ], + [ + 909, + 179 + ], + [ + 908, + 187 + ], + [ + 893, + 194 + ], + [ + 883, + 192 + ], + [ + 876, + 190 + ], + [ + 884, + 201 + ], + [ + 892, + 214 + ], + [ + 908, + 214 + ], + [ + 914, + 209 + ], + [ + 921, + 208 + ], + [ + 935, + 208 + ], + [ + 944, + 209 + ], + [ + 937, + 214 + ], + [ + 936, + 228 + ], + [ + 939, + 235 + ], + [ + 941, + 243 + ], + [ + 951, + 245 + ], + [ + 959, + 236 + ], + [ + 964, + 234 + ], + [ + 963, + 244 + ], + [ + 950, + 253 + ], + [ + 935, + 256 + ], + [ + 924, + 259 + ], + [ + 910, + 261 + ], + [ + 903, + 262 + ], + [ + 902, + 263 + ], + [ + 902, + 270 + ], + [ + 888, + 272 + ], + [ + 888, + 281 + ], + [ + 899, + 290 + ], + [ + 896, + 298 + ], + [ + 886, + 301 + ], + [ + 887, + 305 + ], + [ + 886, + 313 + ], + [ + 875, + 318 + ], + [ + 865, + 319 + ], + [ + 858, + 326 + ], + [ + 860, + 339 + ], + [ + 852, + 338 + ], + [ + 848, + 326 + ], + [ + 855, + 315 + ], + [ + 854, + 306 + ], + [ + 840, + 312 + ], + [ + 839, + 323 + ], + [ + 836, + 330 + ], + [ + 827, + 334 + ], + [ + 817, + 335 + ], + [ + 803, + 337 + ], + [ + 786, + 339 + ], + [ + 776, + 342 + ], + [ + 760, + 345 + ], + [ + 749, + 347 + ], + [ + 743, + 381 + ], + [ + 745, + 471 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 746, + 373 + ], + [ + 745, + 382 + ], + [ + 747, + 474 + ], + [ + 750, + 473 + ], + [ + 747, + 379 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 747, + 336 + ], + [ + 740, + 339 + ], + [ + 735, + 347 + ], + [ + 736, + 355 + ], + [ + 742, + 364 + ], + [ + 748, + 366 + ], + [ + 752, + 361 + ], + [ + 755, + 354 + ], + [ + 754, + 343 + ], + [ + 751, + 337 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 754, + 363 + ], + [ + 755, + 383 + ], + [ + 741, + 384 + ], + [ + 741, + 363 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 523, + 267 + ], + [ + 523, + 285 + ], + [ + 528, + 285 + ], + [ + 528, + 289 + ], + [ + 523, + 289 + ], + [ + 522, + 312 + ], + [ + 522, + 318 + ], + [ + 522, + 327 + ], + [ + 518, + 329 + ], + [ + 510, + 329 + ], + [ + 503, + 325 + ], + [ + 500, + 313 + ], + [ + 502, + 306 + ], + [ + 503, + 297 + ], + [ + 503, + 292 + ], + [ + 505, + 289 + ], + [ + 517, + 288 + ], + [ + 517, + 281 + ], + [ + 518, + 268 + ], + [ + 519, + 266 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 545, + 435 + ], + [ + 548, + 427 + ], + [ + 547, + 418 + ], + [ + 542, + 408 + ], + [ + 522, + 402 + ], + [ + 520, + 
414 + ], + [ + 514, + 423 + ], + [ + 513, + 430 + ], + [ + 525, + 443 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 690, + 409 + ], + [ + 658, + 407 + ], + [ + 613, + 407 + ], + [ + 590, + 411 + ], + [ + 553, + 426 + ], + [ + 536, + 434 + ], + [ + 511, + 447 + ], + [ + 497, + 462 + ], + [ + 496, + 476 + ], + [ + 498, + 486 + ], + [ + 512, + 495 + ], + [ + 529, + 497 + ], + [ + 542, + 493 + ], + [ + 552, + 491 + ], + [ + 565, + 489 + ], + [ + 571, + 484 + ], + [ + 582, + 484 + ], + [ + 645, + 483 + ], + [ + 646, + 486 + ], + [ + 654, + 490 + ], + [ + 661, + 490 + ], + [ + 668, + 487 + ], + [ + 671, + 484 + ], + [ + 676, + 484 + ], + [ + 684, + 489 + ], + [ + 690, + 490 + ], + [ + 700, + 489 + ], + [ + 705, + 486 + ], + [ + 707, + 483 + ], + [ + 712, + 478 + ], + [ + 723, + 474 + ], + [ + 730, + 472 + ], + [ + 731, + 465 + ], + [ + 727, + 451 + ], + [ + 717, + 432 + ], + [ + 706, + 414 + ], + [ + 695, + 408 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 282, + 630 + ], + [ + 424, + 636 + ], + [ + 535, + 605 + ], + [ + 605, + 581 + ], + [ + 630, + 567 + ], + [ + 613, + 556 + ], + [ + 560, + 555 + ], + [ + 514, + 554 + ], + [ + 452, + 548 + ], + [ + 379, + 539 + ], + [ + 243, + 542 + ], + [ + 229, + 580 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 423, + 367 + ], + [ + 418, + 468 + ], + [ + 423, + 468 + ], + [ + 427, + 367 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 464, + 170 + ], + [ + 468, + 471 + ], + [ + 480, + 470 + ], + [ + 480, + 457 + ], + [ + 476, + 171 + ], + [ + 476, + 169 + ] + ] + }, + { + "label": "bicyclegroup", + "polygon": [ + [ + 410, + 434 + ], + [ + 423, + 428 + ], + [ + 437, + 428 + ], + [ + 458, + 432 + ], + [ + 477, + 435 + ], + [ + 486, + 435 + ], + [ + 494, + 437 + ], + [ + 495, + 439 + ], + [ + 491, + 441 + ], + [ + 483, + 442 + ], + [ + 468, + 442 + ], + [ + 467, + 443 + ], + [ + 471, + 450 + ], + [ + 485, + 453 + ], + [ + 491, + 452 + ], + [ + 496, + 453 + ], + [ + 504, + 457 + ], + [ + 514, + 464 + ], + [ + 526, + 469 + ], + [ + 536, + 481 + ], + [ + 548, + 493 + ], + [ + 548, + 516 + ], + [ + 546, + 534 + ], + [ + 537, + 558 + ], + [ + 524, + 567 + ], + [ + 516, + 567 + ], + [ + 507, + 563 + ], + [ + 498, + 567 + ], + [ + 484, + 571 + ], + [ + 460, + 565 + ], + [ + 418, + 535 + ], + [ + 417, + 486 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 485, + 580 + ], + [ + 486, + 467 + ], + [ + 497, + 467 + ], + [ + 491, + 579 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 554, + 458 + ], + [ + 553, + 565 + ], + [ + 563, + 565 + ], + [ + 562, + 458 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 528, + 463 + ], + [ + 528, + 569 + ], + [ + 537, + 569 + ], + [ + 537, + 464 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 600, + 396 + ], + [ + 591, + 405 + ], + [ + 594, + 412 + ], + [ + 590, + 419 + ], + [ + 583, + 434 + ], + [ + 584, + 453 + ], + [ + 582, + 469 + ], + [ + 580, + 479 + ], + [ + 581, + 487 + ], + [ + 583, + 490 + ], + [ + 587, + 492 + ], + [ + 587, + 507 + ], + [ + 589, + 517 + ], + [ + 591, + 519 + ], + [ + 594, + 523 + ], + [ + 590, + 565 + ], + [ + 604, + 565 + ], + [ + 613, + 566 + ], + [ + 621, + 564 + ], + [ + 622, + 560 + ], + [ + 614, + 556 + ], + [ + 609, + 554 + ], + [ + 611, + 538 + ], + [ + 617, + 513 + ], + [ + 618, + 495 + ], + [ + 624, + 467 + ], + [ + 623, + 450 + ], + [ + 618, + 437 + ], + [ + 607, + 425 + ], + [ + 614, + 423 + ], + [ + 617, + 405 + ], + [ + 619, + 402 + ], + [ + 607, + 395 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 
60, + 6 + ], + [ + 56, + 59 + ], + [ + 63, + 138 + ], + [ + 56, + 229 + ], + [ + 69, + 231 + ], + [ + 78, + 229 + ], + [ + 84, + 217 + ], + [ + 99, + 217 + ], + [ + 105, + 223 + ], + [ + 109, + 237 + ], + [ + 110, + 247 + ], + [ + 122, + 254 + ], + [ + 134, + 243 + ], + [ + 146, + 253 + ], + [ + 162, + 252 + ], + [ + 186, + 258 + ], + [ + 193, + 247 + ], + [ + 184, + 238 + ], + [ + 177, + 222 + ], + [ + 161, + 217 + ], + [ + 147, + 210 + ], + [ + 161, + 198 + ], + [ + 161, + 186 + ], + [ + 152, + 181 + ], + [ + 141, + 183 + ], + [ + 123, + 179 + ], + [ + 115, + 175 + ], + [ + 129, + 158 + ], + [ + 143, + 141 + ], + [ + 160, + 143 + ], + [ + 178, + 144 + ], + [ + 188, + 128 + ], + [ + 180, + 115 + ], + [ + 190, + 91 + ], + [ + 194, + 82 + ], + [ + 200, + 49 + ], + [ + 215, + 49 + ], + [ + 222, + 57 + ], + [ + 224, + 44 + ], + [ + 217, + 22 + ], + [ + 208, + 13 + ], + [ + 225, + 14 + ], + [ + 247, + 28 + ], + [ + 243, + 18 + ], + [ + 240, + 0 + ], + [ + 406, + 0 + ], + [ + 406, + 8 + ], + [ + 423, + 21 + ], + [ + 432, + 30 + ], + [ + 446, + 43 + ], + [ + 437, + 52 + ], + [ + 428, + 60 + ], + [ + 417, + 57 + ], + [ + 395, + 45 + ], + [ + 382, + 41 + ], + [ + 366, + 37 + ], + [ + 370, + 52 + ], + [ + 371, + 75 + ], + [ + 390, + 80 + ], + [ + 394, + 79 + ], + [ + 394, + 92 + ], + [ + 394, + 107 + ], + [ + 394, + 123 + ], + [ + 376, + 135 + ], + [ + 386, + 143 + ], + [ + 390, + 154 + ], + [ + 406, + 160 + ], + [ + 419, + 164 + ], + [ + 434, + 169 + ], + [ + 438, + 182 + ], + [ + 450, + 195 + ], + [ + 454, + 203 + ], + [ + 458, + 216 + ], + [ + 465, + 216 + ], + [ + 465, + 257 + ], + [ + 470, + 301 + ], + [ + 465, + 333 + ], + [ + 457, + 350 + ], + [ + 441, + 347 + ], + [ + 430, + 345 + ], + [ + 411, + 337 + ], + [ + 399, + 343 + ], + [ + 393, + 351 + ], + [ + 390, + 364 + ], + [ + 389, + 365 + ], + [ + 388, + 382 + ], + [ + 399, + 389 + ], + [ + 412, + 400 + ], + [ + 409, + 406 + ], + [ + 397, + 415 + ], + [ + 398, + 426 + ], + [ + 410, + 428 + ], + [ + 426, + 433 + ], + [ + 436, + 436 + ], + [ + 452, + 453 + ], + [ + 448, + 476 + ], + [ + 439, + 492 + ], + [ + 439, + 508 + ], + [ + 453, + 522 + ], + [ + 463, + 544 + ], + [ + 469, + 558 + ], + [ + 472, + 581 + ], + [ + 478, + 603 + ], + [ + 464, + 617 + ], + [ + 442, + 628 + ], + [ + 427, + 638 + ], + [ + 405, + 641 + ], + [ + 382, + 641 + ], + [ + 347, + 642 + ], + [ + 324, + 643 + ], + [ + 308, + 641 + ], + [ + 283, + 609 + ], + [ + 221, + 548 + ], + [ + 0, + 463 + ], + [ + 0, + 0 + ], + [ + 62, + 0 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 412, + 194 + ], + [ + 414, + 212 + ], + [ + 448, + 212 + ], + [ + 447, + 192 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 397, + 173 + ], + [ + 397, + 198 + ], + [ + 460, + 198 + ], + [ + 467, + 198 + ], + [ + 477, + 198 + ], + [ + 476, + 192 + ], + [ + 464, + 192 + ], + [ + 459, + 189 + ], + [ + 459, + 181 + ], + [ + 467, + 182 + ], + [ + 476, + 182 + ], + [ + 476, + 177 + ], + [ + 465, + 177 + ], + [ + 462, + 174 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 457, + 224 + ], + [ + 457, + 197 + ], + [ + 473, + 205 + ], + [ + 473, + 228 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 499, + 260 + ], + [ + 479, + 256 + ], + [ + 478, + 242 + ], + [ + 463, + 242 + ], + [ + 463, + 242 + ], + [ + 461, + 335 + ], + [ + 473, + 340 + ], + [ + 476, + 333 + ], + [ + 498, + 324 + ], + [ + 501, + 311 + ], + [ + 485, + 310 + ], + [ + 484, + 302 + ], + [ + 499, + 295 + ], + [ + 500, + 284 + ], + [ + 479, + 284 + ], + [ + 479, + 277 + ], + [ + 
501, + 270 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 427, + 355 + ], + [ + 420, + 351 + ], + [ + 417, + 342 + ], + [ + 417, + 333 + ], + [ + 422, + 326 + ], + [ + 429, + 323 + ], + [ + 435, + 323 + ], + [ + 438, + 325 + ], + [ + 439, + 333 + ], + [ + 439, + 342 + ], + [ + 435, + 350 + ], + [ + 429, + 355 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 436, + 351 + ], + [ + 420, + 351 + ], + [ + 418, + 373 + ], + [ + 435, + 373 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 368, + 106 + ], + [ + 367, + 325 + ], + [ + 371, + 334 + ], + [ + 372, + 343 + ], + [ + 372, + 348 + ], + [ + 372, + 355 + ], + [ + 378, + 368 + ], + [ + 381, + 373 + ], + [ + 376, + 40 + ], + [ + 365, + 40 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 49, + 412 + ], + [ + 132, + 442 + ], + [ + 202, + 476 + ], + [ + 323, + 490 + ], + [ + 332, + 525 + ], + [ + 329, + 558 + ], + [ + 337, + 564 + ], + [ + 349, + 574 + ], + [ + 350, + 617 + ], + [ + 323, + 636 + ], + [ + 291, + 656 + ], + [ + 262, + 669 + ], + [ + 219, + 674 + ], + [ + 196, + 689 + ], + [ + 172, + 695 + ], + [ + 146, + 698 + ], + [ + 119, + 687 + ], + [ + 52, + 598 + ], + [ + 0, + 509 + ], + [ + 0, + 409 + ], + [ + 29, + 410 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 133, + 713 + ], + [ + 85, + 731 + ], + [ + 26, + 744 + ], + [ + 0, + 744 + ], + [ + 0, + 452 + ], + [ + 21, + 476 + ], + [ + 58, + 510 + ], + [ + 91, + 535 + ], + [ + 122, + 561 + ], + [ + 159, + 591 + ], + [ + 182, + 620 + ], + [ + 188, + 649 + ], + [ + 180, + 677 + ], + [ + 163, + 696 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000173_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000173_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..6546a1efeb1b67d0aa3b704e43150de1f85ac5de Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000173_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000174_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000174_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..aca2d91f45ece2139eb94abd00e438506fe78506 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000174_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000174_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000174_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..b378c96a6d7640f52e1baa1a4cba943c15a39c9e --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000174_000019_gtFine_polygons.json 
@@ -0,0 +1,4352 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sidewalk", + "polygon": [ + [ + 283, + 580 + ], + [ + 308, + 581 + ], + [ + 646, + 514 + ], + [ + 660, + 480 + ], + [ + 124, + 552 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 164, + 262 + ], + [ + 2048, + 362 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 244 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 678, + 11 + ], + [ + 736, + 263 + ], + [ + 1436, + 231 + ], + [ + 1419, + 0 + ], + [ + 670, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2021, + 620 + ], + [ + 1819, + 608 + ], + [ + 1694, + 591 + ], + [ + 1578, + 563 + ], + [ + 1423, + 526 + ], + [ + 1368, + 514 + ], + [ + 1319, + 507 + ], + [ + 1277, + 497 + ], + [ + 1240, + 487 + ], + [ + 1206, + 479 + ], + [ + 1187, + 468 + ], + [ + 1216, + 465 + ], + [ + 1276, + 456 + ], + [ + 1317, + 462 + ], + [ + 1457, + 469 + ], + [ + 2048, + 520 + ], + [ + 2048, + 622 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 10, + 580 + ], + [ + 424, + 518 + ], + [ + 622, + 501 + ], + [ + 672, + 480 + ], + [ + 770, + 469 + ], + [ + 864, + 464 + ], + [ + 953, + 459 + ], + [ + 1044, + 457 + ], + [ + 1123, + 453 + ], + [ + 1210, + 454 + ], + [ + 1288, + 458 + ], + [ + 1335, + 466 + ], + [ + 1368, + 477 + ], + [ + 1468, + 489 + ], + [ + 1813, + 530 + ], + [ + 1919, + 545 + ], + [ + 2048, + 551 + ], + [ + 2048, + 0 + ], + [ + 1286, + 0 + ], + [ + 1306, + 6 + ], + [ + 1308, + 61 + ], + [ + 1297, + 79 + ], + [ + 1301, + 105 + ], + [ + 1294, + 119 + ], + [ + 1299, + 123 + ], + [ + 1301, + 177 + ], + [ + 1242, + 183 + ], + [ + 1215, + 179 + ], + [ + 1214, + 171 + ], + [ + 1201, + 167 + ], + [ + 1195, + 182 + ], + [ + 1178, + 191 + ], + [ + 1181, + 175 + ], + [ + 1172, + 175 + ], + [ + 1171, + 198 + ], + [ + 1155, + 217 + ], + [ + 1141, + 218 + ], + [ + 1141, + 206 + ], + [ + 1120, + 208 + ], + [ + 1120, + 216 + ], + [ + 1087, + 213 + ], + [ + 1087, + 199 + ], + [ + 1075, + 200 + ], + [ + 1074, + 211 + ], + [ + 1059, + 188 + ], + [ + 1059, + 181 + ], + [ + 1004, + 178 + ], + [ + 982, + 174 + ], + [ + 978, + 164 + ], + [ + 974, + 175 + ], + [ + 881, + 172 + ], + [ + 818, + 180 + ], + [ + 818, + 171 + ], + [ + 805, + 172 + ], + [ + 805, + 181 + ], + [ + 800, + 183 + ], + [ + 777, + 215 + ], + [ + 772, + 38 + ], + [ + 730, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 580 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1217, + 436 + ], + [ + 1224, + 430 + ], + [ + 1303, + 434 + ], + [ + 1303, + 461 + ], + [ + 1210, + 460 + ], + [ + 1210, + 447 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1174, + 470 + ], + [ + 1163, + 466 + ], + [ + 1163, + 462 + ], + [ + 1168, + 459 + ], + [ + 1192, + 459 + ], + [ + 1221, + 457 + ], + [ + 1251, + 455 + ], + [ + 1282, + 457 + ], + [ + 1294, + 458 + ], + [ + 1301, + 462 + ], + [ + 1301, + 466 + ], + [ + 1181, + 469 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1671, + 324 + ], + [ + 1673, + 557 + ], + [ + 1684, + 556 + ], + [ + 1678, + 323 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1699, + 447 + ], + [ + 1649, + 445 + ], + [ + 1656, + 504 + ], + [ + 1673, + 511 + ], + [ + 1696, + 504 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1675, + 285 + ], + [ + 1665, + 288 + ], + [ + 1656, + 295 + ], + [ + 1653, + 303 + ], + [ + 1653, + 313 + ], + [ + 1658, + 324 + ], + [ + 1666, + 328 + ], + [ + 1672, + 329 + ], + [ + 1680, + 329 + ], + [ + 1689, + 327 + ], + [ + 1696, + 319 + ], + [ + 1699, + 308 + ], + [ + 1699, + 298 
+ ], + [ + 1691, + 289 + ], + [ + 1681, + 285 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1734, + 463 + ], + [ + 1733, + 567 + ], + [ + 1742, + 570 + ], + [ + 1745, + 464 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1816, + 475 + ], + [ + 1837, + 588 + ], + [ + 1852, + 588 + ], + [ + 1829, + 473 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2035, + 480 + ], + [ + 2032, + 597 + ], + [ + 2044, + 599 + ], + [ + 2044, + 477 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1214, + 437 + ], + [ + 1194, + 436 + ], + [ + 1184, + 436 + ], + [ + 1167, + 447 + ], + [ + 1171, + 455 + ], + [ + 1175, + 456 + ], + [ + 1188, + 456 + ], + [ + 1200, + 456 + ], + [ + 1214, + 455 + ], + [ + 1216, + 454 + ], + [ + 1221, + 449 + ], + [ + 1221, + 446 + ], + [ + 1218, + 440 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1169, + 439 + ], + [ + 1154, + 438 + ], + [ + 1144, + 439 + ], + [ + 1146, + 444 + ], + [ + 1151, + 455 + ], + [ + 1165, + 456 + ], + [ + 1171, + 456 + ], + [ + 1173, + 451 + ], + [ + 1173, + 445 + ], + [ + 1171, + 440 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1130, + 440 + ], + [ + 1120, + 435 + ], + [ + 1104, + 435 + ], + [ + 1093, + 442 + ], + [ + 1092, + 456 + ], + [ + 1093, + 458 + ], + [ + 1108, + 455 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1142, + 436 + ], + [ + 1123, + 438 + ], + [ + 1112, + 442 + ], + [ + 1104, + 448 + ], + [ + 1102, + 452 + ], + [ + 1103, + 456 + ], + [ + 1108, + 459 + ], + [ + 1118, + 459 + ], + [ + 1130, + 459 + ], + [ + 1145, + 459 + ], + [ + 1153, + 459 + ], + [ + 1157, + 454 + ], + [ + 1157, + 449 + ], + [ + 1154, + 441 + ], + [ + 1146, + 438 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1033, + 451 + ], + [ + 1000, + 437 + ], + [ + 960, + 438 + ], + [ + 946, + 449 + ], + [ + 947, + 462 + ], + [ + 958, + 464 + ], + [ + 960, + 467 + ], + [ + 965, + 469 + ], + [ + 969, + 468 + ], + [ + 971, + 466 + ], + [ + 973, + 466 + ], + [ + 977, + 466 + ], + [ + 982, + 468 + ], + [ + 985, + 468 + ], + [ + 987, + 466 + ], + [ + 1012, + 466 + ], + [ + 1018, + 468 + ], + [ + 1023, + 468 + ], + [ + 1031, + 464 + ], + [ + 1040, + 457 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1075, + 435 + ], + [ + 1042, + 435 + ], + [ + 1038, + 436 + ], + [ + 1034, + 442 + ], + [ + 1029, + 454 + ], + [ + 1030, + 468 + ], + [ + 1031, + 472 + ], + [ + 1034, + 475 + ], + [ + 1040, + 477 + ], + [ + 1043, + 478 + ], + [ + 1054, + 472 + ], + [ + 1068, + 461 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1088, + 436 + ], + [ + 1075, + 435 + ], + [ + 1057, + 437 + ], + [ + 1050, + 446 + ], + [ + 1047, + 459 + ], + [ + 1046, + 471 + ], + [ + 1046, + 476 + ], + [ + 1048, + 480 + ], + [ + 1053, + 480 + ], + [ + 1055, + 479 + ], + [ + 1055, + 475 + ], + [ + 1081, + 475 + ], + [ + 1085, + 475 + ], + [ + 1091, + 476 + ], + [ + 1094, + 479 + ], + [ + 1100, + 479 + ], + [ + 1100, + 468 + ], + [ + 1100, + 455 + ], + [ + 1096, + 445 + ], + [ + 1093, + 439 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 852, + 294 + ], + [ + 853, + 440 + ], + [ + 861, + 440 + ], + [ + 860, + 295 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 552, + 499 + ], + [ + 632, + 511 + ], + [ + 638, + 515 + ], + [ + 582, + 529 + ], + [ + 0, + 632 + ], + [ + 0, + 554 + ], + [ + 537, + 499 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 888, + 441 + ], + [ + 866, + 440 + ], + [ + 876, + 479 + ], + [ + 893, + 480 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 735, + 432 + ], + [ + 735, 
+ 393 + ], + [ + 735, + 375 + ], + [ + 735, + 356 + ], + [ + 735, + 342 + ], + [ + 736, + 332 + ], + [ + 737, + 322 + ], + [ + 737, + 312 + ], + [ + 733, + 299 + ], + [ + 734, + 287 + ], + [ + 740, + 282 + ], + [ + 743, + 280 + ], + [ + 745, + 284 + ], + [ + 749, + 291 + ], + [ + 756, + 291 + ], + [ + 760, + 289 + ], + [ + 765, + 292 + ], + [ + 779, + 308 + ], + [ + 778, + 315 + ], + [ + 779, + 399 + ], + [ + 786, + 393 + ], + [ + 790, + 387 + ], + [ + 798, + 380 + ], + [ + 804, + 379 + ], + [ + 815, + 379 + ], + [ + 821, + 380 + ], + [ + 818, + 372 + ], + [ + 815, + 362 + ], + [ + 819, + 354 + ], + [ + 814, + 349 + ], + [ + 808, + 347 + ], + [ + 816, + 346 + ], + [ + 822, + 342 + ], + [ + 817, + 335 + ], + [ + 809, + 324 + ], + [ + 809, + 318 + ], + [ + 801, + 315 + ], + [ + 799, + 310 + ], + [ + 808, + 309 + ], + [ + 817, + 309 + ], + [ + 820, + 302 + ], + [ + 820, + 299 + ], + [ + 809, + 292 + ], + [ + 813, + 282 + ], + [ + 809, + 269 + ], + [ + 814, + 265 + ], + [ + 827, + 264 + ], + [ + 843, + 264 + ], + [ + 850, + 260 + ], + [ + 854, + 253 + ], + [ + 852, + 239 + ], + [ + 850, + 229 + ], + [ + 845, + 216 + ], + [ + 839, + 197 + ], + [ + 832, + 199 + ], + [ + 823, + 204 + ], + [ + 808, + 201 + ], + [ + 799, + 197 + ], + [ + 790, + 200 + ], + [ + 796, + 192 + ], + [ + 807, + 178 + ], + [ + 805, + 173 + ], + [ + 812, + 171 + ], + [ + 821, + 171 + ], + [ + 824, + 173 + ], + [ + 832, + 176 + ], + [ + 832, + 169 + ], + [ + 842, + 168 + ], + [ + 850, + 167 + ], + [ + 836, + 160 + ], + [ + 850, + 156 + ], + [ + 859, + 152 + ], + [ + 853, + 145 + ], + [ + 869, + 143 + ], + [ + 880, + 137 + ], + [ + 898, + 138 + ], + [ + 912, + 139 + ], + [ + 919, + 132 + ], + [ + 917, + 126 + ], + [ + 909, + 120 + ], + [ + 918, + 114 + ], + [ + 930, + 111 + ], + [ + 946, + 108 + ], + [ + 947, + 97 + ], + [ + 935, + 78 + ], + [ + 921, + 79 + ], + [ + 913, + 76 + ], + [ + 914, + 72 + ], + [ + 919, + 65 + ], + [ + 911, + 62 + ], + [ + 900, + 67 + ], + [ + 898, + 57 + ], + [ + 891, + 53 + ], + [ + 885, + 59 + ], + [ + 879, + 60 + ], + [ + 884, + 51 + ], + [ + 884, + 43 + ], + [ + 869, + 44 + ], + [ + 863, + 46 + ], + [ + 860, + 33 + ], + [ + 855, + 31 + ], + [ + 850, + 38 + ], + [ + 844, + 31 + ], + [ + 839, + 36 + ], + [ + 829, + 21 + ], + [ + 816, + 0 + ], + [ + 468, + 0 + ], + [ + 471, + 2 + ], + [ + 479, + 7 + ], + [ + 488, + 10 + ], + [ + 495, + 9 + ], + [ + 506, + 15 + ], + [ + 519, + 16 + ], + [ + 524, + 21 + ], + [ + 536, + 29 + ], + [ + 548, + 35 + ], + [ + 546, + 39 + ], + [ + 530, + 35 + ], + [ + 518, + 31 + ], + [ + 505, + 26 + ], + [ + 494, + 30 + ], + [ + 488, + 24 + ], + [ + 486, + 24 + ], + [ + 485, + 35 + ], + [ + 481, + 50 + ], + [ + 480, + 58 + ], + [ + 492, + 59 + ], + [ + 509, + 60 + ], + [ + 511, + 73 + ], + [ + 521, + 83 + ], + [ + 533, + 96 + ], + [ + 535, + 103 + ], + [ + 534, + 105 + ], + [ + 524, + 98 + ], + [ + 514, + 102 + ], + [ + 500, + 99 + ], + [ + 493, + 95 + ], + [ + 480, + 110 + ], + [ + 483, + 116 + ], + [ + 487, + 120 + ], + [ + 498, + 119 + ], + [ + 502, + 123 + ], + [ + 502, + 135 + ], + [ + 511, + 143 + ], + [ + 525, + 139 + ], + [ + 542, + 145 + ], + [ + 555, + 154 + ], + [ + 541, + 159 + ], + [ + 534, + 162 + ], + [ + 518, + 160 + ], + [ + 514, + 157 + ], + [ + 513, + 164 + ], + [ + 512, + 167 + ], + [ + 500, + 165 + ], + [ + 492, + 165 + ], + [ + 484, + 157 + ], + [ + 479, + 166 + ], + [ + 498, + 172 + ], + [ + 496, + 179 + ], + [ + 484, + 179 + ], + [ + 459, + 172 + ], + [ + 456, + 177 + ], + [ + 465, + 185 + ], + [ + 482, + 195 + ], + [ + 492, + 203 + ], + [ + 529, + 
209 + ], + [ + 525, + 216 + ], + [ + 532, + 227 + ], + [ + 543, + 236 + ], + [ + 548, + 244 + ], + [ + 561, + 243 + ], + [ + 563, + 239 + ], + [ + 554, + 231 + ], + [ + 574, + 238 + ], + [ + 577, + 247 + ], + [ + 580, + 264 + ], + [ + 582, + 281 + ], + [ + 582, + 316 + ], + [ + 581, + 331 + ], + [ + 583, + 359 + ], + [ + 585, + 400 + ], + [ + 584, + 441 + ], + [ + 593, + 455 + ], + [ + 594, + 445 + ], + [ + 593, + 339 + ], + [ + 593, + 334 + ], + [ + 593, + 325 + ], + [ + 590, + 315 + ], + [ + 593, + 297 + ], + [ + 593, + 289 + ], + [ + 592, + 273 + ], + [ + 590, + 266 + ], + [ + 593, + 250 + ], + [ + 604, + 254 + ], + [ + 611, + 246 + ], + [ + 614, + 238 + ], + [ + 616, + 227 + ], + [ + 624, + 224 + ], + [ + 632, + 229 + ], + [ + 645, + 233 + ], + [ + 655, + 235 + ], + [ + 662, + 232 + ], + [ + 663, + 226 + ], + [ + 655, + 222 + ], + [ + 659, + 220 + ], + [ + 677, + 227 + ], + [ + 690, + 231 + ], + [ + 694, + 222 + ], + [ + 695, + 213 + ], + [ + 690, + 204 + ], + [ + 683, + 201 + ], + [ + 668, + 197 + ], + [ + 655, + 200 + ], + [ + 647, + 203 + ], + [ + 645, + 196 + ], + [ + 654, + 189 + ], + [ + 669, + 191 + ], + [ + 682, + 192 + ], + [ + 690, + 196 + ], + [ + 701, + 205 + ], + [ + 711, + 217 + ], + [ + 714, + 223 + ], + [ + 721, + 231 + ], + [ + 726, + 261 + ], + [ + 726, + 298 + ], + [ + 725, + 322 + ], + [ + 725, + 352 + ], + [ + 725, + 391 + ], + [ + 724, + 435 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 836, + 442 + ], + [ + 831, + 432 + ], + [ + 806, + 430 + ], + [ + 778, + 431 + ], + [ + 779, + 441 + ], + [ + 804, + 484 + ], + [ + 809, + 488 + ], + [ + 814, + 488 + ], + [ + 825, + 471 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 803, + 441 + ], + [ + 774, + 438 + ], + [ + 770, + 442 + ], + [ + 788, + 478 + ], + [ + 791, + 490 + ], + [ + 794, + 492 + ], + [ + 800, + 491 + ], + [ + 806, + 489 + ], + [ + 811, + 489 + ], + [ + 812, + 484 + ], + [ + 814, + 469 + ], + [ + 811, + 462 + ], + [ + 811, + 455 + ], + [ + 810, + 450 + ], + [ + 806, + 443 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 739, + 436 + ], + [ + 778, + 440 + ], + [ + 788, + 447 + ], + [ + 792, + 459 + ], + [ + 793, + 474 + ], + [ + 794, + 485 + ], + [ + 793, + 491 + ], + [ + 791, + 494 + ], + [ + 785, + 494 + ], + [ + 773, + 494 + ], + [ + 771, + 489 + ], + [ + 760, + 484 + ], + [ + 739, + 477 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 778, + 484 + ], + [ + 772, + 492 + ], + [ + 764, + 493 + ], + [ + 758, + 493 + ], + [ + 752, + 485 + ], + [ + 759, + 479 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 664, + 439 + ], + [ + 645, + 439 + ], + [ + 633, + 453 + ], + [ + 630, + 455 + ], + [ + 623, + 454 + ], + [ + 626, + 459 + ], + [ + 624, + 468 + ], + [ + 617, + 478 + ], + [ + 610, + 494 + ], + [ + 611, + 497 + ], + [ + 615, + 502 + ], + [ + 623, + 503 + ], + [ + 641, + 504 + ], + [ + 651, + 499 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 644, + 512 + ], + [ + 605, + 514 + ], + [ + 604, + 504 + ], + [ + 611, + 501 + ], + [ + 622, + 500 + ], + [ + 630, + 501 + ], + [ + 640, + 504 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 503, + 444 + ], + [ + 507, + 435 + ], + [ + 518, + 430 + ], + [ + 534, + 432 + ], + [ + 567, + 434 + ], + [ + 571, + 432 + ], + [ + 580, + 432 + ], + [ + 586, + 435 + ], + [ + 594, + 443 + ], + [ + 604, + 463 + ], + [ + 610, + 480 + ], + [ + 613, + 498 + ], + [ + 611, + 511 + ], + [ + 607, + 521 + ], + [ + 601, + 523 + ], + [ + 597, + 523 + ], + [ + 592, + 523 + ], + [ + 590, + 520 + ], + [ + 588, + 514 + ], + [ 
+ 578, + 514 + ], + [ + 578, + 523 + ], + [ + 575, + 528 + ], + [ + 570, + 529 + ], + [ + 561, + 529 + ], + [ + 551, + 529 + ], + [ + 541, + 515 + ], + [ + 506, + 479 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 358, + 459 + ], + [ + 353, + 453 + ], + [ + 359, + 449 + ], + [ + 368, + 450 + ], + [ + 376, + 452 + ], + [ + 386, + 453 + ], + [ + 396, + 454 + ], + [ + 398, + 456 + ], + [ + 391, + 460 + ], + [ + 367, + 462 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 523, + 439 + ], + [ + 476, + 437 + ], + [ + 441, + 438 + ], + [ + 423, + 442 + ], + [ + 404, + 459 + ], + [ + 425, + 517 + ], + [ + 440, + 538 + ], + [ + 453, + 538 + ], + [ + 488, + 537 + ], + [ + 493, + 541 + ], + [ + 503, + 544 + ], + [ + 509, + 543 + ], + [ + 513, + 541 + ], + [ + 515, + 531 + ], + [ + 529, + 527 + ], + [ + 531, + 533 + ], + [ + 537, + 538 + ], + [ + 548, + 537 + ], + [ + 551, + 525 + ], + [ + 551, + 506 + ], + [ + 549, + 483 + ], + [ + 540, + 469 + ], + [ + 531, + 443 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 374, + 328 + ], + [ + 366, + 333 + ], + [ + 360, + 339 + ], + [ + 359, + 347 + ], + [ + 361, + 358 + ], + [ + 365, + 366 + ], + [ + 372, + 370 + ], + [ + 377, + 371 + ], + [ + 383, + 371 + ], + [ + 390, + 361 + ], + [ + 393, + 350 + ], + [ + 391, + 339 + ], + [ + 387, + 334 + ], + [ + 382, + 330 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 369, + 350 + ], + [ + 372, + 459 + ], + [ + 379, + 459 + ], + [ + 376, + 348 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 301, + 464 + ], + [ + 323, + 458 + ], + [ + 355, + 456 + ], + [ + 383, + 458 + ], + [ + 411, + 462 + ], + [ + 419, + 471 + ], + [ + 425, + 482 + ], + [ + 434, + 486 + ], + [ + 449, + 488 + ], + [ + 453, + 497 + ], + [ + 453, + 517 + ], + [ + 450, + 531 + ], + [ + 451, + 539 + ], + [ + 447, + 549 + ], + [ + 443, + 552 + ], + [ + 437, + 553 + ], + [ + 429, + 553 + ], + [ + 424, + 551 + ], + [ + 422, + 545 + ], + [ + 394, + 547 + ], + [ + 391, + 558 + ], + [ + 386, + 561 + ], + [ + 372, + 561 + ], + [ + 365, + 557 + ], + [ + 350, + 554 + ], + [ + 317, + 555 + ], + [ + 270, + 497 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 31, + 430 + ], + [ + 105, + 418 + ], + [ + 189, + 418 + ], + [ + 235, + 422 + ], + [ + 272, + 433 + ], + [ + 294, + 453 + ], + [ + 319, + 478 + ], + [ + 326, + 491 + ], + [ + 324, + 531 + ], + [ + 323, + 554 + ], + [ + 318, + 567 + ], + [ + 319, + 581 + ], + [ + 318, + 597 + ], + [ + 314, + 610 + ], + [ + 309, + 614 + ], + [ + 299, + 617 + ], + [ + 283, + 616 + ], + [ + 267, + 592 + ], + [ + 187, + 605 + ], + [ + 182, + 624 + ], + [ + 176, + 635 + ], + [ + 169, + 641 + ], + [ + 147, + 642 + ], + [ + 138, + 637 + ], + [ + 126, + 618 + ], + [ + 0, + 622 + ], + [ + 0, + 439 + ], + [ + 24, + 432 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 716, + 426 + ], + [ + 690, + 427 + ], + [ + 671, + 428 + ], + [ + 659, + 432 + ], + [ + 648, + 449 + ], + [ + 641, + 455 + ], + [ + 637, + 457 + ], + [ + 636, + 459 + ], + [ + 639, + 464 + ], + [ + 638, + 470 + ], + [ + 636, + 487 + ], + [ + 636, + 499 + ], + [ + 637, + 510 + ], + [ + 638, + 517 + ], + [ + 640, + 519 + ], + [ + 643, + 522 + ], + [ + 647, + 522 + ], + [ + 653, + 522 + ], + [ + 654, + 518 + ], + [ + 654, + 513 + ], + [ + 661, + 512 + ], + [ + 663, + 514 + ], + [ + 664, + 518 + ], + [ + 670, + 519 + ], + [ + 674, + 518 + ], + [ + 675, + 516 + ], + [ + 677, + 512 + ], + [ + 725, + 509 + ], + [ + 726, + 516 + ], + [ + 733, + 518 + ], + [ + 739, + 518 + ], + [ + 740, + 513 + ], + [ + 742, + 505 + ], + [ + 
745, + 510 + ], + [ + 747, + 513 + ], + [ + 752, + 514 + ], + [ + 758, + 513 + ], + [ + 759, + 505 + ], + [ + 759, + 496 + ], + [ + 759, + 480 + ], + [ + 756, + 464 + ], + [ + 749, + 446 + ], + [ + 736, + 428 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1018, + 38 + ], + [ + 1018, + 50 + ], + [ + 1002, + 52 + ], + [ + 988, + 48 + ], + [ + 987, + 39 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 849, + 377 + ], + [ + 849, + 439 + ], + [ + 853, + 440 + ], + [ + 853, + 376 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 834, + 378 + ], + [ + 834, + 385 + ], + [ + 850, + 385 + ], + [ + 850, + 378 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 858, + 398 + ], + [ + 852, + 397 + ], + [ + 851, + 420 + ], + [ + 859, + 416 + ], + [ + 859, + 414 + ], + [ + 855, + 413 + ], + [ + 859, + 410 + ], + [ + 859, + 406 + ], + [ + 854, + 405 + ], + [ + 858, + 402 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 867, + 436 + ], + [ + 825, + 439 + ], + [ + 818, + 455 + ], + [ + 815, + 454 + ], + [ + 813, + 454 + ], + [ + 810, + 454 + ], + [ + 810, + 456 + ], + [ + 810, + 460 + ], + [ + 815, + 460 + ], + [ + 811, + 480 + ], + [ + 813, + 489 + ], + [ + 816, + 493 + ], + [ + 822, + 493 + ], + [ + 829, + 493 + ], + [ + 831, + 490 + ], + [ + 832, + 487 + ], + [ + 867, + 484 + ], + [ + 867, + 489 + ], + [ + 869, + 490 + ], + [ + 878, + 490 + ], + [ + 882, + 486 + ], + [ + 884, + 482 + ], + [ + 886, + 472 + ], + [ + 883, + 461 + ], + [ + 879, + 451 + ], + [ + 871, + 439 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 901, + 470 + ], + [ + 899, + 436 + ], + [ + 890, + 431 + ], + [ + 887, + 427 + ], + [ + 886, + 425 + ], + [ + 882, + 426 + ], + [ + 881, + 429 + ], + [ + 882, + 433 + ], + [ + 883, + 438 + ], + [ + 882, + 447 + ], + [ + 881, + 455 + ], + [ + 883, + 461 + ], + [ + 887, + 476 + ], + [ + 887, + 478 + ], + [ + 888, + 483 + ], + [ + 888, + 486 + ], + [ + 887, + 488 + ], + [ + 890, + 491 + ], + [ + 895, + 486 + ], + [ + 895, + 478 + ], + [ + 899, + 486 + ], + [ + 902, + 488 + ], + [ + 903, + 482 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 921, + 410 + ], + [ + 922, + 461 + ], + [ + 925, + 461 + ], + [ + 924, + 410 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 933, + 413 + ], + [ + 923, + 413 + ], + [ + 924, + 419 + ], + [ + 933, + 419 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 908, + 407 + ], + [ + 923, + 407 + ], + [ + 923, + 414 + ], + [ + 909, + 414 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 799, + 373 + ], + [ + 778, + 372 + ], + [ + 780, + 380 + ], + [ + 799, + 381 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1276, + 310 + ], + [ + 1256, + 319 + ], + [ + 1250, + 326 + ], + [ + 1244, + 327 + ], + [ + 1238, + 327 + ], + [ + 1235, + 336 + ], + [ + 1229, + 351 + ], + [ + 1220, + 359 + ], + [ + 1218, + 366 + ], + [ + 1223, + 379 + ], + [ + 1232, + 388 + ], + [ + 1246, + 392 + ], + [ + 1261, + 386 + ], + [ + 1273, + 382 + ], + [ + 1269, + 406 + ], + [ + 1279, + 410 + ], + [ + 1288, + 414 + ], + [ + 1296, + 414 + ], + [ + 1303, + 413 + ], + [ + 1307, + 410 + ], + [ + 1307, + 397 + ], + [ + 1308, + 383 + ], + [ + 1307, + 368 + ], + [ + 1299, + 357 + ], + [ + 1298, + 347 + ], + [ + 1300, + 341 + ], + [ + 1300, + 322 + ], + [ + 1279, + 318 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1255, + 206 + ], + [ + 1259, + 465 + ], + [ + 1263, + 466 + ], + [ + 1259, + 207 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1243, + 405 + ], 
+ [ + 1244, + 471 + ], + [ + 1254, + 471 + ], + [ + 1253, + 405 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1177, + 329 + ], + [ + 1203, + 333 + ], + [ + 1218, + 339 + ], + [ + 1226, + 344 + ], + [ + 1230, + 358 + ], + [ + 1230, + 368 + ], + [ + 1231, + 471 + ], + [ + 1227, + 471 + ], + [ + 1228, + 360 + ], + [ + 1224, + 347 + ], + [ + 1218, + 342 + ], + [ + 1202, + 335 + ], + [ + 1159, + 329 + ], + [ + 1147, + 328 + ], + [ + 1147, + 325 + ], + [ + 1167, + 328 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1137, + 320 + ], + [ + 1138, + 344 + ], + [ + 1150, + 344 + ], + [ + 1149, + 319 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1362, + 359 + ], + [ + 1307, + 359 + ], + [ + 1308, + 386 + ], + [ + 1365, + 385 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1361, + 354 + ], + [ + 1363, + 479 + ], + [ + 1370, + 479 + ], + [ + 1367, + 354 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1208, + 448 + ], + [ + 1208, + 471 + ], + [ + 1212, + 471 + ], + [ + 1212, + 447 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1273, + 261 + ], + [ + 1275, + 459 + ], + [ + 1282, + 460 + ], + [ + 1280, + 261 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1286, + 481 + ], + [ + 1285, + 443 + ], + [ + 1288, + 443 + ], + [ + 1290, + 481 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1264, + 474 + ], + [ + 1262, + 474 + ], + [ + 1260, + 441 + ], + [ + 1263, + 441 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1316, + 351 + ], + [ + 1279, + 351 + ], + [ + 1280, + 369 + ], + [ + 1318, + 368 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1286, + 362 + ], + [ + 1227, + 361 + ], + [ + 1229, + 410 + ], + [ + 1290, + 409 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1250, + 372 + ], + [ + 1253, + 411 + ], + [ + 1275, + 410 + ], + [ + 1274, + 372 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1315, + 494 + ], + [ + 1311, + 149 + ], + [ + 1328, + 149 + ], + [ + 1330, + 495 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1207, + 198 + ], + [ + 1231, + 202 + ], + [ + 1263, + 211 + ], + [ + 1302, + 219 + ], + [ + 1320, + 224 + ], + [ + 1335, + 234 + ], + [ + 1343, + 241 + ], + [ + 1348, + 259 + ], + [ + 1348, + 312 + ], + [ + 1348, + 341 + ], + [ + 1345, + 462 + ], + [ + 1346, + 496 + ], + [ + 1353, + 497 + ], + [ + 1352, + 457 + ], + [ + 1351, + 453 + ], + [ + 1351, + 385 + ], + [ + 1352, + 258 + ], + [ + 1347, + 237 + ], + [ + 1325, + 220 + ], + [ + 1292, + 212 + ], + [ + 1253, + 205 + ], + [ + 1207, + 194 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1183, + 193 + ], + [ + 1184, + 216 + ], + [ + 1187, + 219 + ], + [ + 1197, + 221 + ], + [ + 1207, + 221 + ], + [ + 1211, + 215 + ], + [ + 1210, + 184 + ], + [ + 1207, + 181 + ], + [ + 1199, + 179 + ], + [ + 1188, + 179 + ], + [ + 1184, + 182 + ], + [ + 1183, + 188 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1384, + 312 + ], + [ + 1383, + 329 + ], + [ + 1328, + 330 + ], + [ + 1313, + 320 + ], + [ + 1327, + 311 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1226, + 395 + ], + [ + 1218, + 396 + ], + [ + 1220, + 399 + ], + [ + 1215, + 399 + ], + [ + 1216, + 403 + ], + [ + 1221, + 403 + ], + [ + 1220, + 405 + ], + [ + 1214, + 404 + ], + [ + 1216, + 408 + ], + [ + 1219, + 409 + ], + [ + 1220, + 411 + ], + [ + 1216, + 412 + ], + [ + 1217, + 416 + ], + [ + 1228, + 416 + ], + [ + 1228, + 395 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1025, + 361 
+ ], + [ + 1025, + 378 + ], + [ + 1032, + 378 + ], + [ + 1031, + 361 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000175_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000175_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..75e8ff86b6064c6a2e0d30ff3623d44c080c763b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000175_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000177_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000177_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..34c072002bd63c5783ccc1a65e874d582c8c77a8 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000177_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000177_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000177_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..2a06eebea9fb1088717b429d8064a7715a35f58f Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000177_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000178_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000178_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..1bfd7bc0a874fe57a5dceb698b009ef605187115 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000178_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000180_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000180_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..d06b8de8b39dace72a34672719f5bb1674f2b700 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000180_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000180_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000180_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..065176f9190237de6b7d95c70454d050363b76b3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000180_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000181_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000181_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..b9926b332fc71d7d449c558e32cee494cc466739 Binary files 
/dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000181_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000183_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000183_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..0fde5bece32248e998da45ee38d4a0c6d2648511 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000183_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000183_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000183_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..fe26836e2cc224059412f837bc5cd226eba7bfb8 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000183_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000184_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000184_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..5ef7d473a01a60ec08bf802390e9773e8d6576a7 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000184_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000184_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000184_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..1316faec37059a82565d0f8591211e8b6a3a3f0e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000184_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000185_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000185_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..1524dfd636a01567a7d91cfde5501eeb4ac5096f Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000185_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000185_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000185_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..cae28321752c39d8a30367628e15a0d3ffd645fc Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000185_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000186_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000186_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..a7ac506ada3a1aee7f137eb69de9d978abe24395 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000186_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000187_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000187_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..5ca786a92d421e3eac37cedaf125fa7c00f41ec5 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000187_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000188_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000188_000019_gtFine_color.png new 
file mode 100644 index 0000000000000000000000000000000000000000..75c8d26d673448dc09c773df32adc941e61affce Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000188_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000188_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000188_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..b45aa09aa04c46cb716c21911181f795352bc21d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000188_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000189_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000189_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..dc5418c9652a1d0d19ebcab7deab40207b4a5af2 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000189_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000189_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000189_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..2066070583d2d074d7bd5cc2c639cb8a2fe65b8a Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000189_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000190_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000190_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..0013f633ee2cbad3359eba400ba84f7ce3a7c066 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000190_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000190_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000190_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..b4fd5269782c382a5fb50284ecc43f75a98b06de Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000190_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000191_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000191_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d6c0466a7fd475164bde76767142ce6f5a348d34 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000191_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000192_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000192_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..a35f75f480de9012b82fe2b59ea9759d083d29df Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000192_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000192_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000192_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..15735f3d69cf1c9acfb1eac4597981cc1ad4386c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000192_000019_gtFine_labelTrainIds.png differ diff --git 
a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000193_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000193_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..bd6c426510b3f3abb715ed54a4d71dd9939d9cda Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000193_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000193_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000193_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..acbe3d11e6cf12b72523ff8da912fab01e1f1b32 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000193_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000194_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000194_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..1f6aebe4181d1e2ecd5f319b308321f10418c598 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000194_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000194_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000194_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..64bd8736008a2a91154c9d457ca8214508a0d0e5 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000194_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000194_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000194_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..750bdbf71d2dfe0eeb2368ebb6d79d1f4527c7f6 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000194_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000195_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000195_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..53c11e6dfd1aea4b35cb1e42d2a9edd84a46fb75 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000195_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000196_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000196_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..5c5dfeb59ee4d8ddb168c54a447ef611204c50ab Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000196_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000197_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000197_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..3ece137d721527a9b5dda4fe22bd102fe5494fa6 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000197_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000197_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000197_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..65219647be17833ea73f37f1cb95b555a4876c5a Binary files /dev/null and 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000197_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000198_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000198_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..eb1a367a67093cae5277f2a2b7c112f0f8a723e2 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000198_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000198_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000198_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..6f7d1085b85e2f7325351259b0474ca8be85c0df Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000198_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000198_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000198_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..c49dc4b789b951443c6efb8a94356bc4d774e31c --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000198_000019_gtFine_polygons.json @@ -0,0 +1,3069 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 2063, + 356 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 291 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2, + 697 + ], + [ + 99, + 701 + ], + [ + 199, + 712 + ], + [ + 240, + 722 + ], + [ + 260, + 740 + ], + [ + 266, + 761 + ], + [ + 213, + 795 + ], + [ + 0, + 918 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2041, + 804 + ], + [ + 1779, + 713 + ], + [ + 1596, + 655 + ], + [ + 1533, + 635 + ], + [ + 1527, + 612 + ], + [ + 1543, + 592 + ], + [ + 1589, + 576 + ], + [ + 1711, + 559 + ], + [ + 1918, + 552 + ], + [ + 1988, + 543 + ], + [ + 2048, + 599 + ], + [ + 2048, + 809 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 7, + 485 + ], + [ + 389, + 477 + ], + [ + 733, + 472 + ], + [ + 960, + 465 + ], + [ + 1506, + 456 + ], + [ + 1501, + 450 + ], + [ + 1279, + 423 + ], + [ + 811, + 403 + ], + [ + 0, + 434 + ], + [ + 0, + 485 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1946, + 491 + ], + [ + 1616, + 507 + ], + [ + 1203, + 523 + ], + [ + 933, + 538 + ], + [ + 605, + 547 + ], + [ + 440, + 550 + ], + [ + 320, + 550 + ], + [ + 284, + 546 + ], + [ + 271, + 542 + ], + [ + 270, + 534 + ], + [ + 276, + 531 + ], + [ + 312, + 527 + ], + [ + 380, + 526 + ], + [ + 786, + 511 + ], + [ + 1046, + 501 + ], + [ + 1648, + 481 + ], + [ + 1956, + 462 + ], + [ + 1962, + 484 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 9, + 471 + ], + [ + 324, + 472 + ], + [ + 384, + 471 + ], + [ + 739, + 464 + ], + [ + 942, + 457 + ], + [ + 1222, + 449 + ], + [ + 1382, + 446 + ], + [ + 1502, + 456 + ], + [ + 1522, + 454 + ], + [ + 1613, + 452 + ], + [ + 1676, + 449 + ], + [ + 1773, + 445 + ], + [ + 1978, + 449 + ], + [ + 1974, + 591 + ], + [ + 2048, + 601 + ], + [ + 2048, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 471 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1946, + 491 + ], + [ + 1616, + 507 + ], + [ + 1203, + 523 + ], + [ + 933, + 538 + ], + [ + 605, + 547 + ], + [ + 440, + 550 + ], + [ + 320, + 550 + ], + [ + 284, + 546 + ], + [ + 271, + 542 + ], + [ + 270, + 534 + ], + [ + 276, + 531 + ], + [ + 312, + 527 + ], + [ + 380, + 526 + ], + [ + 786, + 511 + 
], + [ + 1046, + 501 + ], + [ + 1648, + 481 + ], + [ + 1956, + 462 + ], + [ + 1962, + 484 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1480, + 201 + ], + [ + 1473, + 419 + ], + [ + 1477, + 419 + ], + [ + 1484, + 201 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 1551, + 411 + ], + [ + 1566, + 432 + ], + [ + 1562, + 459 + ], + [ + 1502, + 459 + ], + [ + 1468, + 458 + ], + [ + 1447, + 412 + ], + [ + 1480, + 413 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1429, + 375 + ], + [ + 1430, + 406 + ], + [ + 1460, + 419 + ], + [ + 1460, + 375 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1154, + 168 + ], + [ + 1148, + 419 + ], + [ + 1153, + 419 + ], + [ + 1157, + 168 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1145, + 133 + ], + [ + 1160, + 134 + ], + [ + 1166, + 162 + ], + [ + 1161, + 173 + ], + [ + 1151, + 171 + ], + [ + 1150, + 160 + ], + [ + 1147, + 160 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1174, + 429 + ], + [ + 1173, + 412 + ], + [ + 1135, + 411 + ], + [ + 1141, + 445 + ], + [ + 1151, + 446 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1034, + 405 + ], + [ + 1033, + 395 + ], + [ + 1032, + 72 + ], + [ + 1032, + 19 + ], + [ + 1032, + 0 + ], + [ + 1040, + 0 + ], + [ + 1043, + 0 + ], + [ + 1037, + 6 + ], + [ + 1036, + 40 + ], + [ + 1041, + 399 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 727, + 129 + ], + [ + 710, + 416 + ], + [ + 715, + 420 + ], + [ + 716, + 417 + ], + [ + 730, + 129 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 744, + 274 + ], + [ + 748, + 289 + ], + [ + 747, + 305 + ], + [ + 746, + 347 + ], + [ + 738, + 348 + ], + [ + 740, + 276 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 736, + 421 + ], + [ + 738, + 273 + ], + [ + 743, + 273 + ], + [ + 739, + 420 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 747, + 431 + ], + [ + 744, + 413 + ], + [ + 703, + 411 + ], + [ + 686, + 458 + ], + [ + 687, + 469 + ], + [ + 731, + 467 + ], + [ + 744, + 444 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 424, + 242 + ], + [ + 425, + 373 + ], + [ + 432, + 373 + ], + [ + 428, + 240 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 441, + 234 + ], + [ + 426, + 236 + ], + [ + 411, + 246 + ], + [ + 427, + 247 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 414, + 256 + ], + [ + 415, + 305 + ], + [ + 446, + 306 + ], + [ + 445, + 257 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 264, + 398 + ], + [ + 165, + 393 + ], + [ + 139, + 396 + ], + [ + 92, + 427 + ], + [ + 44, + 429 + ], + [ + 18, + 439 + ], + [ + 9, + 457 + ], + [ + 9, + 471 + ], + [ + 16, + 480 + ], + [ + 22, + 485 + ], + [ + 72, + 488 + ], + [ + 79, + 496 + ], + [ + 87, + 499 + ], + [ + 96, + 499 + ], + [ + 108, + 496 + ], + [ + 120, + 490 + ], + [ + 124, + 482 + ], + [ + 229, + 482 + ], + [ + 233, + 488 + ], + [ + 241, + 491 + ], + [ + 250, + 485 + ], + [ + 254, + 481 + ], + [ + 256, + 489 + ], + [ + 276, + 496 + ], + [ + 284, + 496 + ], + [ + 297, + 489 + ], + [ + 304, + 480 + ], + [ + 318, + 474 + ], + [ + 323, + 450 + ], + [ + 317, + 449 + ], + [ + 313, + 426 + ], + [ + 279, + 403 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 702, + 345 + ], + [ + 660, + 340 + ], + [ + 454, + 342 + ], + [ + 437, + 349 + ], + [ + 407, + 379 + ], + [ + 406, + 375 + ], + [ + 405, + 375 + ], + [ + 399, + 377 + ], + [ + 394, + 387 + ], + [ + 394, + 394 + ], + [ + 393, + 397 + ], + [ + 362, + 419 + ], + [ + 356, + 444 + ], + [ + 356, + 454 + ], + [ + 358, + 460 + ], + [ + 
368, + 466 + ], + [ + 376, + 472 + ], + [ + 382, + 476 + ], + [ + 388, + 480 + ], + [ + 396, + 483 + ], + [ + 402, + 484 + ], + [ + 407, + 484 + ], + [ + 414, + 482 + ], + [ + 418, + 477 + ], + [ + 419, + 475 + ], + [ + 428, + 483 + ], + [ + 446, + 490 + ], + [ + 449, + 490 + ], + [ + 456, + 490 + ], + [ + 466, + 487 + ], + [ + 476, + 476 + ], + [ + 479, + 471 + ], + [ + 583, + 469 + ], + [ + 586, + 475 + ], + [ + 591, + 476 + ], + [ + 596, + 477 + ], + [ + 603, + 478 + ], + [ + 609, + 477 + ], + [ + 614, + 475 + ], + [ + 616, + 472 + ], + [ + 619, + 467 + ], + [ + 627, + 467 + ], + [ + 630, + 475 + ], + [ + 636, + 482 + ], + [ + 646, + 486 + ], + [ + 663, + 486 + ], + [ + 668, + 481 + ], + [ + 673, + 470 + ], + [ + 675, + 466 + ], + [ + 707, + 458 + ], + [ + 710, + 454 + ], + [ + 711, + 400 + ], + [ + 706, + 361 + ], + [ + 704, + 354 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 359, + 385 + ], + [ + 358, + 535 + ], + [ + 368, + 536 + ], + [ + 368, + 342 + ], + [ + 360, + 342 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 353, + 402 + ], + [ + 367, + 403 + ], + [ + 367, + 444 + ], + [ + 353, + 442 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 355, + 344 + ], + [ + 348, + 353 + ], + [ + 344, + 368 + ], + [ + 346, + 385 + ], + [ + 351, + 393 + ], + [ + 354, + 396 + ], + [ + 359, + 398 + ], + [ + 364, + 397 + ], + [ + 370, + 388 + ], + [ + 374, + 377 + ], + [ + 373, + 364 + ], + [ + 371, + 353 + ], + [ + 368, + 349 + ], + [ + 362, + 344 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 876, + 394 + ], + [ + 815, + 395 + ], + [ + 791, + 401 + ], + [ + 760, + 416 + ], + [ + 733, + 427 + ], + [ + 721, + 439 + ], + [ + 719, + 458 + ], + [ + 724, + 468 + ], + [ + 731, + 471 + ], + [ + 735, + 475 + ], + [ + 739, + 476 + ], + [ + 743, + 476 + ], + [ + 749, + 476 + ], + [ + 752, + 474 + ], + [ + 785, + 473 + ], + [ + 788, + 481 + ], + [ + 797, + 484 + ], + [ + 809, + 476 + ], + [ + 812, + 474 + ], + [ + 893, + 467 + ], + [ + 897, + 478 + ], + [ + 908, + 478 + ], + [ + 917, + 474 + ], + [ + 921, + 467 + ], + [ + 926, + 450 + ], + [ + 925, + 433 + ], + [ + 921, + 423 + ], + [ + 903, + 401 + ], + [ + 882, + 396 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1063, + 386 + ], + [ + 1037, + 392 + ], + [ + 1003, + 402 + ], + [ + 976, + 419 + ], + [ + 953, + 426 + ], + [ + 934, + 441 + ], + [ + 935, + 460 + ], + [ + 950, + 467 + ], + [ + 953, + 472 + ], + [ + 963, + 474 + ], + [ + 979, + 474 + ], + [ + 990, + 475 + ], + [ + 1001, + 477 + ], + [ + 1010, + 479 + ], + [ + 1020, + 476 + ], + [ + 1026, + 471 + ], + [ + 1036, + 469 + ], + [ + 1099, + 470 + ], + [ + 1124, + 465 + ], + [ + 1134, + 465 + ], + [ + 1148, + 464 + ], + [ + 1150, + 458 + ], + [ + 1150, + 440 + ], + [ + 1147, + 425 + ], + [ + 1136, + 403 + ], + [ + 1132, + 398 + ], + [ + 1114, + 398 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1454, + 405 + ], + [ + 1422, + 399 + ], + [ + 1380, + 398 + ], + [ + 1364, + 402 + ], + [ + 1336, + 417 + ], + [ + 1339, + 460 + ], + [ + 1348, + 462 + ], + [ + 1359, + 463 + ], + [ + 1365, + 463 + ], + [ + 1370, + 464 + ], + [ + 1372, + 466 + ], + [ + 1377, + 467 + ], + [ + 1384, + 467 + ], + [ + 1387, + 465 + ], + [ + 1390, + 461 + ], + [ + 1393, + 458 + ], + [ + 1396, + 461 + ], + [ + 1402, + 465 + ], + [ + 1412, + 465 + ], + [ + 1415, + 463 + ], + [ + 1418, + 459 + ], + [ + 1443, + 459 + ], + [ + 1447, + 465 + ], + [ + 1455, + 467 + ], + [ + 1465, + 466 + ], + [ + 1469, + 461 + ], + [ + 1471, + 449 + ], + [ + 1473, + 438 + ], + [ + 1470, + 
423 + ], + [ + 1461, + 409 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1325, + 407 + ], + [ + 1286, + 396 + ], + [ + 1247, + 394 + ], + [ + 1217, + 400 + ], + [ + 1200, + 413 + ], + [ + 1186, + 419 + ], + [ + 1166, + 421 + ], + [ + 1150, + 429 + ], + [ + 1148, + 436 + ], + [ + 1148, + 452 + ], + [ + 1149, + 456 + ], + [ + 1154, + 462 + ], + [ + 1163, + 468 + ], + [ + 1168, + 468 + ], + [ + 1173, + 466 + ], + [ + 1182, + 461 + ], + [ + 1193, + 462 + ], + [ + 1202, + 467 + ], + [ + 1214, + 470 + ], + [ + 1221, + 471 + ], + [ + 1229, + 470 + ], + [ + 1233, + 465 + ], + [ + 1244, + 463 + ], + [ + 1259, + 464 + ], + [ + 1264, + 467 + ], + [ + 1274, + 467 + ], + [ + 1281, + 463 + ], + [ + 1285, + 460 + ], + [ + 1307, + 461 + ], + [ + 1311, + 465 + ], + [ + 1316, + 470 + ], + [ + 1328, + 470 + ], + [ + 1333, + 467 + ], + [ + 1338, + 461 + ], + [ + 1344, + 458 + ], + [ + 1351, + 450 + ], + [ + 1350, + 435 + ], + [ + 1346, + 418 + ], + [ + 1333, + 413 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 946, + 182 + ], + [ + 947, + 188 + ], + [ + 980, + 191 + ], + [ + 980, + 182 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 962, + 175 + ], + [ + 962, + 427 + ], + [ + 960, + 431 + ], + [ + 959, + 519 + ], + [ + 968, + 520 + ], + [ + 969, + 431 + ], + [ + 967, + 426 + ], + [ + 968, + 176 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 954, + 224 + ], + [ + 953, + 193 + ], + [ + 970, + 190 + ], + [ + 969, + 221 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1132, + 381 + ], + [ + 1132, + 389 + ], + [ + 1126, + 390 + ], + [ + 1130, + 515 + ], + [ + 1079, + 518 + ], + [ + 1041, + 513 + ], + [ + 1045, + 388 + ], + [ + 1042, + 388 + ], + [ + 1043, + 383 + ], + [ + 1078, + 381 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1667, + 406 + ], + [ + 1628, + 405 + ], + [ + 1598, + 402 + ], + [ + 1568, + 422 + ], + [ + 1550, + 428 + ], + [ + 1546, + 446 + ], + [ + 1547, + 451 + ], + [ + 1557, + 462 + ], + [ + 1567, + 462 + ], + [ + 1571, + 461 + ], + [ + 1575, + 459 + ], + [ + 1605, + 459 + ], + [ + 1607, + 463 + ], + [ + 1613, + 464 + ], + [ + 1622, + 464 + ], + [ + 1626, + 461 + ], + [ + 1632, + 458 + ], + [ + 1659, + 457 + ], + [ + 1666, + 461 + ], + [ + 1672, + 461 + ], + [ + 1676, + 461 + ], + [ + 1685, + 453 + ], + [ + 1689, + 443 + ], + [ + 1691, + 433 + ], + [ + 1687, + 422 + ], + [ + 1681, + 412 + ], + [ + 1676, + 407 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2025, + 186 + ], + [ + 2020, + 467 + ], + [ + 2011, + 476 + ], + [ + 2006, + 668 + ], + [ + 2044, + 669 + ], + [ + 2047, + 475 + ], + [ + 2044, + 469 + ], + [ + 2048, + 117 + ], + [ + 2019, + 115 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1964, + 177 + ], + [ + 1998, + 177 + ], + [ + 1998, + 181 + ], + [ + 2026, + 187 + ], + [ + 2034, + 173 + ], + [ + 2038, + 0 + ], + [ + 1947, + 0 + ], + [ + 1945, + 7 + ], + [ + 1944, + 16 + ], + [ + 1946, + 22 + ], + [ + 1966, + 33 + ], + [ + 1967, + 51 + ], + [ + 1954, + 55 + ], + [ + 1947, + 62 + ], + [ + 1942, + 82 + ], + [ + 1966, + 98 + ], + [ + 1967, + 114 + ], + [ + 1953, + 115 + ], + [ + 1943, + 126 + ], + [ + 1940, + 144 + ], + [ + 1963, + 156 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 2030, + 40 + ], + [ + 2020, + 180 + ], + [ + 2048, + 179 + ], + [ + 2048, + 0 + ], + [ + 2028, + 0 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1878, + 385 + ], + [ + 1866, + 362 + ], + [ + 1797, + 355 + ], + [ + 1779, + 383 + ], + [ + 1751, + 372 + ], + [ + 1748, + 422 + ], + [ + 1773, + 
424 + ], + [ + 1782, + 412 + ], + [ + 1806, + 379 + ], + [ + 1840, + 385 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1838, + 262 + ], + [ + 1839, + 388 + ], + [ + 1846, + 388 + ], + [ + 1842, + 261 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1852, + 333 + ], + [ + 1843, + 333 + ], + [ + 1845, + 373 + ], + [ + 1853, + 373 + ], + [ + 1853, + 370 + ], + [ + 1860, + 367 + ], + [ + 1859, + 362 + ], + [ + 1863, + 357 + ], + [ + 1860, + 352 + ], + [ + 1855, + 351 + ], + [ + 1858, + 346 + ], + [ + 1860, + 342 + ], + [ + 1855, + 339 + ], + [ + 1853, + 339 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1907, + 292 + ], + [ + 1841, + 293 + ], + [ + 1841, + 320 + ], + [ + 1906, + 318 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1903, + 286 + ], + [ + 1899, + 431 + ], + [ + 1905, + 432 + ], + [ + 1908, + 421 + ], + [ + 1911, + 285 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1933, + 238 + ], + [ + 1958, + 238 + ], + [ + 1955, + 324 + ], + [ + 1928, + 324 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1823, + 426 + ], + [ + 1824, + 380 + ], + [ + 1908, + 378 + ], + [ + 1908, + 431 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1963, + 336 + ], + [ + 1947, + 338 + ], + [ + 1949, + 362 + ], + [ + 1963, + 367 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1977, + 353 + ], + [ + 1965, + 353 + ], + [ + 1965, + 377 + ], + [ + 1977, + 375 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1959, + 331 + ], + [ + 1959, + 422 + ], + [ + 1968, + 422 + ], + [ + 1967, + 333 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1942, + 378 + ], + [ + 1937, + 388 + ], + [ + 1937, + 399 + ], + [ + 1935, + 408 + ], + [ + 1936, + 417 + ], + [ + 1947, + 419 + ], + [ + 1958, + 416 + ], + [ + 1961, + 407 + ], + [ + 1958, + 397 + ], + [ + 1954, + 391 + ], + [ + 1952, + 385 + ], + [ + 1947, + 379 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1758, + 466 + ], + [ + 1765, + 0 + ], + [ + 1796, + 0 + ], + [ + 1791, + 426 + ] + ] + }, + { + "label": "fence", + "polygon": [ + [ + 1639, + 495 + ], + [ + 1641, + 414 + ], + [ + 1980, + 412 + ], + [ + 1977, + 477 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1734, + 423 + ], + [ + 1730, + 436 + ], + [ + 1734, + 452 + ], + [ + 1727, + 461 + ], + [ + 1722, + 463 + ], + [ + 1695, + 441 + ], + [ + 1686, + 441 + ], + [ + 1697, + 454 + ], + [ + 1708, + 463 + ], + [ + 1721, + 468 + ], + [ + 1727, + 466 + ], + [ + 1728, + 488 + ], + [ + 1718, + 491 + ], + [ + 1715, + 476 + ], + [ + 1718, + 474 + ], + [ + 1715, + 467 + ], + [ + 1714, + 467 + ], + [ + 1704, + 467 + ], + [ + 1699, + 468 + ], + [ + 1697, + 474 + ], + [ + 1699, + 478 + ], + [ + 1702, + 480 + ], + [ + 1706, + 480 + ], + [ + 1709, + 481 + ], + [ + 1713, + 489 + ], + [ + 1680, + 492 + ], + [ + 1685, + 511 + ], + [ + 1688, + 538 + ], + [ + 1692, + 556 + ], + [ + 1700, + 569 + ], + [ + 1709, + 574 + ], + [ + 1715, + 574 + ], + [ + 1723, + 571 + ], + [ + 1726, + 564 + ], + [ + 1727, + 550 + ], + [ + 1738, + 550 + ], + [ + 1746, + 560 + ], + [ + 1753, + 565 + ], + [ + 1765, + 567 + ], + [ + 1772, + 567 + ], + [ + 1778, + 561 + ], + [ + 1779, + 551 + ], + [ + 1781, + 545 + ], + [ + 1773, + 517 + ], + [ + 1757, + 501 + ], + [ + 1736, + 467 + ], + [ + 1739, + 457 + ], + [ + 1738, + 445 + ], + [ + 1736, + 436 + ], + [ + 1736, + 431 + ], + [ + 1737, + 427 + ], + [ + 1744, + 424 + ], + [ + 1747, + 422 + ], + [ + 1747, + 418 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 
1812, + 214 + ], + [ + 1754, + 204 + ], + [ + 1752, + 239 + ], + [ + 1811, + 247 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1742, + 571 + ], + [ + 1746, + 202 + ], + [ + 1757, + 203 + ], + [ + 1752, + 571 + ] + ] + }, + { + "label": "motorcycle", + "polygon": [ + [ + 1940, + 496 + ], + [ + 1942, + 507 + ], + [ + 1939, + 524 + ], + [ + 1943, + 531 + ], + [ + 1942, + 545 + ], + [ + 1945, + 551 + ], + [ + 1950, + 552 + ], + [ + 1958, + 551 + ], + [ + 1960, + 547 + ], + [ + 1964, + 533 + ], + [ + 1976, + 537 + ], + [ + 1979, + 431 + ], + [ + 1950, + 432 + ], + [ + 1947, + 436 + ], + [ + 1953, + 442 + ], + [ + 1958, + 444 + ], + [ + 1962, + 445 + ], + [ + 1965, + 450 + ], + [ + 1965, + 453 + ], + [ + 1955, + 456 + ], + [ + 1947, + 467 + ], + [ + 1943, + 480 + ], + [ + 1939, + 490 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1908, + 555 + ], + [ + 1925, + 0 + ], + [ + 1939, + 0 + ], + [ + 1930, + 558 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1912, + 241 + ], + [ + 1906, + 252 + ], + [ + 1904, + 266 + ], + [ + 1905, + 279 + ], + [ + 1911, + 286 + ], + [ + 1916, + 289 + ], + [ + 1926, + 286 + ], + [ + 1934, + 276 + ], + [ + 1938, + 262 + ], + [ + 1938, + 251 + ], + [ + 1929, + 238 + ], + [ + 1919, + 238 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000200_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000200_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..23259bd695cd1bcfcb9dfcb9bfa7b7dcad4cebfd Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000200_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000200_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000200_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..c453bee7d713907ea9bdd734c943f238a46afcf5 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000200_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000200_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000200_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..0646b15dfc9ccd6bf6c5cbd6dacdfd7fee58c621 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000200_000019_gtFine_polygons.json @@ -0,0 +1,3821 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sidewalk", + "polygon": [ + [ + 34, + 260 + ], + [ + 2048, + 309 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 259 + ] + ] + }, + { + "label": "road", + "polygon": [ + [ + 34, + 260 + ], + [ + 
2048, + 309 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 259 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 277, + 95 + ], + [ + 942, + 354 + ], + [ + 1084, + 352 + ], + [ + 1226, + 286 + ], + [ + 1402, + 81 + ], + [ + 1452, + 0 + ], + [ + 259, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1999, + 1024 + ], + [ + 1526, + 732 + ], + [ + 1243, + 541 + ], + [ + 1186, + 495 + ], + [ + 1173, + 473 + ], + [ + 1159, + 456 + ], + [ + 1201, + 454 + ], + [ + 1342, + 456 + ], + [ + 1676, + 481 + ], + [ + 2048, + 522 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 0, + 566 + ], + [ + 362, + 525 + ], + [ + 410, + 522 + ], + [ + 422, + 516 + ], + [ + 376, + 511 + ], + [ + 185, + 519 + ], + [ + 161, + 521 + ], + [ + 0, + 543 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1527, + 564 + ], + [ + 1442, + 541 + ], + [ + 1247, + 547 + ], + [ + 1334, + 601 + ], + [ + 1336, + 581 + ], + [ + 1358, + 575 + ], + [ + 1446, + 571 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 7, + 521 + ], + [ + 405, + 488 + ], + [ + 589, + 474 + ], + [ + 668, + 465 + ], + [ + 723, + 454 + ], + [ + 644, + 453 + ], + [ + 0, + 441 + ], + [ + 0, + 521 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1527, + 564 + ], + [ + 1442, + 541 + ], + [ + 1247, + 547 + ], + [ + 1334, + 601 + ], + [ + 1336, + 581 + ], + [ + 1358, + 575 + ], + [ + 1446, + 571 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 2030, + 561 + ], + [ + 1826, + 526 + ], + [ + 1794, + 529 + ], + [ + 1531, + 498 + ], + [ + 1438, + 474 + ], + [ + 1391, + 477 + ], + [ + 1323, + 463 + ], + [ + 1265, + 442 + ], + [ + 1187, + 459 + ], + [ + 1161, + 456 + ], + [ + 1110, + 454 + ], + [ + 1057, + 454 + ], + [ + 1001, + 453 + ], + [ + 988, + 326 + ], + [ + 1001, + 292 + ], + [ + 1011, + 281 + ], + [ + 1009, + 231 + ], + [ + 1024, + 172 + ], + [ + 1043, + 164 + ], + [ + 1061, + 172 + ], + [ + 1079, + 230 + ], + [ + 1082, + 320 + ], + [ + 1091, + 319 + ], + [ + 1091, + 310 + ], + [ + 1097, + 318 + ], + [ + 1114, + 314 + ], + [ + 1221, + 206 + ], + [ + 1268, + 132 + ], + [ + 1286, + 79 + ], + [ + 1282, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 566 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 16, + 478 + ], + [ + 336, + 480 + ], + [ + 421, + 473 + ], + [ + 578, + 464 + ], + [ + 654, + 460 + ], + [ + 741, + 453 + ], + [ + 834, + 448 + ], + [ + 847, + 456 + ], + [ + 863, + 455 + ], + [ + 903, + 454 + ], + [ + 934, + 453 + ], + [ + 940, + 453 + ], + [ + 1007, + 453 + ], + [ + 1007, + 437 + ], + [ + 1011, + 434 + ], + [ + 1017, + 431 + ], + [ + 1025, + 431 + ], + [ + 1030, + 431 + ], + [ + 1040, + 430 + ], + [ + 1044, + 429 + ], + [ + 1050, + 427 + ], + [ + 1058, + 427 + ], + [ + 1067, + 423 + ], + [ + 1073, + 418 + ], + [ + 1083, + 413 + ], + [ + 1101, + 412 + ], + [ + 1111, + 410 + ], + [ + 1117, + 404 + ], + [ + 1126, + 393 + ], + [ + 1129, + 378 + ], + [ + 1128, + 364 + ], + [ + 1121, + 353 + ], + [ + 1110, + 343 + ], + [ + 1104, + 334 + ], + [ + 1097, + 329 + ], + [ + 1079, + 329 + ], + [ + 1072, + 328 + ], + [ + 1062, + 328 + ], + [ + 1054, + 329 + ], + [ + 1040, + 342 + ], + [ + 1035, + 353 + ], + [ + 1034, + 342 + ], + [ + 1034, + 325 + ], + [ + 1035, + 304 + ], + [ + 1018, + 284 + ], + [ + 1000, + 267 + ], + [ + 958, + 246 + ], + [ + 951, + 257 + ], + [ + 930, + 251 + ], + [ + 927, + 255 + ], + [ + 928, + 262 + ], + [ + 930, + 269 + ], + [ + 922, + 272 + ], + [ + 916, + 271 + ], + [ + 916, + 267 + ], + [ + 903, + 267 + ], + [ + 911, + 
260 + ], + [ + 926, + 251 + ], + [ + 927, + 236 + ], + [ + 922, + 231 + ], + [ + 908, + 231 + ], + [ + 908, + 231 + ], + [ + 908, + 222 + ], + [ + 905, + 208 + ], + [ + 906, + 199 + ], + [ + 902, + 200 + ], + [ + 893, + 206 + ], + [ + 886, + 203 + ], + [ + 884, + 198 + ], + [ + 870, + 187 + ], + [ + 859, + 180 + ], + [ + 847, + 171 + ], + [ + 835, + 163 + ], + [ + 823, + 160 + ], + [ + 820, + 157 + ], + [ + 820, + 143 + ], + [ + 815, + 141 + ], + [ + 813, + 136 + ], + [ + 805, + 132 + ], + [ + 799, + 139 + ], + [ + 796, + 133 + ], + [ + 775, + 113 + ], + [ + 768, + 117 + ], + [ + 764, + 113 + ], + [ + 758, + 106 + ], + [ + 736, + 115 + ], + [ + 725, + 98 + ], + [ + 717, + 102 + ], + [ + 699, + 92 + ], + [ + 699, + 102 + ], + [ + 690, + 101 + ], + [ + 685, + 95 + ], + [ + 676, + 104 + ], + [ + 667, + 106 + ], + [ + 663, + 101 + ], + [ + 671, + 95 + ], + [ + 671, + 85 + ], + [ + 659, + 76 + ], + [ + 647, + 89 + ], + [ + 645, + 82 + ], + [ + 640, + 66 + ], + [ + 642, + 58 + ], + [ + 652, + 53 + ], + [ + 650, + 40 + ], + [ + 607, + 40 + ], + [ + 613, + 25 + ], + [ + 617, + 6 + ], + [ + 599, + 6 + ], + [ + 578, + 13 + ], + [ + 522, + 25 + ], + [ + 518, + 28 + ], + [ + 509, + 36 + ], + [ + 497, + 37 + ], + [ + 476, + 37 + ], + [ + 456, + 33 + ], + [ + 443, + 24 + ], + [ + 442, + 18 + ], + [ + 451, + 8 + ], + [ + 459, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 481 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1287, + 413 + ], + [ + 1284, + 51 + ], + [ + 1261, + 38 + ], + [ + 1252, + 33 + ], + [ + 1242, + 33 + ], + [ + 1231, + 23 + ], + [ + 1221, + 16 + ], + [ + 1217, + 6 + ], + [ + 1205, + 2 + ], + [ + 1194, + 6 + ], + [ + 1185, + 5 + ], + [ + 1174, + 1 + ], + [ + 1167, + 0 + ], + [ + 1165, + 5 + ], + [ + 1166, + 15 + ], + [ + 1167, + 20 + ], + [ + 1155, + 34 + ], + [ + 1155, + 44 + ], + [ + 1161, + 54 + ], + [ + 1164, + 63 + ], + [ + 1164, + 67 + ], + [ + 1184, + 73 + ], + [ + 1191, + 79 + ], + [ + 1188, + 85 + ], + [ + 1161, + 95 + ], + [ + 1152, + 97 + ], + [ + 1142, + 109 + ], + [ + 1136, + 119 + ], + [ + 1129, + 118 + ], + [ + 1116, + 123 + ], + [ + 1109, + 124 + ], + [ + 1094, + 125 + ], + [ + 1082, + 126 + ], + [ + 1090, + 132 + ], + [ + 1102, + 136 + ], + [ + 1113, + 136 + ], + [ + 1128, + 139 + ], + [ + 1137, + 148 + ], + [ + 1130, + 156 + ], + [ + 1114, + 163 + ], + [ + 1103, + 169 + ], + [ + 1095, + 180 + ], + [ + 1093, + 191 + ], + [ + 1092, + 205 + ], + [ + 1086, + 218 + ], + [ + 1092, + 220 + ], + [ + 1106, + 213 + ], + [ + 1121, + 202 + ], + [ + 1133, + 197 + ], + [ + 1143, + 201 + ], + [ + 1130, + 219 + ], + [ + 1121, + 232 + ], + [ + 1120, + 245 + ], + [ + 1120, + 256 + ], + [ + 1111, + 272 + ], + [ + 1113, + 284 + ], + [ + 1113, + 302 + ], + [ + 1109, + 309 + ], + [ + 1102, + 323 + ], + [ + 1101, + 337 + ], + [ + 1111, + 351 + ], + [ + 1125, + 371 + ], + [ + 1135, + 377 + ], + [ + 1143, + 377 + ], + [ + 1148, + 371 + ], + [ + 1157, + 371 + ], + [ + 1167, + 374 + ], + [ + 1177, + 374 + ], + [ + 1186, + 374 + ], + [ + 1204, + 371 + ], + [ + 1207, + 375 + ], + [ + 1227, + 401 + ], + [ + 1234, + 418 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1816, + 450 + ], + [ + 1821, + 615 + ], + [ + 1843, + 615 + ], + [ + 1840, + 451 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1626, + 231 + ], + [ + 1637, + 561 + ], + [ + 1645, + 562 + ], + [ + 1641, + 227 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1598, + 99 + ], + [ + 1604, + 235 + ], + [ + 1662, + 234 + ], + [ + 1662, + 92 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 
1437, + 393 + ], + [ + 1431, + 399 + ], + [ + 1429, + 406 + ], + [ + 1429, + 418 + ], + [ + 1428, + 428 + ], + [ + 1427, + 441 + ], + [ + 1429, + 458 + ], + [ + 1428, + 470 + ], + [ + 1424, + 479 + ], + [ + 1425, + 485 + ], + [ + 1430, + 488 + ], + [ + 1441, + 488 + ], + [ + 1445, + 482 + ], + [ + 1448, + 456 + ], + [ + 1448, + 429 + ], + [ + 1451, + 412 + ], + [ + 1447, + 405 + ], + [ + 1447, + 400 + ], + [ + 1444, + 392 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1413, + 454 + ], + [ + 1412, + 580 + ], + [ + 1423, + 580 + ], + [ + 1427, + 454 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1468, + 454 + ], + [ + 1471, + 598 + ], + [ + 1484, + 602 + ], + [ + 1484, + 455 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1499, + 464 + ], + [ + 1507, + 624 + ], + [ + 1524, + 625 + ], + [ + 1520, + 463 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1439, + 584 + ], + [ + 1434, + 454 + ], + [ + 1449, + 454 + ], + [ + 1452, + 588 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 414, + 291 + ], + [ + 419, + 454 + ], + [ + 421, + 457 + ], + [ + 417, + 289 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 437, + 286 + ], + [ + 415, + 286 + ], + [ + 412, + 289 + ], + [ + 411, + 293 + ], + [ + 417, + 295 + ], + [ + 421, + 294 + ], + [ + 437, + 291 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 291, + 290 + ], + [ + 275, + 290 + ], + [ + 276, + 317 + ], + [ + 285, + 317 + ], + [ + 289, + 317 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 267, + 239 + ], + [ + 269, + 440 + ], + [ + 281, + 440 + ], + [ + 276, + 238 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 400, + 436 + ], + [ + 364, + 435 + ], + [ + 348, + 438 + ], + [ + 338, + 446 + ], + [ + 339, + 477 + ], + [ + 345, + 484 + ], + [ + 346, + 485 + ], + [ + 352, + 485 + ], + [ + 362, + 485 + ], + [ + 383, + 485 + ], + [ + 384, + 484 + ], + [ + 391, + 480 + ], + [ + 398, + 480 + ], + [ + 404, + 481 + ], + [ + 409, + 484 + ], + [ + 412, + 484 + ], + [ + 418, + 484 + ], + [ + 424, + 474 + ], + [ + 424, + 461 + ], + [ + 421, + 449 + ], + [ + 412, + 442 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 260, + 443 + ], + [ + 267, + 436 + ], + [ + 282, + 433 + ], + [ + 309, + 432 + ], + [ + 331, + 432 + ], + [ + 339, + 441 + ], + [ + 346, + 457 + ], + [ + 350, + 468 + ], + [ + 348, + 480 + ], + [ + 346, + 488 + ], + [ + 343, + 491 + ], + [ + 333, + 492 + ], + [ + 328, + 485 + ], + [ + 322, + 485 + ], + [ + 320, + 490 + ], + [ + 319, + 494 + ], + [ + 314, + 494 + ], + [ + 307, + 492 + ], + [ + 304, + 488 + ], + [ + 279, + 490 + ], + [ + 267, + 486 + ], + [ + 258, + 470 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 182, + 459 + ], + [ + 191, + 440 + ], + [ + 216, + 436 + ], + [ + 238, + 436 + ], + [ + 255, + 438 + ], + [ + 265, + 442 + ], + [ + 271, + 452 + ], + [ + 276, + 464 + ], + [ + 272, + 489 + ], + [ + 271, + 492 + ], + [ + 241, + 494 + ], + [ + 240, + 497 + ], + [ + 237, + 499 + ], + [ + 231, + 500 + ], + [ + 227, + 498 + ], + [ + 222, + 495 + ], + [ + 206, + 494 + ], + [ + 196, + 494 + ], + [ + 180, + 483 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 82, + 423 + ], + [ + 165, + 424 + ], + [ + 181, + 431 + ], + [ + 193, + 454 + ], + [ + 196, + 478 + ], + [ + 196, + 498 + ], + [ + 190, + 504 + ], + [ + 179, + 506 + ], + [ + 172, + 504 + ], + [ + 167, + 502 + ], + [ + 143, + 500 + ], + [ + 130, + 504 + ], + [ + 118, + 507 + ], + [ + 108, + 508 + ], + [ + 98, + 508 + ], + [ + 94, + 503 + ], + [ + 89, + 499 + ], + [ + 47, + 497 + ], + 
[ + 45, + 503 + ], + [ + 39, + 505 + ], + [ + 34, + 508 + ], + [ + 28, + 506 + ], + [ + 10, + 468 + ], + [ + 22, + 446 + ], + [ + 32, + 448 + ], + [ + 43, + 443 + ], + [ + 54, + 441 + ], + [ + 63, + 435 + ], + [ + 71, + 429 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 29, + 509 + ], + [ + 0, + 512 + ], + [ + 0, + 437 + ], + [ + 29, + 438 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 34, + 361 + ], + [ + 0, + 396 + ], + [ + 0, + 324 + ], + [ + 35, + 325 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 116, + 533 + ], + [ + 107, + 0 + ], + [ + 135, + 0 + ], + [ + 138, + 45 + ], + [ + 730, + 38 + ], + [ + 729, + 44 + ], + [ + 140, + 56 + ], + [ + 148, + 533 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 856, + 441 + ], + [ + 841, + 429 + ], + [ + 831, + 429 + ], + [ + 818, + 432 + ], + [ + 811, + 429 + ], + [ + 807, + 423 + ], + [ + 791, + 419 + ], + [ + 761, + 419 + ], + [ + 754, + 426 + ], + [ + 748, + 435 + ], + [ + 741, + 442 + ], + [ + 734, + 442 + ], + [ + 725, + 432 + ], + [ + 707, + 430 + ], + [ + 696, + 430 + ], + [ + 687, + 427 + ], + [ + 679, + 426 + ], + [ + 671, + 429 + ], + [ + 665, + 432 + ], + [ + 659, + 441 + ], + [ + 655, + 450 + ], + [ + 653, + 457 + ], + [ + 652, + 464 + ], + [ + 655, + 468 + ], + [ + 667, + 469 + ], + [ + 699, + 468 + ], + [ + 710, + 465 + ], + [ + 728, + 465 + ], + [ + 742, + 463 + ], + [ + 756, + 463 + ], + [ + 771, + 462 + ], + [ + 793, + 461 + ], + [ + 826, + 461 + ], + [ + 836, + 461 + ], + [ + 842, + 459 + ], + [ + 849, + 456 + ], + [ + 853, + 455 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 456, + 502 + ], + [ + 549, + 504 + ], + [ + 698, + 484 + ], + [ + 792, + 473 + ], + [ + 844, + 465 + ], + [ + 892, + 460 + ], + [ + 936, + 456 + ], + [ + 963, + 455 + ], + [ + 934, + 452 + ], + [ + 845, + 453 + ], + [ + 783, + 457 + ], + [ + 744, + 461 + ], + [ + 710, + 464 + ], + [ + 665, + 470 + ], + [ + 644, + 473 + ], + [ + 568, + 479 + ], + [ + 465, + 493 + ], + [ + 459, + 497 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 588, + 429 + ], + [ + 573, + 430 + ], + [ + 573, + 472 + ], + [ + 587, + 471 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 995, + 443 + ], + [ + 979, + 443 + ], + [ + 965, + 446 + ], + [ + 955, + 444 + ], + [ + 939, + 442 + ], + [ + 933, + 442 + ], + [ + 930, + 441 + ], + [ + 925, + 444 + ], + [ + 924, + 449 + ], + [ + 925, + 452 + ], + [ + 940, + 454 + ], + [ + 950, + 454 + ], + [ + 958, + 454 + ], + [ + 962, + 455 + ], + [ + 972, + 457 + ], + [ + 986, + 456 + ], + [ + 993, + 455 + ], + [ + 998, + 455 + ], + [ + 1000, + 451 + ], + [ + 997, + 445 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 933, + 278 + ], + [ + 936, + 454 + ], + [ + 941, + 454 + ], + [ + 938, + 334 + ], + [ + 1023, + 329 + ], + [ + 1024, + 327 + ], + [ + 941, + 329 + ], + [ + 938, + 278 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 962, + 294 + ], + [ + 781, + 294 + ], + [ + 781, + 300 + ], + [ + 961, + 297 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 769, + 156 + ], + [ + 778, + 466 + ], + [ + 784, + 465 + ], + [ + 782, + 248 + ], + [ + 965, + 246 + ], + [ + 966, + 241 + ], + [ + 780, + 242 + ], + [ + 777, + 156 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 338, + 500 + ], + [ + 249, + 508 + ], + [ + 211, + 508 + ], + [ + 194, + 512 + ], + [ + 175, + 518 + ], + [ + 162, + 518 + ], + [ + 152, + 522 + ], + [ + 161, + 526 + ], + [ + 196, + 527 + ], + [ + 236, + 525 + ], + [ + 263, + 524 + ], + [ + 300, + 523 + ], + [ + 325, + 523 + ], 
+ [ + 363, + 518 + ], + [ + 380, + 515 + ], + [ + 397, + 516 + ], + [ + 404, + 517 + ], + [ + 413, + 517 + ], + [ + 420, + 517 + ], + [ + 424, + 516 + ], + [ + 409, + 513 + ], + [ + 378, + 510 + ], + [ + 357, + 507 + ], + [ + 347, + 503 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 588, + 483 + ], + [ + 584, + 424 + ], + [ + 593, + 424 + ], + [ + 595, + 482 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 897, + 439 + ], + [ + 887, + 437 + ], + [ + 882, + 439 + ], + [ + 878, + 445 + ], + [ + 876, + 449 + ], + [ + 876, + 452 + ], + [ + 879, + 454 + ], + [ + 885, + 455 + ], + [ + 891, + 455 + ], + [ + 899, + 455 + ], + [ + 903, + 454 + ], + [ + 905, + 452 + ], + [ + 902, + 443 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1021, + 317 + ], + [ + 1021, + 345 + ], + [ + 1035, + 345 + ], + [ + 1034, + 317 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1001, + 465 + ], + [ + 1070, + 467 + ], + [ + 1070, + 464 + ], + [ + 1053, + 460 + ], + [ + 1040, + 458 + ], + [ + 1027, + 459 + ], + [ + 1009, + 460 + ], + [ + 1000, + 462 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1025, + 356 + ], + [ + 1027, + 463 + ], + [ + 1029, + 463 + ], + [ + 1028, + 355 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1014, + 378 + ], + [ + 1016, + 400 + ], + [ + 1027, + 401 + ], + [ + 1027, + 378 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1031, + 401 + ], + [ + 1030, + 376 + ], + [ + 1023, + 376 + ], + [ + 1025, + 400 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1112, + 429 + ], + [ + 1068, + 428 + ], + [ + 1028, + 431 + ], + [ + 1029, + 455 + ], + [ + 1056, + 456 + ], + [ + 1079, + 456 + ], + [ + 1101, + 453 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1120, + 427 + ], + [ + 1086, + 427 + ], + [ + 1079, + 434 + ], + [ + 1075, + 447 + ], + [ + 1074, + 463 + ], + [ + 1076, + 470 + ], + [ + 1080, + 471 + ], + [ + 1083, + 471 + ], + [ + 1085, + 466 + ], + [ + 1121, + 467 + ], + [ + 1122, + 470 + ], + [ + 1126, + 470 + ], + [ + 1128, + 470 + ], + [ + 1130, + 467 + ], + [ + 1131, + 455 + ], + [ + 1130, + 443 + ], + [ + 1123, + 431 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1046, + 442 + ], + [ + 1047, + 463 + ], + [ + 1050, + 463 + ], + [ + 1049, + 448 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1053, + 435 + ], + [ + 1040, + 435 + ], + [ + 1042, + 454 + ], + [ + 1055, + 454 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1169, + 343 + ], + [ + 1170, + 422 + ], + [ + 1171, + 422 + ], + [ + 1172, + 422 + ], + [ + 1172, + 341 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1203, + 365 + ], + [ + 1177, + 366 + ], + [ + 1177, + 417 + ], + [ + 1215, + 419 + ], + [ + 1217, + 360 + ], + [ + 1194, + 361 + ], + [ + 1194, + 366 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1183, + 314 + ], + [ + 1182, + 416 + ], + [ + 1184, + 416 + ], + [ + 1185, + 314 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1263, + 486 + ], + [ + 1349, + 511 + ], + [ + 1370, + 517 + ], + [ + 1280, + 520 + ], + [ + 1242, + 523 + ], + [ + 1236, + 533 + ], + [ + 1236, + 536 + ], + [ + 1190, + 495 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1186, + 378 + ], + [ + 1177, + 378 + ], + [ + 1178, + 410 + ], + [ + 1189, + 411 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1263, + 486 + ], + [ + 1349, + 511 + ], + [ + 1370, + 517 + ], + [ + 1280, + 520 + ], + [ + 1242, + 523 + ], + [ + 1236, + 533 + ], + [ + 1236, + 536 + ], + [ 
+ 1190, + 495 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1206, + 255 + ], + [ + 1207, + 418 + ], + [ + 1210, + 419 + ], + [ + 1209, + 253 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1188, + 256 + ], + [ + 1208, + 256 + ], + [ + 1208, + 247 + ], + [ + 1185, + 247 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1183, + 312 + ], + [ + 1181, + 309 + ], + [ + 1172, + 309 + ], + [ + 1172, + 312 + ], + [ + 1177, + 314 + ], + [ + 1182, + 314 + ], + [ + 1185, + 315 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1161, + 340 + ], + [ + 1168, + 339 + ], + [ + 1171, + 343 + ], + [ + 1171, + 344 + ], + [ + 1167, + 344 + ], + [ + 1162, + 343 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1189, + 346 + ], + [ + 1206, + 314 + ], + [ + 1221, + 345 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1201, + 422 + ], + [ + 1199, + 393 + ], + [ + 1202, + 393 + ], + [ + 1203, + 417 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1199, + 396 + ], + [ + 1198, + 348 + ], + [ + 1214, + 348 + ], + [ + 1213, + 395 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1166, + 428 + ], + [ + 1160, + 436 + ], + [ + 1157, + 441 + ], + [ + 1156, + 449 + ], + [ + 1156, + 457 + ], + [ + 1158, + 459 + ], + [ + 1161, + 460 + ], + [ + 1162, + 463 + ], + [ + 1164, + 469 + ], + [ + 1165, + 476 + ], + [ + 1169, + 478 + ], + [ + 1173, + 479 + ], + [ + 1177, + 480 + ], + [ + 1183, + 478 + ], + [ + 1192, + 478 + ], + [ + 1197, + 477 + ], + [ + 1206, + 459 + ], + [ + 1210, + 446 + ], + [ + 1210, + 432 + ], + [ + 1213, + 420 + ], + [ + 1212, + 417 + ], + [ + 1207, + 414 + ], + [ + 1203, + 412 + ], + [ + 1197, + 412 + ], + [ + 1189, + 412 + ], + [ + 1181, + 412 + ], + [ + 1175, + 414 + ], + [ + 1171, + 415 + ], + [ + 1169, + 421 + ], + [ + 1167, + 425 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1220, + 415 + ], + [ + 1211, + 411 + ], + [ + 1206, + 414 + ], + [ + 1202, + 422 + ], + [ + 1198, + 430 + ], + [ + 1197, + 434 + ], + [ + 1194, + 436 + ], + [ + 1191, + 443 + ], + [ + 1192, + 448 + ], + [ + 1191, + 460 + ], + [ + 1193, + 480 + ], + [ + 1194, + 496 + ], + [ + 1196, + 499 + ], + [ + 1197, + 502 + ], + [ + 1198, + 504 + ], + [ + 1203, + 502 + ], + [ + 1215, + 482 + ], + [ + 1236, + 458 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1305, + 405 + ], + [ + 1285, + 405 + ], + [ + 1251, + 406 + ], + [ + 1226, + 408 + ], + [ + 1219, + 414 + ], + [ + 1209, + 432 + ], + [ + 1206, + 437 + ], + [ + 1199, + 439 + ], + [ + 1198, + 444 + ], + [ + 1198, + 454 + ], + [ + 1199, + 466 + ], + [ + 1200, + 484 + ], + [ + 1199, + 496 + ], + [ + 1202, + 505 + ], + [ + 1207, + 507 + ], + [ + 1208, + 508 + ], + [ + 1211, + 510 + ], + [ + 1214, + 512 + ], + [ + 1219, + 513 + ], + [ + 1225, + 513 + ], + [ + 1227, + 512 + ], + [ + 1228, + 509 + ], + [ + 1230, + 504 + ], + [ + 1296, + 499 + ], + [ + 1301, + 506 + ], + [ + 1304, + 509 + ], + [ + 1314, + 510 + ], + [ + 1324, + 510 + ], + [ + 1330, + 504 + ], + [ + 1330, + 488 + ], + [ + 1328, + 466 + ], + [ + 1328, + 446 + ], + [ + 1318, + 426 + ], + [ + 1307, + 410 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1301, + 525 + ], + [ + 1302, + 54 + ], + [ + 1295, + 51 + ], + [ + 1296, + 46 + ], + [ + 1304, + 46 + ], + [ + 1309, + 50 + ], + [ + 1309, + 62 + ], + [ + 1314, + 525 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1298, + 40 + ], + [ + 1288, + 36 + ], + [ + 1258, + 39 + ], + [ + 1259, + 54 + ], + [ + 1279, + 59 + ], + [ + 1299, + 58 + ] + ] + }, + { + "label": 
"traffic sign", + "polygon": [ + [ + 1322, + 218 + ], + [ + 1286, + 217 + ], + [ + 1290, + 308 + ], + [ + 1323, + 309 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1298, + 311 + ], + [ + 1292, + 316 + ], + [ + 1289, + 324 + ], + [ + 1289, + 334 + ], + [ + 1290, + 340 + ], + [ + 1295, + 345 + ], + [ + 1299, + 348 + ], + [ + 1305, + 348 + ], + [ + 1312, + 342 + ], + [ + 1317, + 334 + ], + [ + 1319, + 327 + ], + [ + 1314, + 317 + ], + [ + 1308, + 310 + ], + [ + 1302, + 310 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000201_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000201_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..a6d249253c17095e46fc47480d265c3d88bc3636 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000201_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000201_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000201_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..72dca96b5f7f8b1adb6ea23aae6fdb044e44078e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000201_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000202_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000202_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..4fe3c2574886483158f7eadd56a3f7d53184316c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000202_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000202_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000202_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..c47b49a49b517adb380bf290f5fed358ac70043b Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000202_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000202_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000202_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..d1199234556f4385d1a4e0ea8e1dd126f90e994e --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000202_000019_gtFine_polygons.json @@ -0,0 +1,6109 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 71, + 257 + ], + [ + 2048, + 249 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 257 + ] + ] + }, + { + 
"label": "sky", + "polygon": [ + [ + 823, + 19 + ], + [ + 1053, + 248 + ], + [ + 1151, + 168 + ], + [ + 1147, + 0 + ], + [ + 860, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2027, + 671 + ], + [ + 1973, + 710 + ], + [ + 1965, + 755 + ], + [ + 1990, + 826 + ], + [ + 2042, + 870 + ], + [ + 2048, + 879 + ], + [ + 2048, + 661 + ], + [ + 2040, + 666 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 484, + 483 + ], + [ + 545, + 483 + ], + [ + 693, + 466 + ], + [ + 705, + 459 + ], + [ + 711, + 441 + ], + [ + 688, + 439 + ], + [ + 609, + 445 + ], + [ + 529, + 450 + ], + [ + 489, + 454 + ], + [ + 470, + 464 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2049, + 511 + ], + [ + 1819, + 517 + ], + [ + 1562, + 509 + ], + [ + 1378, + 502 + ], + [ + 1304, + 493 + ], + [ + 1278, + 474 + ], + [ + 1271, + 472 + ], + [ + 1181, + 462 + ], + [ + 1171, + 461 + ], + [ + 1174, + 457 + ], + [ + 1187, + 454 + ], + [ + 1261, + 431 + ], + [ + 1353, + 419 + ], + [ + 2048, + 475 + ], + [ + 2048, + 514 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1097, + 400 + ], + [ + 1029, + 404 + ], + [ + 919, + 408 + ], + [ + 907, + 410 + ], + [ + 885, + 406 + ], + [ + 885, + 400 + ], + [ + 913, + 385 + ], + [ + 979, + 380 + ], + [ + 1030, + 380 + ], + [ + 1048, + 381 + ], + [ + 1053, + 382 + ], + [ + 1063, + 390 + ], + [ + 1068, + 392 + ], + [ + 1077, + 393 + ], + [ + 1084, + 393 + ], + [ + 1092, + 395 + ], + [ + 1099, + 397 + ], + [ + 1100, + 399 + ], + [ + 1100, + 399 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 901, + 434 + ], + [ + 931, + 429 + ], + [ + 943, + 427 + ], + [ + 942, + 424 + ], + [ + 938, + 423 + ], + [ + 950, + 419 + ], + [ + 961, + 416 + ], + [ + 963, + 413 + ], + [ + 962, + 412 + ], + [ + 952, + 411 + ], + [ + 938, + 411 + ], + [ + 928, + 412 + ], + [ + 919, + 412 + ], + [ + 895, + 430 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1189, + 392 + ], + [ + 1086, + 389 + ], + [ + 1088, + 381 + ], + [ + 1189, + 381 + ], + [ + 1191, + 388 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 2047, + 488 + ], + [ + 1554, + 478 + ], + [ + 1442, + 453 + ], + [ + 1394, + 450 + ], + [ + 1357, + 437 + ], + [ + 1328, + 430 + ], + [ + 1294, + 401 + ], + [ + 1258, + 396 + ], + [ + 1197, + 388 + ], + [ + 1061, + 385 + ], + [ + 973, + 395 + ], + [ + 940, + 395 + ], + [ + 892, + 397 + ], + [ + 868, + 414 + ], + [ + 663, + 424 + ], + [ + 545, + 454 + ], + [ + 509, + 462 + ], + [ + 327, + 461 + ], + [ + 0, + 413 + ], + [ + 0, + 0 + ], + [ + 879, + 0 + ], + [ + 901, + 33 + ], + [ + 1034, + 45 + ], + [ + 1336, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 490 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1191, + 398 + ], + [ + 1182, + 403 + ], + [ + 1121, + 414 + ], + [ + 1114, + 414 + ], + [ + 1097, + 414 + ], + [ + 1087, + 413 + ], + [ + 1092, + 409 + ], + [ + 1125, + 406 + ], + [ + 1160, + 398 + ], + [ + 1179, + 395 + ], + [ + 1187, + 396 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1934, + 441 + ], + [ + 1934, + 510 + ], + [ + 1941, + 510 + ], + [ + 1939, + 440 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1766, + 203 + ], + [ + 1766, + 218 + ], + [ + 1779, + 214 + ], + [ + 1778, + 200 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1786, + 185 + ], + [ + 1759, + 192 + ], + [ + 1760, + 211 + ], + [ + 1788, + 203 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1787, + 261 + ], + [ + 1808, + 258 + ], + [ + 1809, + 264 + ], + [ + 1828, + 266 + ], + [ + 
1827, + 274 + ], + [ + 1810, + 281 + ], + [ + 1810, + 285 + ], + [ + 1826, + 285 + ], + [ + 1826, + 293 + ], + [ + 1811, + 301 + ], + [ + 1811, + 308 + ], + [ + 1827, + 307 + ], + [ + 1826, + 317 + ], + [ + 1812, + 320 + ], + [ + 1809, + 328 + ], + [ + 1799, + 329 + ], + [ + 1786, + 329 + ], + [ + 1788, + 325 + ], + [ + 1797, + 323 + ], + [ + 1797, + 262 + ], + [ + 1786, + 265 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1776, + 508 + ], + [ + 1777, + 439 + ], + [ + 1781, + 432 + ], + [ + 1784, + 228 + ], + [ + 1784, + 111 + ], + [ + 1792, + 80 + ], + [ + 1802, + 66 + ], + [ + 1889, + 1 + ], + [ + 1896, + 0 + ], + [ + 1902, + 0 + ], + [ + 1902, + 2 + ], + [ + 1888, + 7 + ], + [ + 1804, + 70 + ], + [ + 1794, + 85 + ], + [ + 1788, + 111 + ], + [ + 1789, + 432 + ], + [ + 1792, + 440 + ], + [ + 1792, + 510 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1761, + 258 + ], + [ + 1762, + 328 + ], + [ + 1779, + 328 + ], + [ + 1782, + 322 + ], + [ + 1788, + 313 + ], + [ + 1786, + 308 + ], + [ + 1782, + 305 + ], + [ + 1780, + 304 + ], + [ + 1782, + 300 + ], + [ + 1787, + 295 + ], + [ + 1789, + 293 + ], + [ + 1789, + 289 + ], + [ + 1782, + 283 + ], + [ + 1780, + 279 + ], + [ + 1784, + 278 + ], + [ + 1787, + 277 + ], + [ + 1788, + 274 + ], + [ + 1788, + 269 + ], + [ + 1784, + 266 + ], + [ + 1783, + 259 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1962, + 52 + ], + [ + 1942, + 62 + ], + [ + 1940, + 50 + ], + [ + 1926, + 49 + ], + [ + 1927, + 10 + ], + [ + 1930, + 0 + ], + [ + 1970, + 0 + ], + [ + 1965, + 7 + ], + [ + 1976, + 6 + ], + [ + 1973, + 14 + ], + [ + 1960, + 20 + ], + [ + 1959, + 28 + ], + [ + 1975, + 28 + ], + [ + 1974, + 36 + ], + [ + 1959, + 40 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1892, + 23 + ], + [ + 1894, + 0 + ], + [ + 1942, + 0 + ], + [ + 1942, + 8 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1634, + 466 + ], + [ + 1650, + 445 + ], + [ + 1672, + 445 + ], + [ + 1692, + 453 + ], + [ + 1702, + 463 + ], + [ + 1720, + 464 + ], + [ + 1728, + 465 + ], + [ + 1730, + 469 + ], + [ + 1730, + 476 + ], + [ + 1726, + 482 + ], + [ + 1639, + 482 + ], + [ + 1633, + 480 + ], + [ + 1634, + 476 + ], + [ + 1634, + 473 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1549, + 436 + ], + [ + 1550, + 499 + ], + [ + 1556, + 500 + ], + [ + 1556, + 437 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1366, + 300 + ], + [ + 1366, + 343 + ], + [ + 1409, + 342 + ], + [ + 1408, + 300 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1442, + 284 + ], + [ + 1430, + 281 + ], + [ + 1410, + 286 + ], + [ + 1412, + 293 + ], + [ + 1422, + 293 + ], + [ + 1423, + 302 + ], + [ + 1414, + 302 + ], + [ + 1412, + 309 + ], + [ + 1423, + 314 + ], + [ + 1422, + 320 + ], + [ + 1414, + 320 + ], + [ + 1411, + 327 + ], + [ + 1425, + 332 + ], + [ + 1426, + 338 + ], + [ + 1442, + 341 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1440, + 396 + ], + [ + 1434, + 391 + ], + [ + 1433, + 385 + ], + [ + 1433, + 378 + ], + [ + 1429, + 372 + ], + [ + 1425, + 373 + ], + [ + 1420, + 383 + ], + [ + 1419, + 387 + ], + [ + 1419, + 393 + ], + [ + 1416, + 395 + ], + [ + 1412, + 401 + ], + [ + 1410, + 410 + ], + [ + 1414, + 418 + ], + [ + 1417, + 423 + ], + [ + 1419, + 432 + ], + [ + 1419, + 443 + ], + [ + 1418, + 450 + ], + [ + 1419, + 459 + ], + [ + 1419, + 461 + ], + [ + 1426, + 461 + ], + [ + 1431, + 461 + ], + [ + 1439, + 461 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1443, + 438 + ], + [ + 1427, + 439 + 
], + [ + 1425, + 471 + ], + [ + 1442, + 475 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1438, + 493 + ], + [ + 1438, + 1 + ], + [ + 1448, + 0 + ], + [ + 1448, + 493 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1408, + 263 + ], + [ + 1446, + 261 + ], + [ + 1447, + 266 + ], + [ + 1415, + 266 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1407, + 250 + ], + [ + 1400, + 251 + ], + [ + 1392, + 255 + ], + [ + 1390, + 263 + ], + [ + 1391, + 271 + ], + [ + 1396, + 277 + ], + [ + 1407, + 279 + ], + [ + 1413, + 278 + ], + [ + 1419, + 272 + ], + [ + 1421, + 265 + ], + [ + 1417, + 256 + ], + [ + 1413, + 251 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1449, + 278 + ], + [ + 1438, + 276 + ], + [ + 1435, + 280 + ], + [ + 1430, + 281 + ], + [ + 1426, + 288 + ], + [ + 1426, + 294 + ], + [ + 1430, + 296 + ], + [ + 1433, + 298 + ], + [ + 1434, + 302 + ], + [ + 1438, + 304 + ], + [ + 1445, + 305 + ], + [ + 1449, + 300 + ], + [ + 1450, + 282 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 1390, + 384 + ], + [ + 1386, + 388 + ], + [ + 1384, + 394 + ], + [ + 1382, + 397 + ], + [ + 1379, + 399 + ], + [ + 1378, + 405 + ], + [ + 1377, + 413 + ], + [ + 1379, + 419 + ], + [ + 1381, + 430 + ], + [ + 1385, + 448 + ], + [ + 1384, + 457 + ], + [ + 1384, + 459 + ], + [ + 1388, + 463 + ], + [ + 1392, + 463 + ], + [ + 1396, + 461 + ], + [ + 1393, + 456 + ], + [ + 1395, + 445 + ], + [ + 1394, + 436 + ], + [ + 1396, + 431 + ], + [ + 1400, + 430 + ], + [ + 1404, + 429 + ], + [ + 1404, + 416 + ], + [ + 1402, + 403 + ], + [ + 1400, + 397 + ], + [ + 1396, + 396 + ], + [ + 1395, + 389 + ], + [ + 1393, + 385 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1298, + 377 + ], + [ + 1290, + 375 + ], + [ + 1285, + 377 + ], + [ + 1279, + 379 + ], + [ + 1275, + 381 + ], + [ + 1263, + 382 + ], + [ + 1249, + 382 + ], + [ + 1239, + 384 + ], + [ + 1233, + 385 + ], + [ + 1235, + 395 + ], + [ + 1239, + 401 + ], + [ + 1250, + 406 + ], + [ + 1262, + 419 + ], + [ + 1271, + 402 + ], + [ + 1275, + 401 + ], + [ + 1298, + 396 + ], + [ + 1301, + 391 + ], + [ + 1301, + 387 + ], + [ + 1300, + 380 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1258, + 278 + ], + [ + 1257, + 370 + ], + [ + 1258, + 379 + ], + [ + 1262, + 280 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1280, + 228 + ], + [ + 1280, + 454 + ], + [ + 1285, + 454 + ], + [ + 1284, + 228 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1268, + 357 + ], + [ + 1261, + 362 + ], + [ + 1261, + 366 + ], + [ + 1262, + 374 + ], + [ + 1266, + 378 + ], + [ + 1272, + 378 + ], + [ + 1276, + 374 + ], + [ + 1276, + 363 + ], + [ + 1274, + 360 + ], + [ + 1271, + 357 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1276, + 339 + ], + [ + 1275, + 376 + ], + [ + 1287, + 376 + ], + [ + 1286, + 339 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1288, + 349 + ], + [ + 1284, + 347 + ], + [ + 1280, + 353 + ], + [ + 1279, + 363 + ], + [ + 1281, + 370 + ], + [ + 1285, + 374 + ], + [ + 1287, + 373 + ], + [ + 1290, + 370 + ], + [ + 1292, + 362 + ], + [ + 1291, + 356 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1331, + 385 + ], + [ + 1332, + 361 + ], + [ + 1328, + 330 + ], + [ + 1326, + 310 + ], + [ + 1330, + 300 + ], + [ + 1326, + 278 + ], + [ + 1326, + 262 + ], + [ + 1328, + 248 + ], + [ + 1334, + 234 + ], + [ + 1333, + 215 + ], + [ + 1338, + 214 + ], + [ + 1339, + 206 + ], + [ + 1334, + 202 + ], + [ + 1344, + 200 + ], + [ + 1346, + 205 + ], + [ + 
1349, + 207 + ], + [ + 1352, + 204 + ], + [ + 1349, + 201 + ], + [ + 1349, + 198 + ], + [ + 1353, + 197 + ], + [ + 1357, + 194 + ], + [ + 1357, + 191 + ], + [ + 1358, + 178 + ], + [ + 1361, + 176 + ], + [ + 1371, + 171 + ], + [ + 1367, + 177 + ], + [ + 1369, + 180 + ], + [ + 1374, + 183 + ], + [ + 1378, + 184 + ], + [ + 1386, + 185 + ], + [ + 1385, + 178 + ], + [ + 1386, + 171 + ], + [ + 1390, + 166 + ], + [ + 1395, + 159 + ], + [ + 1393, + 155 + ], + [ + 1388, + 156 + ], + [ + 1386, + 157 + ], + [ + 1385, + 149 + ], + [ + 1387, + 144 + ], + [ + 1390, + 139 + ], + [ + 1393, + 137 + ], + [ + 1397, + 130 + ], + [ + 1397, + 125 + ], + [ + 1390, + 127 + ], + [ + 1382, + 127 + ], + [ + 1382, + 120 + ], + [ + 1387, + 113 + ], + [ + 1401, + 116 + ], + [ + 1407, + 117 + ], + [ + 1409, + 113 + ], + [ + 1407, + 107 + ], + [ + 1407, + 99 + ], + [ + 1398, + 98 + ], + [ + 1389, + 89 + ], + [ + 1380, + 83 + ], + [ + 1377, + 80 + ], + [ + 1368, + 76 + ], + [ + 1366, + 71 + ], + [ + 1356, + 76 + ], + [ + 1349, + 76 + ], + [ + 1346, + 70 + ], + [ + 1338, + 65 + ], + [ + 1333, + 58 + ], + [ + 1329, + 58 + ], + [ + 1328, + 62 + ], + [ + 1325, + 59 + ], + [ + 1326, + 54 + ], + [ + 1333, + 53 + ], + [ + 1342, + 53 + ], + [ + 1345, + 49 + ], + [ + 1353, + 48 + ], + [ + 1358, + 45 + ], + [ + 1364, + 45 + ], + [ + 1371, + 49 + ], + [ + 1378, + 58 + ], + [ + 1380, + 64 + ], + [ + 1393, + 66 + ], + [ + 1399, + 66 + ], + [ + 1405, + 62 + ], + [ + 1408, + 62 + ], + [ + 1408, + 36 + ], + [ + 1405, + 36 + ], + [ + 1402, + 34 + ], + [ + 1403, + 30 + ], + [ + 1406, + 29 + ], + [ + 1408, + 25 + ], + [ + 1408, + 20 + ], + [ + 1405, + 18 + ], + [ + 1399, + 18 + ], + [ + 1395, + 16 + ], + [ + 1399, + 12 + ], + [ + 1396, + 7 + ], + [ + 1396, + 4 + ], + [ + 1407, + 4 + ], + [ + 1414, + 3 + ], + [ + 1420, + 0 + ], + [ + 1024, + 0 + ], + [ + 1025, + 4 + ], + [ + 1027, + 5 + ], + [ + 1030, + 6 + ], + [ + 1034, + 4 + ], + [ + 1034, + 0 + ], + [ + 1060, + 0 + ], + [ + 1057, + 2 + ], + [ + 1053, + 5 + ], + [ + 1055, + 11 + ], + [ + 1056, + 13 + ], + [ + 1058, + 9 + ], + [ + 1063, + 9 + ], + [ + 1066, + 8 + ], + [ + 1071, + 11 + ], + [ + 1073, + 14 + ], + [ + 1074, + 20 + ], + [ + 1067, + 17 + ], + [ + 1063, + 19 + ], + [ + 1061, + 20 + ], + [ + 1054, + 20 + ], + [ + 1048, + 18 + ], + [ + 1044, + 15 + ], + [ + 1038, + 14 + ], + [ + 1032, + 20 + ], + [ + 1035, + 26 + ], + [ + 1036, + 31 + ], + [ + 1026, + 35 + ], + [ + 1024, + 43 + ], + [ + 1024, + 47 + ], + [ + 1023, + 50 + ], + [ + 1020, + 52 + ], + [ + 1015, + 56 + ], + [ + 1012, + 66 + ], + [ + 1011, + 70 + ], + [ + 1011, + 81 + ], + [ + 1011, + 86 + ], + [ + 1009, + 94 + ], + [ + 1008, + 100 + ], + [ + 1009, + 109 + ], + [ + 1015, + 120 + ], + [ + 1021, + 122 + ], + [ + 1029, + 118 + ], + [ + 1031, + 124 + ], + [ + 1024, + 124 + ], + [ + 1022, + 134 + ], + [ + 1022, + 146 + ], + [ + 1025, + 149 + ], + [ + 1030, + 147 + ], + [ + 1032, + 145 + ], + [ + 1036, + 148 + ], + [ + 1037, + 145 + ], + [ + 1037, + 140 + ], + [ + 1041, + 140 + ], + [ + 1045, + 141 + ], + [ + 1050, + 140 + ], + [ + 1055, + 138 + ], + [ + 1057, + 142 + ], + [ + 1060, + 143 + ], + [ + 1065, + 142 + ], + [ + 1072, + 138 + ], + [ + 1074, + 137 + ], + [ + 1075, + 141 + ], + [ + 1081, + 139 + ], + [ + 1087, + 132 + ], + [ + 1088, + 129 + ], + [ + 1087, + 126 + ], + [ + 1086, + 121 + ], + [ + 1087, + 121 + ], + [ + 1089, + 122 + ], + [ + 1093, + 126 + ], + [ + 1097, + 124 + ], + [ + 1100, + 122 + ], + [ + 1100, + 118 + ], + [ + 1103, + 121 + ], + [ + 1106, + 122 + ], + [ + 1109, + 123 + ], + [ + 1111, + 126 + ], + [ 
+ 1107, + 126 + ], + [ + 1102, + 128 + ], + [ + 1098, + 131 + ], + [ + 1098, + 135 + ], + [ + 1102, + 137 + ], + [ + 1105, + 141 + ], + [ + 1099, + 143 + ], + [ + 1087, + 144 + ], + [ + 1091, + 150 + ], + [ + 1092, + 154 + ], + [ + 1086, + 155 + ], + [ + 1081, + 161 + ], + [ + 1071, + 166 + ], + [ + 1067, + 164 + ], + [ + 1067, + 160 + ], + [ + 1064, + 162 + ], + [ + 1063, + 166 + ], + [ + 1059, + 165 + ], + [ + 1054, + 162 + ], + [ + 1051, + 165 + ], + [ + 1048, + 173 + ], + [ + 1045, + 175 + ], + [ + 1041, + 175 + ], + [ + 1041, + 180 + ], + [ + 1037, + 177 + ], + [ + 1035, + 180 + ], + [ + 1036, + 186 + ], + [ + 1029, + 190 + ], + [ + 1028, + 183 + ], + [ + 1024, + 178 + ], + [ + 1020, + 177 + ], + [ + 1018, + 186 + ], + [ + 1015, + 190 + ], + [ + 1008, + 194 + ], + [ + 1006, + 189 + ], + [ + 1003, + 189 + ], + [ + 998, + 187 + ], + [ + 998, + 178 + ], + [ + 997, + 173 + ], + [ + 991, + 168 + ], + [ + 982, + 163 + ], + [ + 987, + 163 + ], + [ + 994, + 163 + ], + [ + 1001, + 163 + ], + [ + 1005, + 162 + ], + [ + 998, + 156 + ], + [ + 994, + 151 + ], + [ + 986, + 148 + ], + [ + 987, + 144 + ], + [ + 990, + 140 + ], + [ + 988, + 137 + ], + [ + 979, + 131 + ], + [ + 974, + 131 + ], + [ + 974, + 126 + ], + [ + 976, + 125 + ], + [ + 977, + 120 + ], + [ + 972, + 116 + ], + [ + 965, + 108 + ], + [ + 958, + 103 + ], + [ + 961, + 101 + ], + [ + 964, + 101 + ], + [ + 969, + 97 + ], + [ + 972, + 90 + ], + [ + 967, + 79 + ], + [ + 963, + 74 + ], + [ + 960, + 76 + ], + [ + 958, + 74 + ], + [ + 956, + 70 + ], + [ + 947, + 64 + ], + [ + 943, + 65 + ], + [ + 939, + 67 + ], + [ + 937, + 67 + ], + [ + 937, + 63 + ], + [ + 935, + 57 + ], + [ + 935, + 52 + ], + [ + 933, + 43 + ], + [ + 922, + 41 + ], + [ + 921, + 39 + ], + [ + 922, + 33 + ], + [ + 926, + 27 + ], + [ + 928, + 23 + ], + [ + 922, + 15 + ], + [ + 912, + 8 + ], + [ + 916, + 0 + ], + [ + 803, + 0 + ], + [ + 799, + 6 + ], + [ + 791, + 13 + ], + [ + 784, + 16 + ], + [ + 783, + 23 + ], + [ + 782, + 30 + ], + [ + 781, + 42 + ], + [ + 783, + 52 + ], + [ + 785, + 56 + ], + [ + 789, + 62 + ], + [ + 791, + 63 + ], + [ + 795, + 63 + ], + [ + 803, + 63 + ], + [ + 810, + 65 + ], + [ + 813, + 68 + ], + [ + 818, + 72 + ], + [ + 826, + 73 + ], + [ + 830, + 77 + ], + [ + 831, + 84 + ], + [ + 822, + 90 + ], + [ + 814, + 93 + ], + [ + 803, + 99 + ], + [ + 802, + 118 + ], + [ + 799, + 125 + ], + [ + 796, + 133 + ], + [ + 809, + 134 + ], + [ + 811, + 135 + ], + [ + 812, + 142 + ], + [ + 817, + 151 + ], + [ + 821, + 152 + ], + [ + 819, + 165 + ], + [ + 819, + 173 + ], + [ + 813, + 179 + ], + [ + 805, + 188 + ], + [ + 801, + 196 + ], + [ + 799, + 198 + ], + [ + 795, + 185 + ], + [ + 791, + 176 + ], + [ + 772, + 150 + ], + [ + 757, + 149 + ], + [ + 748, + 152 + ], + [ + 742, + 146 + ], + [ + 740, + 138 + ], + [ + 721, + 130 + ], + [ + 727, + 128 + ], + [ + 726, + 117 + ], + [ + 723, + 108 + ], + [ + 719, + 108 + ], + [ + 711, + 110 + ], + [ + 709, + 110 + ], + [ + 707, + 100 + ], + [ + 711, + 95 + ], + [ + 712, + 90 + ], + [ + 704, + 86 + ], + [ + 703, + 80 + ], + [ + 707, + 79 + ], + [ + 707, + 73 + ], + [ + 697, + 70 + ], + [ + 685, + 75 + ], + [ + 681, + 77 + ], + [ + 676, + 72 + ], + [ + 669, + 76 + ], + [ + 669, + 71 + ], + [ + 662, + 66 + ], + [ + 660, + 72 + ], + [ + 652, + 71 + ], + [ + 650, + 70 + ], + [ + 643, + 78 + ], + [ + 645, + 82 + ], + [ + 635, + 66 + ], + [ + 631, + 60 + ], + [ + 628, + 64 + ], + [ + 628, + 73 + ], + [ + 624, + 81 + ], + [ + 619, + 90 + ], + [ + 618, + 91 + ], + [ + 613, + 75 + ], + [ + 609, + 69 + ], + [ + 608, + 62 + ], + [ + 601, 
+ 61 + ], + [ + 593, + 59 + ], + [ + 586, + 56 + ], + [ + 579, + 68 + ], + [ + 583, + 74 + ], + [ + 584, + 83 + ], + [ + 583, + 89 + ], + [ + 579, + 96 + ], + [ + 575, + 99 + ], + [ + 574, + 95 + ], + [ + 575, + 88 + ], + [ + 569, + 82 + ], + [ + 564, + 84 + ], + [ + 561, + 83 + ], + [ + 557, + 86 + ], + [ + 560, + 92 + ], + [ + 564, + 99 + ], + [ + 567, + 107 + ], + [ + 562, + 105 + ], + [ + 558, + 103 + ], + [ + 554, + 112 + ], + [ + 555, + 118 + ], + [ + 556, + 121 + ], + [ + 558, + 118 + ], + [ + 559, + 117 + ], + [ + 559, + 126 + ], + [ + 553, + 127 + ], + [ + 548, + 126 + ], + [ + 551, + 120 + ], + [ + 547, + 112 + ], + [ + 541, + 122 + ], + [ + 539, + 133 + ], + [ + 537, + 149 + ], + [ + 538, + 164 + ], + [ + 536, + 173 + ], + [ + 540, + 180 + ], + [ + 542, + 186 + ], + [ + 548, + 187 + ], + [ + 553, + 188 + ], + [ + 559, + 185 + ], + [ + 564, + 183 + ], + [ + 567, + 178 + ], + [ + 572, + 178 + ], + [ + 571, + 186 + ], + [ + 565, + 196 + ], + [ + 570, + 209 + ], + [ + 577, + 209 + ], + [ + 580, + 212 + ], + [ + 569, + 219 + ], + [ + 565, + 214 + ], + [ + 561, + 212 + ], + [ + 559, + 205 + ], + [ + 552, + 202 + ], + [ + 537, + 207 + ], + [ + 531, + 212 + ], + [ + 522, + 215 + ], + [ + 521, + 218 + ], + [ + 525, + 224 + ], + [ + 524, + 230 + ], + [ + 527, + 233 + ], + [ + 533, + 232 + ], + [ + 529, + 237 + ], + [ + 522, + 238 + ], + [ + 515, + 242 + ], + [ + 515, + 247 + ], + [ + 518, + 251 + ], + [ + 527, + 253 + ], + [ + 534, + 251 + ], + [ + 546, + 251 + ], + [ + 546, + 252 + ], + [ + 547, + 260 + ], + [ + 549, + 268 + ], + [ + 550, + 280 + ], + [ + 550, + 287 + ], + [ + 557, + 285 + ], + [ + 560, + 282 + ], + [ + 569, + 284 + ], + [ + 573, + 284 + ], + [ + 581, + 283 + ], + [ + 582, + 276 + ], + [ + 583, + 271 + ], + [ + 590, + 273 + ], + [ + 595, + 281 + ], + [ + 604, + 293 + ], + [ + 608, + 295 + ], + [ + 629, + 300 + ], + [ + 635, + 303 + ], + [ + 638, + 336 + ], + [ + 643, + 409 + ], + [ + 650, + 411 + ], + [ + 648, + 350 + ], + [ + 653, + 349 + ], + [ + 659, + 355 + ], + [ + 662, + 359 + ], + [ + 669, + 356 + ], + [ + 682, + 349 + ], + [ + 690, + 345 + ], + [ + 697, + 338 + ], + [ + 696, + 333 + ], + [ + 687, + 327 + ], + [ + 683, + 318 + ], + [ + 678, + 318 + ], + [ + 672, + 320 + ], + [ + 667, + 309 + ], + [ + 680, + 295 + ], + [ + 680, + 300 + ], + [ + 682, + 306 + ], + [ + 688, + 303 + ], + [ + 692, + 303 + ], + [ + 699, + 302 + ], + [ + 706, + 300 + ], + [ + 703, + 307 + ], + [ + 703, + 310 + ], + [ + 705, + 311 + ], + [ + 712, + 311 + ], + [ + 718, + 309 + ], + [ + 727, + 312 + ], + [ + 734, + 311 + ], + [ + 735, + 305 + ], + [ + 741, + 298 + ], + [ + 750, + 298 + ], + [ + 758, + 297 + ], + [ + 762, + 290 + ], + [ + 769, + 287 + ], + [ + 779, + 281 + ], + [ + 783, + 278 + ], + [ + 787, + 272 + ], + [ + 779, + 263 + ], + [ + 781, + 257 + ], + [ + 788, + 254 + ], + [ + 800, + 251 + ], + [ + 807, + 248 + ], + [ + 812, + 256 + ], + [ + 812, + 266 + ], + [ + 803, + 289 + ], + [ + 807, + 303 + ], + [ + 810, + 323 + ], + [ + 799, + 337 + ], + [ + 799, + 344 + ], + [ + 804, + 350 + ], + [ + 811, + 359 + ], + [ + 817, + 365 + ], + [ + 824, + 367 + ], + [ + 829, + 367 + ], + [ + 835, + 362 + ], + [ + 839, + 360 + ], + [ + 840, + 391 + ], + [ + 852, + 391 + ], + [ + 852, + 387 + ], + [ + 851, + 367 + ], + [ + 856, + 368 + ], + [ + 864, + 368 + ], + [ + 867, + 368 + ], + [ + 869, + 368 + ], + [ + 870, + 368 + ], + [ + 873, + 372 + ], + [ + 875, + 380 + ], + [ + 879, + 384 + ], + [ + 882, + 393 + ], + [ + 885, + 397 + ], + [ + 888, + 401 + ], + [ + 892, + 402 + ], + [ + 896, + 403 + 
], + [ + 900, + 401 + ], + [ + 903, + 395 + ], + [ + 907, + 391 + ], + [ + 913, + 387 + ], + [ + 917, + 384 + ], + [ + 918, + 378 + ], + [ + 920, + 369 + ], + [ + 922, + 359 + ], + [ + 926, + 341 + ], + [ + 928, + 335 + ], + [ + 932, + 331 + ], + [ + 935, + 332 + ], + [ + 940, + 327 + ], + [ + 946, + 328 + ], + [ + 952, + 327 + ], + [ + 961, + 322 + ], + [ + 976, + 321 + ], + [ + 981, + 324 + ], + [ + 988, + 329 + ], + [ + 998, + 339 + ], + [ + 1010, + 353 + ], + [ + 1009, + 398 + ], + [ + 1020, + 399 + ], + [ + 1020, + 380 + ], + [ + 1024, + 362 + ], + [ + 1027, + 353 + ], + [ + 1032, + 347 + ], + [ + 1033, + 351 + ], + [ + 1036, + 351 + ], + [ + 1040, + 348 + ], + [ + 1041, + 345 + ], + [ + 1043, + 349 + ], + [ + 1048, + 348 + ], + [ + 1049, + 345 + ], + [ + 1048, + 341 + ], + [ + 1053, + 333 + ], + [ + 1056, + 331 + ], + [ + 1061, + 334 + ], + [ + 1061, + 338 + ], + [ + 1062, + 347 + ], + [ + 1068, + 349 + ], + [ + 1079, + 350 + ], + [ + 1085, + 347 + ], + [ + 1092, + 348 + ], + [ + 1098, + 348 + ], + [ + 1102, + 348 + ], + [ + 1110, + 346 + ], + [ + 1115, + 344 + ], + [ + 1109, + 341 + ], + [ + 1103, + 337 + ], + [ + 1103, + 332 + ], + [ + 1108, + 331 + ], + [ + 1113, + 329 + ], + [ + 1119, + 335 + ], + [ + 1129, + 334 + ], + [ + 1136, + 331 + ], + [ + 1143, + 321 + ], + [ + 1145, + 303 + ], + [ + 1145, + 291 + ], + [ + 1148, + 280 + ], + [ + 1148, + 272 + ], + [ + 1150, + 266 + ], + [ + 1163, + 272 + ], + [ + 1157, + 277 + ], + [ + 1163, + 281 + ], + [ + 1164, + 286 + ], + [ + 1168, + 290 + ], + [ + 1175, + 288 + ], + [ + 1180, + 292 + ], + [ + 1176, + 302 + ], + [ + 1173, + 317 + ], + [ + 1178, + 320 + ], + [ + 1184, + 317 + ], + [ + 1190, + 315 + ], + [ + 1196, + 324 + ], + [ + 1203, + 325 + ], + [ + 1207, + 327 + ], + [ + 1209, + 327 + ], + [ + 1215, + 322 + ], + [ + 1223, + 321 + ], + [ + 1229, + 322 + ], + [ + 1236, + 324 + ], + [ + 1244, + 346 + ], + [ + 1249, + 385 + ], + [ + 1251, + 446 + ], + [ + 1272, + 446 + ], + [ + 1264, + 400 + ], + [ + 1261, + 377 + ], + [ + 1255, + 352 + ], + [ + 1249, + 332 + ], + [ + 1246, + 318 + ], + [ + 1248, + 309 + ], + [ + 1251, + 307 + ], + [ + 1255, + 305 + ], + [ + 1254, + 298 + ], + [ + 1257, + 294 + ], + [ + 1261, + 302 + ], + [ + 1268, + 304 + ], + [ + 1271, + 309 + ], + [ + 1272, + 317 + ], + [ + 1275, + 322 + ], + [ + 1275, + 306 + ], + [ + 1276, + 299 + ], + [ + 1274, + 290 + ], + [ + 1270, + 285 + ], + [ + 1277, + 285 + ], + [ + 1279, + 278 + ], + [ + 1279, + 274 + ], + [ + 1282, + 267 + ], + [ + 1287, + 263 + ], + [ + 1291, + 266 + ], + [ + 1291, + 276 + ], + [ + 1294, + 284 + ], + [ + 1300, + 284 + ], + [ + 1301, + 278 + ], + [ + 1302, + 266 + ], + [ + 1305, + 258 + ], + [ + 1309, + 273 + ], + [ + 1310, + 299 + ], + [ + 1313, + 337 + ], + [ + 1316, + 355 + ], + [ + 1317, + 368 + ], + [ + 1315, + 398 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1188, + 406 + ], + [ + 1189, + 343 + ], + [ + 1230, + 343 + ], + [ + 1235, + 343 + ], + [ + 1234, + 402 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1223, + 385 + ], + [ + 1186, + 385 + ], + [ + 1185, + 392 + ], + [ + 1182, + 410 + ], + [ + 1181, + 442 + ], + [ + 1181, + 454 + ], + [ + 1183, + 456 + ], + [ + 1185, + 457 + ], + [ + 1186, + 456 + ], + [ + 1188, + 454 + ], + [ + 1196, + 454 + ], + [ + 1244, + 454 + ], + [ + 1248, + 456 + ], + [ + 1253, + 456 + ], + [ + 1256, + 450 + ], + [ + 1258, + 424 + ], + [ + 1253, + 393 + ], + [ + 1251, + 385 + ], + [ + 1230, + 385 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1124, + 398 + ], + [ + 1122, + 264 + ], + [ + 
1125, + 264 + ], + [ + 1127, + 409 + ], + [ + 1123, + 409 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 856, + 400 + ], + [ + 864, + 393 + ], + [ + 868, + 391 + ], + [ + 876, + 391 + ], + [ + 888, + 397 + ], + [ + 894, + 400 + ], + [ + 904, + 400 + ], + [ + 917, + 400 + ], + [ + 927, + 403 + ], + [ + 930, + 409 + ], + [ + 930, + 421 + ], + [ + 927, + 428 + ], + [ + 925, + 434 + ], + [ + 914, + 436 + ], + [ + 894, + 435 + ], + [ + 888, + 435 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1030, + 399 + ], + [ + 1030, + 378 + ], + [ + 1019, + 379 + ], + [ + 1020, + 399 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1054, + 370 + ], + [ + 1073, + 372 + ], + [ + 1081, + 377 + ], + [ + 1090, + 379 + ], + [ + 1094, + 381 + ], + [ + 1095, + 385 + ], + [ + 1093, + 390 + ], + [ + 1090, + 392 + ], + [ + 1086, + 393 + ], + [ + 1070, + 394 + ], + [ + 1056, + 394 + ], + [ + 1049, + 390 + ], + [ + 1048, + 385 + ], + [ + 1048, + 377 + ], + [ + 1050, + 373 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1046, + 390 + ], + [ + 1039, + 397 + ], + [ + 1040, + 403 + ], + [ + 1041, + 409 + ], + [ + 1047, + 411 + ], + [ + 1057, + 411 + ], + [ + 1066, + 410 + ], + [ + 1076, + 408 + ], + [ + 1077, + 401 + ], + [ + 1077, + 395 + ], + [ + 1074, + 391 + ], + [ + 1070, + 386 + ], + [ + 1062, + 385 + ], + [ + 1052, + 385 + ], + [ + 1049, + 386 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1338, + 455 + ], + [ + 1295, + 457 + ], + [ + 1294, + 462 + ], + [ + 1286, + 463 + ], + [ + 1288, + 472 + ], + [ + 1293, + 476 + ], + [ + 1319, + 478 + ], + [ + 1329, + 478 + ], + [ + 1338, + 478 + ], + [ + 1338, + 466 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1350, + 367 + ], + [ + 1340, + 371 + ], + [ + 1334, + 374 + ], + [ + 1324, + 372 + ], + [ + 1316, + 374 + ], + [ + 1309, + 377 + ], + [ + 1296, + 380 + ], + [ + 1284, + 386 + ], + [ + 1275, + 389 + ], + [ + 1269, + 399 + ], + [ + 1265, + 414 + ], + [ + 1266, + 419 + ], + [ + 1269, + 430 + ], + [ + 1275, + 434 + ], + [ + 1279, + 439 + ], + [ + 1289, + 444 + ], + [ + 1293, + 449 + ], + [ + 1297, + 454 + ], + [ + 1301, + 458 + ], + [ + 1316, + 458 + ], + [ + 1328, + 458 + ], + [ + 1334, + 453 + ], + [ + 1341, + 444 + ], + [ + 1344, + 430 + ], + [ + 1342, + 418 + ], + [ + 1347, + 409 + ], + [ + 1358, + 397 + ], + [ + 1360, + 387 + ], + [ + 1364, + 381 + ], + [ + 1358, + 377 + ], + [ + 1356, + 370 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1326, + 438 + ], + [ + 1326, + 477 + ], + [ + 1332, + 477 + ], + [ + 1333, + 424 + ], + [ + 1326, + 425 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 934, + 337 + ], + [ + 935, + 409 + ], + [ + 937, + 409 + ], + [ + 936, + 336 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 972, + 300 + ], + [ + 972, + 402 + ], + [ + 973, + 402 + ], + [ + 973, + 300 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 990, + 353 + ], + [ + 990, + 374 + ], + [ + 970, + 374 + ], + [ + 971, + 353 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 977, + 362 + ], + [ + 978, + 400 + ], + [ + 980, + 400 + ], + [ + 979, + 353 + ], + [ + 978, + 353 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 931, + 362 + ], + [ + 931, + 376 + ], + [ + 937, + 377 + ], + [ + 937, + 362 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 720, + 410 + ], + [ + 674, + 409 + ], + [ + 670, + 414 + ], + [ + 665, + 426 + ], + [ + 664, + 437 + ], + [ + 665, + 444 + ], + [ + 668, + 450 + ], + [ + 670, + 454 + ], + [ + 678, + 456 + ], + [ + 682, + 456 + 
], + [ + 694, + 457 + ], + [ + 708, + 457 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 615, + 408 + ], + [ + 645, + 408 + ], + [ + 654, + 409 + ], + [ + 666, + 421 + ], + [ + 671, + 435 + ], + [ + 671, + 447 + ], + [ + 670, + 457 + ], + [ + 670, + 461 + ], + [ + 669, + 462 + ], + [ + 666, + 463 + ], + [ + 662, + 461 + ], + [ + 659, + 460 + ], + [ + 650, + 458 + ], + [ + 647, + 462 + ], + [ + 642, + 463 + ], + [ + 640, + 462 + ], + [ + 635, + 453 + ], + [ + 615, + 434 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 611, + 399 + ], + [ + 584, + 400 + ], + [ + 559, + 403 + ], + [ + 549, + 410 + ], + [ + 540, + 424 + ], + [ + 533, + 441 + ], + [ + 531, + 451 + ], + [ + 532, + 463 + ], + [ + 535, + 470 + ], + [ + 540, + 471 + ], + [ + 547, + 470 + ], + [ + 548, + 466 + ], + [ + 559, + 466 + ], + [ + 562, + 467 + ], + [ + 574, + 468 + ], + [ + 576, + 465 + ], + [ + 587, + 465 + ], + [ + 595, + 465 + ], + [ + 600, + 465 + ], + [ + 605, + 469 + ], + [ + 611, + 469 + ], + [ + 615, + 466 + ], + [ + 629, + 464 + ], + [ + 634, + 465 + ], + [ + 640, + 464 + ], + [ + 643, + 456 + ], + [ + 641, + 445 + ], + [ + 638, + 429 + ], + [ + 629, + 413 + ], + [ + 617, + 401 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 451, + 432 + ], + [ + 442, + 416 + ], + [ + 429, + 407 + ], + [ + 414, + 407 + ], + [ + 425, + 425 + ], + [ + 448, + 444 + ], + [ + 448, + 441 + ], + [ + 450, + 437 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 955, + 400 + ], + [ + 949, + 400 + ], + [ + 947, + 401 + ], + [ + 941, + 399 + ], + [ + 941, + 414 + ], + [ + 949, + 417 + ], + [ + 951, + 414 + ], + [ + 956, + 414 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1178, + 398 + ], + [ + 1168, + 402 + ], + [ + 1166, + 389 + ], + [ + 1176, + 387 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 830, + 385 + ], + [ + 755, + 387 + ], + [ + 739, + 388 + ], + [ + 727, + 397 + ], + [ + 708, + 421 + ], + [ + 704, + 422 + ], + [ + 697, + 423 + ], + [ + 695, + 429 + ], + [ + 697, + 438 + ], + [ + 693, + 444 + ], + [ + 682, + 467 + ], + [ + 679, + 485 + ], + [ + 678, + 509 + ], + [ + 678, + 532 + ], + [ + 680, + 547 + ], + [ + 685, + 552 + ], + [ + 694, + 556 + ], + [ + 704, + 556 + ], + [ + 715, + 554 + ], + [ + 716, + 549 + ], + [ + 719, + 538 + ], + [ + 726, + 538 + ], + [ + 729, + 537 + ], + [ + 740, + 536 + ], + [ + 776, + 540 + ], + [ + 815, + 538 + ], + [ + 849, + 538 + ], + [ + 858, + 540 + ], + [ + 862, + 544 + ], + [ + 866, + 549 + ], + [ + 870, + 553 + ], + [ + 876, + 555 + ], + [ + 883, + 556 + ], + [ + 889, + 555 + ], + [ + 895, + 552 + ], + [ + 898, + 545 + ], + [ + 899, + 540 + ], + [ + 902, + 537 + ], + [ + 910, + 537 + ], + [ + 919, + 536 + ], + [ + 923, + 530 + ], + [ + 924, + 510 + ], + [ + 922, + 483 + ], + [ + 921, + 466 + ], + [ + 914, + 450 + ], + [ + 904, + 439 + ], + [ + 909, + 438 + ], + [ + 915, + 438 + ], + [ + 918, + 438 + ], + [ + 922, + 436 + ], + [ + 923, + 431 + ], + [ + 918, + 425 + ], + [ + 912, + 423 + ], + [ + 908, + 423 + ], + [ + 905, + 423 + ], + [ + 901, + 426 + ], + [ + 898, + 426 + ], + [ + 886, + 408 + ], + [ + 874, + 393 + ], + [ + 858, + 388 + ], + [ + 842, + 386 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 738, + 456 + ], + [ + 739, + 474 + ], + [ + 807, + 473 + ], + [ + 807, + 456 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 555, + 53 + ], + [ + 503, + 61 + ], + [ + 495, + 66 + ], + [ + 494, + 74 + ], + [ + 498, + 408 + ], + [ + 504, + 411 + ], + [ + 497, + 75 + ], + [ + 498, + 70 + ], + [ + 506, + 66 + ], + [ + 
557, + 57 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 620, + 48 + ], + [ + 549, + 51 + ], + [ + 553, + 59 + ], + [ + 624, + 54 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 423, + 415 + ], + [ + 424, + 404 + ], + [ + 432, + 399 + ], + [ + 443, + 399 + ], + [ + 450, + 401 + ], + [ + 473, + 401 + ], + [ + 496, + 402 + ], + [ + 501, + 405 + ], + [ + 506, + 411 + ], + [ + 510, + 424 + ], + [ + 513, + 432 + ], + [ + 520, + 451 + ], + [ + 520, + 460 + ], + [ + 501, + 471 + ], + [ + 482, + 472 + ], + [ + 462, + 473 + ], + [ + 447, + 461 + ], + [ + 436, + 450 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 120, + 316 + ], + [ + 252, + 323 + ], + [ + 328, + 339 + ], + [ + 381, + 368 + ], + [ + 426, + 408 + ], + [ + 465, + 455 + ], + [ + 478, + 474 + ], + [ + 476, + 454 + ], + [ + 486, + 452 + ], + [ + 518, + 457 + ], + [ + 525, + 462 + ], + [ + 529, + 477 + ], + [ + 533, + 491 + ], + [ + 527, + 495 + ], + [ + 522, + 498 + ], + [ + 541, + 508 + ], + [ + 557, + 522 + ], + [ + 574, + 547 + ], + [ + 582, + 568 + ], + [ + 585, + 601 + ], + [ + 585, + 609 + ], + [ + 581, + 647 + ], + [ + 580, + 681 + ], + [ + 571, + 723 + ], + [ + 563, + 739 + ], + [ + 548, + 748 + ], + [ + 526, + 752 + ], + [ + 503, + 750 + ], + [ + 485, + 733 + ], + [ + 483, + 724 + ], + [ + 324, + 802 + ], + [ + 317, + 861 + ], + [ + 297, + 903 + ], + [ + 265, + 924 + ], + [ + 227, + 928 + ], + [ + 197, + 920 + ], + [ + 185, + 908 + ], + [ + 179, + 883 + ], + [ + 181, + 830 + ], + [ + 0, + 845 + ], + [ + 0, + 324 + ], + [ + 0, + 315 + ], + [ + 25, + 315 + ], + [ + 75, + 316 + ], + [ + 104, + 316 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 244, + 37 + ], + [ + 245, + 0 + ], + [ + 291, + 0 + ], + [ + 292, + 38 + ], + [ + 289, + 43 + ], + [ + 284, + 44 + ], + [ + 253, + 45 + ], + [ + 247, + 41 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 9, + 17 + ], + [ + 81, + 0 + ], + [ + 52, + 0 + ], + [ + 0, + 15 + ], + [ + 0, + 22 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 21, + 37 + ], + [ + 23, + 103 + ], + [ + 0, + 103 + ], + [ + 0, + 30 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1088, + 405 + ], + [ + 1104, + 403 + ], + [ + 1112, + 405 + ], + [ + 1111, + 411 + ], + [ + 1088, + 412 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1103, + 383 + ], + [ + 1103, + 405 + ], + [ + 1104, + 404 + ], + [ + 1104, + 383 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1101, + 391 + ], + [ + 1100, + 393 + ], + [ + 1101, + 396 + ], + [ + 1103, + 397 + ], + [ + 1106, + 396 + ], + [ + 1108, + 394 + ], + [ + 1107, + 390 + ], + [ + 1103, + 388 + ], + [ + 1102, + 389 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_color.png 
b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..8a9e51270d1c36c3e0bbe2b10e2e0a30822deb3c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..261aa28eb987d9c77a5a1531331c874023973917 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..e82234e440b35652ef1bd165cc5752a806ca864e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..87bb5496379696f61ef3602b94d6b6299b893c94 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000203_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000205_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000205_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..51b19e5f5dfbbb9e2a6798bab896f2394a042137 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000205_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000206_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000206_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..3fede87b4c90e094f189b146a1dcab65a5061940 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000206_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000206_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000206_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..1e52a20942b6cc5abcc6deee5c82c60263b70567 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000206_000019_gtFine_polygons.json @@ -0,0 +1,4711 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 155, + 301 + ], + [ + 2048, + 337 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 283 + ], + [ + 0, + 289 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1876, + 1024 + ], + [ + 1567, + 805 + ], + [ + 1394, + 675 + ], + [ + 1399, + 654 + ], + [ + 1557, + 644 + ], + [ + 1735, + 664 + ], + [ + 1952, + 792 + ], + [ + 2048, + 887 + ], + [ + 2048, + 930 + ], + [ + 2048, + 1024 + ] + ] + }, + { + "label": "sky", + "polygon": [ + [ + 628, + 86 + ], + [ + 1019, + 258 + ], + [ + 1233, + 212 + ], + [ + 1186, + 0 + ], + [ + 697, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1614, + 584 + ], + [ + 1567, + 582 + ], + [ + 1473, + 588 + ], + [ + 1426, 
+ 601 + ], + [ + 1382, + 614 + ], + [ + 1357, + 628 + ], + [ + 1353, + 648 + ], + [ + 1369, + 659 + ], + [ + 1397, + 673 + ], + [ + 1476, + 686 + ], + [ + 1575, + 701 + ], + [ + 1647, + 637 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 1673, + 506 + ], + [ + 1565, + 506 + ], + [ + 1451, + 498 + ], + [ + 1313, + 487 + ], + [ + 1170, + 482 + ], + [ + 1175, + 475 + ], + [ + 1203, + 464 + ], + [ + 1246, + 458 + ], + [ + 1278, + 444 + ], + [ + 1331, + 429 + ], + [ + 1390, + 426 + ], + [ + 1438, + 422 + ], + [ + 1610, + 420 + ], + [ + 1729, + 478 + ], + [ + 1743, + 526 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 362, + 487 + ], + [ + 408, + 493 + ], + [ + 472, + 494 + ], + [ + 531, + 489 + ], + [ + 568, + 488 + ], + [ + 605, + 482 + ], + [ + 626, + 481 + ], + [ + 642, + 478 + ], + [ + 658, + 475 + ], + [ + 682, + 464 + ], + [ + 671, + 453 + ], + [ + 637, + 439 + ], + [ + 421, + 419 + ], + [ + 341, + 437 + ], + [ + 311, + 468 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 286, + 473 + ], + [ + 596, + 444 + ], + [ + 700, + 445 + ], + [ + 1180, + 415 + ], + [ + 1221, + 412 + ], + [ + 1254, + 412 + ], + [ + 1426, + 434 + ], + [ + 1456, + 436 + ], + [ + 1496, + 446 + ], + [ + 1530, + 451 + ], + [ + 1565, + 449 + ], + [ + 1603, + 461 + ], + [ + 1647, + 471 + ], + [ + 1688, + 474 + ], + [ + 2048, + 356 + ], + [ + 2048, + 0 + ], + [ + 1423, + 0 + ], + [ + 1227, + 210 + ], + [ + 1079, + 167 + ], + [ + 1076, + 154 + ], + [ + 1068, + 153 + ], + [ + 1053, + 157 + ], + [ + 1027, + 153 + ], + [ + 1015, + 141 + ], + [ + 996, + 110 + ], + [ + 995, + 118 + ], + [ + 974, + 119 + ], + [ + 974, + 124 + ], + [ + 970, + 124 + ], + [ + 969, + 103 + ], + [ + 952, + 103 + ], + [ + 947, + 110 + ], + [ + 935, + 99 + ], + [ + 933, + 82 + ], + [ + 929, + 77 + ], + [ + 929, + 70 + ], + [ + 912, + 57 + ], + [ + 817, + 0 + ], + [ + 0, + 0 + ], + [ + 0, + 466 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 698, + 404 + ], + [ + 663, + 406 + ], + [ + 647, + 415 + ], + [ + 653, + 435 + ], + [ + 669, + 455 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 678, + 489 + ], + [ + 669, + 441 + ], + [ + 668, + 330 + ], + [ + 660, + 324 + ], + [ + 668, + 320 + ], + [ + 668, + 300 + ], + [ + 655, + 307 + ], + [ + 637, + 313 + ], + [ + 631, + 313 + ], + [ + 616, + 317 + ], + [ + 604, + 325 + ], + [ + 594, + 331 + ], + [ + 611, + 303 + ], + [ + 623, + 293 + ], + [ + 629, + 276 + ], + [ + 613, + 285 + ], + [ + 598, + 291 + ], + [ + 578, + 301 + ], + [ + 565, + 305 + ], + [ + 582, + 286 + ], + [ + 593, + 275 + ], + [ + 581, + 277 + ], + [ + 577, + 277 + ], + [ + 579, + 270 + ], + [ + 579, + 264 + ], + [ + 573, + 266 + ], + [ + 557, + 270 + ], + [ + 555, + 271 + ], + [ + 565, + 262 + ], + [ + 568, + 251 + ], + [ + 557, + 249 + ], + [ + 551, + 244 + ], + [ + 542, + 246 + ], + [ + 521, + 240 + ], + [ + 519, + 235 + ], + [ + 522, + 224 + ], + [ + 511, + 216 + ], + [ + 500, + 215 + ], + [ + 505, + 209 + ], + [ + 514, + 206 + ], + [ + 514, + 200 + ], + [ + 518, + 197 + ], + [ + 520, + 194 + ], + [ + 514, + 186 + ], + [ + 508, + 179 + ], + [ + 500, + 173 + ], + [ + 499, + 167 + ], + [ + 507, + 169 + ], + [ + 523, + 168 + ], + [ + 534, + 169 + ], + [ + 540, + 169 + ], + [ + 544, + 167 + ], + [ + 538, + 160 + ], + [ + 528, + 157 + ], + [ + 513, + 153 + ], + [ + 509, + 150 + ], + [ + 523, + 147 + ], + [ + 540, + 146 + ], + [ + 540, + 140 + ], + [ + 526, + 135 + ], + [ + 524, + 124 + ], + [ + 524, + 119 + ], + [ + 531, + 123 + ], + [ + 535, + 126 + ], + [ + 549, + 129 + ], + [ + 553, + 
129 + ], + [ + 560, + 126 + ], + [ + 554, + 123 + ], + [ + 547, + 118 + ], + [ + 545, + 109 + ], + [ + 539, + 104 + ], + [ + 520, + 101 + ], + [ + 513, + 102 + ], + [ + 495, + 101 + ], + [ + 503, + 93 + ], + [ + 523, + 92 + ], + [ + 530, + 90 + ], + [ + 524, + 83 + ], + [ + 506, + 74 + ], + [ + 504, + 64 + ], + [ + 516, + 69 + ], + [ + 527, + 74 + ], + [ + 536, + 77 + ], + [ + 542, + 77 + ], + [ + 534, + 73 + ], + [ + 530, + 67 + ], + [ + 539, + 64 + ], + [ + 551, + 63 + ], + [ + 553, + 55 + ], + [ + 552, + 47 + ], + [ + 543, + 48 + ], + [ + 540, + 43 + ], + [ + 542, + 40 + ], + [ + 549, + 38 + ], + [ + 551, + 33 + ], + [ + 543, + 30 + ], + [ + 534, + 26 + ], + [ + 528, + 23 + ], + [ + 536, + 19 + ], + [ + 538, + 13 + ], + [ + 529, + 4 + ], + [ + 525, + 0 + ], + [ + 538, + 0 + ], + [ + 547, + 13 + ], + [ + 557, + 20 + ], + [ + 565, + 26 + ], + [ + 573, + 24 + ], + [ + 580, + 22 + ], + [ + 583, + 26 + ], + [ + 593, + 26 + ], + [ + 590, + 19 + ], + [ + 579, + 10 + ], + [ + 573, + 0 + ], + [ + 783, + 0 + ], + [ + 782, + 6 + ], + [ + 795, + 13 + ], + [ + 801, + 9 + ], + [ + 810, + 2 + ], + [ + 816, + 0 + ], + [ + 812, + 6 + ], + [ + 809, + 13 + ], + [ + 809, + 25 + ], + [ + 809, + 34 + ], + [ + 812, + 44 + ], + [ + 814, + 50 + ], + [ + 825, + 42 + ], + [ + 836, + 33 + ], + [ + 836, + 17 + ], + [ + 840, + 5 + ], + [ + 847, + 15 + ], + [ + 859, + 23 + ], + [ + 863, + 30 + ], + [ + 884, + 29 + ], + [ + 890, + 25 + ], + [ + 898, + 24 + ], + [ + 894, + 33 + ], + [ + 893, + 41 + ], + [ + 899, + 41 + ], + [ + 897, + 45 + ], + [ + 892, + 50 + ], + [ + 908, + 52 + ], + [ + 907, + 56 + ], + [ + 900, + 60 + ], + [ + 921, + 60 + ], + [ + 928, + 75 + ], + [ + 933, + 89 + ], + [ + 942, + 110 + ], + [ + 960, + 122 + ], + [ + 975, + 138 + ], + [ + 993, + 163 + ], + [ + 1004, + 173 + ], + [ + 1014, + 164 + ], + [ + 1016, + 152 + ], + [ + 1017, + 131 + ], + [ + 1020, + 124 + ], + [ + 1036, + 153 + ], + [ + 1044, + 145 + ], + [ + 1050, + 158 + ], + [ + 1054, + 177 + ], + [ + 1065, + 195 + ], + [ + 1071, + 205 + ], + [ + 1079, + 193 + ], + [ + 1084, + 183 + ], + [ + 1092, + 174 + ], + [ + 1087, + 166 + ], + [ + 1080, + 151 + ], + [ + 1102, + 158 + ], + [ + 1102, + 150 + ], + [ + 1110, + 144 + ], + [ + 1117, + 144 + ], + [ + 1125, + 130 + ], + [ + 1124, + 122 + ], + [ + 1112, + 125 + ], + [ + 1097, + 128 + ], + [ + 1086, + 131 + ], + [ + 1076, + 138 + ], + [ + 1062, + 132 + ], + [ + 1056, + 127 + ], + [ + 1039, + 129 + ], + [ + 1020, + 128 + ], + [ + 1012, + 120 + ], + [ + 996, + 99 + ], + [ + 978, + 75 + ], + [ + 991, + 64 + ], + [ + 973, + 58 + ], + [ + 972, + 50 + ], + [ + 984, + 50 + ], + [ + 975, + 39 + ], + [ + 987, + 45 + ], + [ + 1000, + 44 + ], + [ + 1018, + 44 + ], + [ + 1024, + 29 + ], + [ + 1029, + 14 + ], + [ + 1040, + 3 + ], + [ + 1050, + 0 + ], + [ + 1453, + 0 + ], + [ + 1457, + 9 + ], + [ + 1456, + 19 + ], + [ + 1465, + 22 + ], + [ + 1466, + 23 + ], + [ + 1462, + 29 + ], + [ + 1467, + 32 + ], + [ + 1472, + 32 + ], + [ + 1480, + 29 + ], + [ + 1511, + 36 + ], + [ + 1519, + 52 + ], + [ + 1516, + 87 + ], + [ + 1512, + 122 + ], + [ + 1509, + 141 + ], + [ + 1500, + 151 + ], + [ + 1485, + 156 + ], + [ + 1475, + 160 + ], + [ + 1455, + 167 + ], + [ + 1439, + 169 + ], + [ + 1428, + 171 + ], + [ + 1419, + 168 + ], + [ + 1412, + 161 + ], + [ + 1417, + 172 + ], + [ + 1433, + 181 + ], + [ + 1438, + 185 + ], + [ + 1447, + 192 + ], + [ + 1450, + 198 + ], + [ + 1440, + 196 + ], + [ + 1431, + 193 + ], + [ + 1443, + 208 + ], + [ + 1456, + 211 + ], + [ + 1471, + 216 + ], + [ + 1484, + 224 + ], + [ + 1501, + 230 + ], + 
[ + 1503, + 235 + ], + [ + 1481, + 239 + ], + [ + 1442, + 240 + ], + [ + 1445, + 258 + ], + [ + 1441, + 257 + ], + [ + 1429, + 250 + ], + [ + 1409, + 245 + ], + [ + 1426, + 257 + ], + [ + 1438, + 261 + ], + [ + 1445, + 264 + ], + [ + 1457, + 268 + ], + [ + 1460, + 277 + ], + [ + 1447, + 277 + ], + [ + 1426, + 273 + ], + [ + 1412, + 279 + ], + [ + 1412, + 284 + ], + [ + 1410, + 290 + ], + [ + 1406, + 290 + ], + [ + 1396, + 283 + ], + [ + 1385, + 282 + ], + [ + 1379, + 285 + ], + [ + 1387, + 292 + ], + [ + 1401, + 298 + ], + [ + 1415, + 301 + ], + [ + 1426, + 306 + ], + [ + 1428, + 318 + ], + [ + 1423, + 324 + ], + [ + 1403, + 325 + ], + [ + 1383, + 322 + ], + [ + 1370, + 325 + ], + [ + 1357, + 327 + ], + [ + 1343, + 341 + ], + [ + 1347, + 347 + ], + [ + 1356, + 360 + ], + [ + 1364, + 369 + ], + [ + 1384, + 369 + ], + [ + 1389, + 362 + ], + [ + 1396, + 371 + ], + [ + 1402, + 383 + ], + [ + 1401, + 398 + ], + [ + 1391, + 414 + ], + [ + 1373, + 412 + ], + [ + 1347, + 411 + ], + [ + 1325, + 414 + ], + [ + 1323, + 453 + ], + [ + 1306, + 453 + ], + [ + 1303, + 382 + ], + [ + 1280, + 404 + ], + [ + 1262, + 399 + ], + [ + 1251, + 391 + ], + [ + 1237, + 379 + ], + [ + 1231, + 377 + ], + [ + 1217, + 372 + ], + [ + 1210, + 380 + ], + [ + 1207, + 389 + ], + [ + 1199, + 393 + ], + [ + 1198, + 406 + ], + [ + 1197, + 413 + ], + [ + 1194, + 406 + ], + [ + 1194, + 390 + ], + [ + 1187, + 393 + ], + [ + 1177, + 392 + ], + [ + 1176, + 387 + ], + [ + 1169, + 380 + ], + [ + 1160, + 374 + ], + [ + 754, + 379 + ], + [ + 749, + 310 + ], + [ + 675, + 333 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 677, + 435 + ], + [ + 667, + 429 + ], + [ + 647, + 414 + ], + [ + 635, + 410 + ], + [ + 610, + 412 + ], + [ + 613, + 441 + ], + [ + 615, + 460 + ], + [ + 616, + 468 + ], + [ + 620, + 470 + ], + [ + 624, + 471 + ], + [ + 633, + 471 + ], + [ + 638, + 471 + ], + [ + 651, + 469 + ], + [ + 666, + 467 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 589, + 409 + ], + [ + 588, + 400 + ], + [ + 585, + 398 + ], + [ + 582, + 401 + ], + [ + 583, + 408 + ], + [ + 584, + 415 + ] + ] + }, + { + "label": "person", + "polygon": [ + [ + 479, + 393 + ], + [ + 492, + 405 + ], + [ + 486, + 410 + ], + [ + 476, + 408 + ], + [ + 472, + 401 + ], + [ + 474, + 398 + ], + [ + 477, + 395 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 608, + 408 + ], + [ + 582, + 408 + ], + [ + 556, + 408 + ], + [ + 555, + 415 + ], + [ + 578, + 460 + ], + [ + 580, + 470 + ], + [ + 593, + 473 + ], + [ + 607, + 472 + ], + [ + 614, + 469 + ], + [ + 624, + 466 + ], + [ + 638, + 462 + ], + [ + 641, + 453 + ], + [ + 639, + 433 + ], + [ + 634, + 422 + ], + [ + 621, + 413 + ], + [ + 615, + 409 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 536, + 402 + ], + [ + 488, + 403 + ], + [ + 469, + 404 + ], + [ + 447, + 406 + ], + [ + 422, + 419 + ], + [ + 403, + 430 + ], + [ + 368, + 436 + ], + [ + 346, + 444 + ], + [ + 358, + 481 + ], + [ + 380, + 484 + ], + [ + 405, + 481 + ], + [ + 419, + 480 + ], + [ + 431, + 480 + ], + [ + 438, + 478 + ], + [ + 447, + 476 + ], + [ + 500, + 476 + ], + [ + 510, + 476 + ], + [ + 520, + 476 + ], + [ + 524, + 476 + ], + [ + 528, + 474 + ], + [ + 535, + 474 + ], + [ + 542, + 475 + ], + [ + 559, + 475 + ], + [ + 576, + 473 + ], + [ + 582, + 473 + ], + [ + 589, + 472 + ], + [ + 589, + 452 + ], + [ + 587, + 434 + ], + [ + 585, + 429 + ], + [ + 566, + 409 + ], + [ + 555, + 406 + ], + [ + 548, + 406 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 552, + 440 + ], + [ + 555, + 482 + ], + [ + 559, + 483 + ], + 
[ + 558, + 431 + ], + [ + 552, + 431 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 423, + 249 + ], + [ + 423, + 258 + ], + [ + 431, + 259 + ], + [ + 431, + 251 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 369, + 278 + ], + [ + 371, + 285 + ], + [ + 390, + 286 + ], + [ + 391, + 274 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 414, + 263 + ], + [ + 348, + 263 + ], + [ + 350, + 279 + ], + [ + 413, + 277 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 686, + 96 + ], + [ + 697, + 106 + ], + [ + 699, + 147 + ], + [ + 687, + 154 + ], + [ + 660, + 157 + ], + [ + 658, + 97 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 436, + 238 + ], + [ + 414, + 235 + ], + [ + 413, + 251 + ], + [ + 438, + 253 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 424, + 483 + ], + [ + 421, + 407 + ], + [ + 413, + 193 + ], + [ + 417, + 171 + ], + [ + 428, + 158 + ], + [ + 455, + 147 + ], + [ + 571, + 116 + ], + [ + 666, + 111 + ], + [ + 667, + 106 + ], + [ + 577, + 113 + ], + [ + 480, + 135 + ], + [ + 432, + 149 + ], + [ + 417, + 161 + ], + [ + 410, + 179 + ], + [ + 410, + 199 + ], + [ + 410, + 232 + ], + [ + 412, + 298 + ], + [ + 415, + 430 + ], + [ + 415, + 475 + ], + [ + 414, + 483 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 389, + 301 + ], + [ + 370, + 345 + ], + [ + 419, + 345 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 385, + 317 + ], + [ + 416, + 316 + ], + [ + 416, + 318 + ], + [ + 385, + 319 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 409, + 293 + ], + [ + 412, + 349 + ], + [ + 427, + 348 + ], + [ + 433, + 347 + ], + [ + 433, + 339 + ], + [ + 434, + 299 + ], + [ + 419, + 297 + ], + [ + 415, + 293 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 13, + 271 + ], + [ + 17, + 348 + ], + [ + 9, + 349 + ], + [ + 4, + 269 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 21, + 314 + ], + [ + 26, + 322 + ], + [ + 27, + 331 + ], + [ + 24, + 338 + ], + [ + 20, + 342 + ], + [ + 12, + 343 + ], + [ + 5, + 337 + ], + [ + 2, + 325 + ], + [ + 4, + 317 + ], + [ + 8, + 312 + ], + [ + 15, + 311 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 452, + 548 + ], + [ + 437, + 527 + ], + [ + 417, + 510 + ], + [ + 397, + 498 + ], + [ + 410, + 494 + ], + [ + 419, + 484 + ], + [ + 422, + 476 + ], + [ + 412, + 467 + ], + [ + 402, + 462 + ], + [ + 403, + 445 + ], + [ + 387, + 439 + ], + [ + 382, + 439 + ], + [ + 378, + 445 + ], + [ + 376, + 454 + ], + [ + 381, + 460 + ], + [ + 369, + 463 + ], + [ + 321, + 411 + ], + [ + 248, + 361 + ], + [ + 149, + 346 + ], + [ + 69, + 343 + ], + [ + 0, + 339 + ], + [ + 0, + 783 + ], + [ + 39, + 784 + ], + [ + 106, + 780 + ], + [ + 112, + 805 + ], + [ + 124, + 832 + ], + [ + 151, + 845 + ], + [ + 181, + 844 + ], + [ + 209, + 830 + ], + [ + 228, + 805 + ], + [ + 237, + 777 + ], + [ + 243, + 742 + ], + [ + 377, + 690 + ], + [ + 382, + 709 + ], + [ + 396, + 718 + ], + [ + 416, + 720 + ], + [ + 437, + 714 + ], + [ + 449, + 696 + ], + [ + 454, + 656 + ], + [ + 457, + 613 + ], + [ + 457, + 578 + ], + [ + 454, + 555 + ] + ] + }, + { + "label": "cargroup", + "polygon": [ + [ + 1165, + 424 + ], + [ + 1198, + 421 + ], + [ + 1212, + 420 + ], + [ + 1215, + 416 + ], + [ + 1210, + 409 + ], + [ + 1205, + 405 + ], + [ + 1199, + 405 + ], + [ + 1184, + 405 + ], + [ + 1165, + 406 + ], + [ + 1158, + 406 + ], + [ + 1154, + 416 + ] + ] + }, + { + "label": "bus", + "polygon": [ + [ + 1011, + 335 + ], + [ + 1041, + 336 + ], + [ + 1042, + 333 + ], + [ + 
1049, + 331 + ], + [ + 1058, + 332 + ], + [ + 1059, + 333 + ], + [ + 1084, + 334 + ], + [ + 1091, + 332 + ], + [ + 1098, + 334 + ], + [ + 1133, + 336 + ], + [ + 1148, + 346 + ], + [ + 1161, + 351 + ], + [ + 1160, + 362 + ], + [ + 1166, + 362 + ], + [ + 1170, + 370 + ], + [ + 1170, + 380 + ], + [ + 1166, + 380 + ], + [ + 1161, + 377 + ], + [ + 1159, + 368 + ], + [ + 1158, + 369 + ], + [ + 1159, + 414 + ], + [ + 1064, + 428 + ], + [ + 1009, + 362 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1349, + 379 + ], + [ + 1317, + 378 + ], + [ + 1302, + 380 + ], + [ + 1277, + 380 + ], + [ + 1277, + 397 + ], + [ + 1271, + 397 + ], + [ + 1271, + 380 + ], + [ + 1243, + 382 + ], + [ + 1235, + 385 + ], + [ + 1230, + 406 + ], + [ + 1237, + 410 + ], + [ + 1257, + 408 + ], + [ + 1292, + 409 + ], + [ + 1312, + 408 + ], + [ + 1329, + 404 + ], + [ + 1348, + 404 + ], + [ + 1352, + 395 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1245, + 395 + ], + [ + 1234, + 395 + ], + [ + 1219, + 416 + ], + [ + 1213, + 436 + ], + [ + 1214, + 445 + ], + [ + 1218, + 452 + ], + [ + 1225, + 452 + ], + [ + 1243, + 443 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1426, + 420 + ], + [ + 1396, + 415 + ], + [ + 1379, + 402 + ], + [ + 1352, + 393 + ], + [ + 1321, + 391 + ], + [ + 1321, + 459 + ], + [ + 1311, + 459 + ], + [ + 1306, + 391 + ], + [ + 1270, + 392 + ], + [ + 1244, + 395 + ], + [ + 1234, + 410 + ], + [ + 1228, + 426 + ], + [ + 1226, + 439 + ], + [ + 1225, + 453 + ], + [ + 1229, + 463 + ], + [ + 1240, + 465 + ], + [ + 1263, + 460 + ], + [ + 1315, + 459 + ], + [ + 1390, + 458 + ], + [ + 1413, + 457 + ], + [ + 1422, + 457 + ], + [ + 1429, + 456 + ], + [ + 1438, + 445 + ], + [ + 1441, + 435 + ], + [ + 1435, + 422 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1304, + 368 + ], + [ + 1309, + 433 + ], + [ + 1321, + 433 + ], + [ + 1321, + 364 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1398, + 462 + ], + [ + 1314, + 462 + ], + [ + 1260, + 468 + ], + [ + 1247, + 468 + ], + [ + 1243, + 465 + ], + [ + 1253, + 459 + ], + [ + 1262, + 450 + ], + [ + 1271, + 448 + ], + [ + 1290, + 445 + ], + [ + 1302, + 441 + ], + [ + 1313, + 434 + ], + [ + 1323, + 433 + ], + [ + 1336, + 433 + ], + [ + 1353, + 437 + ], + [ + 1362, + 439 + ], + [ + 1381, + 448 + ], + [ + 1391, + 455 + ] + ] + }, + { + "label": "bicycle", + "polygon": [ + [ + 1394, + 447 + ], + [ + 1394, + 438 + ], + [ + 1387, + 427 + ], + [ + 1375, + 424 + ], + [ + 1369, + 424 + ], + [ + 1361, + 424 + ], + [ + 1363, + 418 + ], + [ + 1371, + 415 + ], + [ + 1371, + 411 + ], + [ + 1362, + 411 + ], + [ + 1351, + 412 + ], + [ + 1353, + 415 + ], + [ + 1356, + 418 + ], + [ + 1357, + 424 + ], + [ + 1355, + 425 + ], + [ + 1345, + 424 + ], + [ + 1338, + 424 + ], + [ + 1338, + 419 + ], + [ + 1341, + 416 + ], + [ + 1341, + 412 + ], + [ + 1330, + 407 + ], + [ + 1321, + 407 + ], + [ + 1320, + 418 + ], + [ + 1319, + 435 + ], + [ + 1320, + 446 + ], + [ + 1323, + 452 + ], + [ + 1328, + 453 + ], + [ + 1330, + 445 + ], + [ + 1331, + 434 + ], + [ + 1337, + 435 + ], + [ + 1347, + 437 + ], + [ + 1354, + 445 + ], + [ + 1358, + 452 + ], + [ + 1371, + 458 + ], + [ + 1378, + 458 + ], + [ + 1388, + 458 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1317, + 224 + ], + [ + 1319, + 472 + ], + [ + 1327, + 472 + ], + [ + 1323, + 226 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1313, + 306 + ], + [ + 1308, + 311 + ], + [ + 1306, + 320 + ], + [ + 1309, + 328 + ], + [ + 1314, + 330 + ], + [ + 1320, + 332 + ], + [ + 1326, + 330 + ], + 
[ + 1330, + 325 + ], + [ + 1333, + 320 + ], + [ + 1332, + 310 + ], + [ + 1326, + 307 + ], + [ + 1318, + 304 + ] + ] + }, + { + "label": "polegroup", + "polygon": [ + [ + 1206, + 443 + ], + [ + 1205, + 468 + ], + [ + 1213, + 476 + ], + [ + 1214, + 421 + ], + [ + 1208, + 418 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1409, + 483 + ], + [ + 1407, + 414 + ], + [ + 1413, + 414 + ], + [ + 1413, + 485 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1451, + 303 + ], + [ + 1453, + 456 + ], + [ + 1457, + 455 + ], + [ + 1455, + 303 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1599, + 402 + ], + [ + 1564, + 400 + ], + [ + 1552, + 407 + ], + [ + 1555, + 413 + ], + [ + 1549, + 415 + ], + [ + 1548, + 453 + ], + [ + 1560, + 456 + ], + [ + 1560, + 466 + ], + [ + 1595, + 464 + ], + [ + 1592, + 408 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1534, + 395 + ], + [ + 1543, + 396 + ], + [ + 1550, + 401 + ], + [ + 1549, + 407 + ], + [ + 1545, + 413 + ], + [ + 1542, + 414 + ], + [ + 1543, + 428 + ], + [ + 1544, + 438 + ], + [ + 1542, + 444 + ], + [ + 1535, + 448 + ], + [ + 1526, + 440 + ], + [ + 1527, + 406 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1490, + 419 + ], + [ + 1489, + 488 + ], + [ + 1494, + 488 + ], + [ + 1494, + 418 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1582, + 434 + ], + [ + 1583, + 485 + ], + [ + 1587, + 485 + ], + [ + 1589, + 420 + ], + [ + 1582, + 420 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1512, + 618 + ], + [ + 1510, + 457 + ], + [ + 1514, + 441 + ], + [ + 1505, + 0 + ], + [ + 1532, + 0 + ], + [ + 1534, + 440 + ], + [ + 1541, + 456 + ], + [ + 1540, + 619 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1594, + 76 + ], + [ + 1524, + 80 + ], + [ + 1523, + 92 + ], + [ + 1592, + 84 + ], + [ + 1556, + 146 + ], + [ + 1527, + 145 + ], + [ + 1527, + 152 + ], + [ + 1562, + 152 + ], + [ + 1606, + 94 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1489, + 63 + ], + [ + 1456, + 68 + ], + [ + 1458, + 87 + ], + [ + 1474, + 103 + ], + [ + 1492, + 104 + ], + [ + 1492, + 115 + ], + [ + 1455, + 117 + ], + [ + 1458, + 138 + ], + [ + 1470, + 141 + ], + [ + 1491, + 145 + ], + [ + 1492, + 164 + ], + [ + 1455, + 167 + ], + [ + 1461, + 189 + ], + [ + 1478, + 192 + ], + [ + 1495, + 193 + ], + [ + 1493, + 205 + ], + [ + 1509, + 205 + ], + [ + 1513, + 199 + ], + [ + 1509, + 129 + ], + [ + 1509, + 96 + ], + [ + 1506, + 81 + ], + [ + 1502, + 69 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1484, + 125 + ], + [ + 1485, + 114 + ], + [ + 1490, + 108 + ], + [ + 1492, + 96 + ], + [ + 1490, + 88 + ], + [ + 1486, + 77 + ], + [ + 1487, + 64 + ], + [ + 1489, + 57 + ], + [ + 1537, + 50 + ], + [ + 1540, + 200 + ], + [ + 1492, + 203 + ], + [ + 1491, + 179 + ], + [ + 1484, + 174 + ], + [ + 1486, + 159 + ], + [ + 1492, + 153 + ], + [ + 1494, + 146 + ], + [ + 1492, + 135 + ], + [ + 1489, + 128 + ] + ] + }, + { + "label": "ground", + "polygon": [ + [ + 1553, + 697 + ], + [ + 1395, + 671 + ], + [ + 1394, + 678 + ], + [ + 1870, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 1927, + 801 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1611, + 40 + ], + [ + 1538, + 164 + ], + [ + 1539, + 172 + ], + [ + 1685, + 167 + ], + [ + 1691, + 162 + ], + [ + 1657, + 99 + ], + [ + 1616, + 38 + ] + ] + }, + { + "label": "parking", + "polygon": [ + [ + 1553, + 697 + ], + [ + 1395, + 671 + ], + [ + 1394, + 678 + ], + [ + 1870, + 1023 + ], + [ + 2047, + 1023 + ], + [ + 2047, + 892 + ], + [ + 1927, + 
801 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1779, + 345 + ], + [ + 1779, + 131 + ], + [ + 1781, + 49 + ], + [ + 1786, + 31 + ], + [ + 1801, + 0 + ], + [ + 1807, + 0 + ], + [ + 1792, + 32 + ], + [ + 1787, + 52 + ], + [ + 1784, + 115 + ], + [ + 1786, + 344 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1795, + 200 + ], + [ + 1794, + 227 + ], + [ + 1760, + 231 + ], + [ + 1761, + 205 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1774, + 248 + ], + [ + 1766, + 246 + ], + [ + 1764, + 239 + ], + [ + 1758, + 236 + ], + [ + 1750, + 238 + ], + [ + 1750, + 249 + ], + [ + 1727, + 250 + ], + [ + 1728, + 263 + ], + [ + 1747, + 268 + ], + [ + 1748, + 277 + ], + [ + 1726, + 280 + ], + [ + 1726, + 292 + ], + [ + 1749, + 298 + ], + [ + 1751, + 319 + ], + [ + 1762, + 314 + ], + [ + 1766, + 307 + ], + [ + 1781, + 302 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1792, + 245 + ], + [ + 1805, + 244 + ], + [ + 1809, + 246 + ], + [ + 1824, + 247 + ], + [ + 1823, + 256 + ], + [ + 1807, + 260 + ], + [ + 1808, + 268 + ], + [ + 1824, + 268 + ], + [ + 1823, + 278 + ], + [ + 1811, + 280 + ], + [ + 1808, + 290 + ], + [ + 1823, + 290 + ], + [ + 1823, + 299 + ], + [ + 1808, + 303 + ], + [ + 1807, + 308 + ], + [ + 1792, + 310 + ], + [ + 1783, + 293 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1796, + 244 + ], + [ + 1770, + 243 + ], + [ + 1772, + 312 + ], + [ + 1797, + 312 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1894, + 221 + ], + [ + 1895, + 321 + ], + [ + 1914, + 320 + ], + [ + 1912, + 214 + ], + [ + 1901, + 188 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1851, + 30 + ], + [ + 1844, + 22 + ], + [ + 1830, + 25 + ], + [ + 1830, + 0 + ], + [ + 1867, + 0 + ], + [ + 1866, + 13 + ], + [ + 1858, + 13 + ], + [ + 1853, + 21 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1865, + 56 + ], + [ + 1875, + 237 + ], + [ + 1914, + 223 + ], + [ + 1911, + 0 + ], + [ + 1862, + 0 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 2032, + 140 + ], + [ + 2008, + 141 + ], + [ + 2008, + 147 + ], + [ + 1990, + 150 + ], + [ + 1990, + 164 + ], + [ + 2009, + 170 + ], + [ + 2009, + 177 + ], + [ + 2033, + 182 + ], + [ + 2047, + 187 + ], + [ + 2048, + 188 + ], + [ + 2048, + 130 + ], + [ + 2046, + 133 + ], + [ + 2035, + 136 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1965, + 308 + ], + [ + 1892, + 316 + ], + [ + 1801, + 331 + ], + [ + 1769, + 345 + ], + [ + 1727, + 371 + ], + [ + 1692, + 412 + ], + [ + 1648, + 469 + ], + [ + 1636, + 487 + ], + [ + 1637, + 476 + ], + [ + 1635, + 470 + ], + [ + 1610, + 471 + ], + [ + 1589, + 473 + ], + [ + 1581, + 481 + ], + [ + 1575, + 494 + ], + [ + 1575, + 509 + ], + [ + 1580, + 512 + ], + [ + 1594, + 513 + ], + [ + 1615, + 513 + ], + [ + 1591, + 533 + ], + [ + 1577, + 566 + ], + [ + 1569, + 595 + ], + [ + 1564, + 637 + ], + [ + 1566, + 647 + ], + [ + 1561, + 659 + ], + [ + 1555, + 685 + ], + [ + 1555, + 719 + ], + [ + 1559, + 750 + ], + [ + 1574, + 785 + ], + [ + 1587, + 798 + ], + [ + 1603, + 806 + ], + [ + 1621, + 810 + ], + [ + 1634, + 811 + ], + [ + 1646, + 811 + ], + [ + 1656, + 809 + ], + [ + 1664, + 805 + ], + [ + 1672, + 797 + ], + [ + 1679, + 785 + ], + [ + 1813, + 863 + ], + [ + 1816, + 895 + ], + [ + 1823, + 929 + ], + [ + 1833, + 963 + ], + [ + 1849, + 989 + ], + [ + 1864, + 1003 + ], + [ + 1917, + 1014 + ], + [ + 1936, + 1013 + ], + [ + 1945, + 1007 + ], + [ + 1955, + 995 + ], + [ + 1971, + 954 + ], + [ + 1979, + 932 + ], + [ + 
2048, + 934 + ], + [ + 2048, + 302 + ], + [ + 2009, + 305 + ], + [ + 1977, + 308 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 958, + 358 + ], + [ + 811, + 356 + ], + [ + 734, + 369 + ], + [ + 705, + 387 + ], + [ + 666, + 435 + ], + [ + 634, + 495 + ], + [ + 598, + 525 + ], + [ + 580, + 547 + ], + [ + 570, + 588 + ], + [ + 558, + 625 + ], + [ + 544, + 657 + ], + [ + 539, + 746 + ], + [ + 538, + 807 + ], + [ + 543, + 874 + ], + [ + 549, + 893 + ], + [ + 568, + 909 + ], + [ + 586, + 915 + ], + [ + 624, + 915 + ], + [ + 642, + 915 + ], + [ + 662, + 910 + ], + [ + 671, + 901 + ], + [ + 700, + 896 + ], + [ + 917, + 895 + ], + [ + 1146, + 896 + ], + [ + 1172, + 896 + ], + [ + 1193, + 915 + ], + [ + 1220, + 925 + ], + [ + 1256, + 921 + ], + [ + 1268, + 894 + ], + [ + 1273, + 815 + ], + [ + 1271, + 741 + ], + [ + 1269, + 677 + ], + [ + 1269, + 645 + ], + [ + 1249, + 604 + ], + [ + 1235, + 575 + ], + [ + 1239, + 550 + ], + [ + 1231, + 537 + ], + [ + 1209, + 516 + ], + [ + 1223, + 516 + ], + [ + 1237, + 512 + ], + [ + 1246, + 500 + ], + [ + 1232, + 480 + ], + [ + 1208, + 473 + ], + [ + 1197, + 474 + ], + [ + 1175, + 435 + ], + [ + 1161, + 399 + ], + [ + 1134, + 377 + ], + [ + 1060, + 361 + ], + [ + 993, + 353 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 773, + 597 + ], + [ + 767, + 649 + ], + [ + 1017, + 655 + ], + [ + 1017, + 602 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000207_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000207_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..90a8d1376a94482d5c8ef02885d0e8dc6a7e56c2 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000207_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000208_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000208_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..31a14edf4df603bc567e2e7cae382a465eb084e7 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000208_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000209_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000209_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d559dbf7fae25a8303fdc952f21319e954ab2a26 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000209_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000210_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000210_000019_gtFine_color.png new 
file mode 100644 index 0000000000000000000000000000000000000000..da769d34d63d4fb1694c335da337023ec8302e90 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000210_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000212_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000212_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..7018141ffb7e811790df47785da34bde7f95e497 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000212_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000213_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000213_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..22ee9d1a8ea92d211c1534081158aed4dc514336 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000213_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000213_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000213_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..d3a9bcc90cc53a804657996ad4ee2b4792906ea4 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000213_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000213_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000213_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ed7e8859628cf4f2463b79c275f8733db868cb8e Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000213_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000214_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000214_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..01519a56695bcac32550ae3616d852963d5ff73a Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000214_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000214_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000214_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..2bb553c5b0e6541d06aab2f89b790a72df387478 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000214_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000215_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000215_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..01981dda6d78fd6b4b5995ab59b46b90091a52a3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000215_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000215_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000215_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..68eb5d610aaecc95a96b163108c4b953c820275d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000215_000019_gtFine_labelTrainIds.png differ diff --git 
a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000216_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000216_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..667e4dec7c91681dffc5c647c4763873c7098307 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000216_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000216_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000216_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..3df1d8b0fbc03b4371a324334efa097b538913a3 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000216_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000217_000019_gtFine_color.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000217_000019_gtFine_color.png new file mode 100644 index 0000000000000000000000000000000000000000..faafc19ee921361d11e931ba9a64e1b320f07a34 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000217_000019_gtFine_color.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000217_000019_gtFine_labelIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000217_000019_gtFine_labelIds.png new file mode 100644 index 0000000000000000000000000000000000000000..beff1549a6f65300de37c07a6e551fedc9b8df16 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000217_000019_gtFine_labelIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000217_000019_gtFine_labelTrainIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000217_000019_gtFine_labelTrainIds.png new file mode 100644 index 0000000000000000000000000000000000000000..3856051b0e42aa25ef7a98f7a05d7076e7949d1c Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000217_000019_gtFine_labelTrainIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000218_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000218_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..ee317104d40766515eb915a31712e2d22b2d2cc1 Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000218_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000220_000019_gtFine_instanceIds.png b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000220_000019_gtFine_instanceIds.png new file mode 100644 index 0000000000000000000000000000000000000000..42fc62ebee06ffd75d0e101f641e14d9ea9e7f6d Binary files /dev/null and b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000220_000019_gtFine_instanceIds.png differ diff --git a/cityscapes/gtFine/train/dusseldorf/dusseldorf_000220_000019_gtFine_polygons.json b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000220_000019_gtFine_polygons.json new file mode 100644 index 0000000000000000000000000000000000000000..432c1f5d4ff5b816e094bc62cd43ef1b78ed2b33 --- /dev/null +++ b/cityscapes/gtFine/train/dusseldorf/dusseldorf_000220_000019_gtFine_polygons.json @@ -0,0 +1,4704 @@ +{ + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "road", + "polygon": [ + [ + 110, + 358 + ], + [ + 819, + 384 + ], + [ + 1363, + 385 + ], + [ + 1703, + 403 + ], + [ + 1926, + 429 + ], + [ + 2048, + 474 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 358 + ] + ] + }, + { + 
"label": "sky", + "polygon": [ + [ + 517, + 73 + ], + [ + 598, + 281 + ], + [ + 687, + 362 + ], + [ + 1023, + 370 + ], + [ + 1291, + 329 + ], + [ + 1344, + 343 + ], + [ + 1494, + 363 + ], + [ + 1661, + 313 + ], + [ + 1851, + 267 + ], + [ + 1900, + 204 + ], + [ + 1915, + 0 + ], + [ + 525, + 0 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 0, + 576 + ], + [ + 239, + 559 + ], + [ + 480, + 532 + ], + [ + 582, + 521 + ], + [ + 674, + 498 + ], + [ + 708, + 488 + ], + [ + 714, + 487 + ], + [ + 711, + 480 + ], + [ + 669, + 475 + ], + [ + 610, + 468 + ], + [ + 207, + 465 + ], + [ + 0, + 486 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 599, + 481 + ], + [ + 535, + 489 + ], + [ + 536, + 505 + ], + [ + 494, + 506 + ], + [ + 244, + 523 + ], + [ + 185, + 526 + ], + [ + 181, + 517 + ], + [ + 0, + 537 + ], + [ + 0, + 0 + ], + [ + 783, + 0 + ], + [ + 789, + 4 + ], + [ + 798, + 11 + ], + [ + 809, + 22 + ], + [ + 808, + 33 + ], + [ + 806, + 43 + ], + [ + 801, + 51 + ], + [ + 794, + 60 + ], + [ + 799, + 64 + ], + [ + 805, + 64 + ], + [ + 816, + 68 + ], + [ + 819, + 70 + ], + [ + 820, + 77 + ], + [ + 819, + 87 + ], + [ + 818, + 93 + ], + [ + 822, + 97 + ], + [ + 825, + 104 + ], + [ + 826, + 112 + ], + [ + 828, + 107 + ], + [ + 830, + 104 + ], + [ + 833, + 104 + ], + [ + 840, + 109 + ], + [ + 844, + 115 + ], + [ + 838, + 121 + ], + [ + 836, + 127 + ], + [ + 840, + 137 + ], + [ + 847, + 147 + ], + [ + 848, + 153 + ], + [ + 842, + 156 + ], + [ + 837, + 162 + ], + [ + 842, + 168 + ], + [ + 851, + 166 + ], + [ + 854, + 170 + ], + [ + 854, + 178 + ], + [ + 851, + 184 + ], + [ + 851, + 193 + ], + [ + 849, + 200 + ], + [ + 827, + 200 + ], + [ + 827, + 207 + ], + [ + 830, + 216 + ], + [ + 832, + 222 + ], + [ + 826, + 229 + ], + [ + 816, + 232 + ], + [ + 823, + 240 + ], + [ + 817, + 242 + ], + [ + 802, + 243 + ], + [ + 784, + 244 + ], + [ + 771, + 246 + ], + [ + 750, + 245 + ], + [ + 739, + 236 + ], + [ + 725, + 232 + ], + [ + 721, + 241 + ], + [ + 716, + 243 + ], + [ + 696, + 228 + ], + [ + 688, + 228 + ], + [ + 691, + 236 + ], + [ + 693, + 243 + ], + [ + 690, + 252 + ], + [ + 691, + 260 + ], + [ + 694, + 266 + ], + [ + 706, + 269 + ], + [ + 711, + 271 + ], + [ + 708, + 280 + ], + [ + 706, + 287 + ], + [ + 701, + 297 + ], + [ + 696, + 305 + ], + [ + 700, + 308 + ], + [ + 701, + 315 + ], + [ + 703, + 326 + ], + [ + 704, + 337 + ], + [ + 706, + 341 + ], + [ + 711, + 342 + ], + [ + 718, + 338 + ], + [ + 723, + 328 + ], + [ + 730, + 328 + ], + [ + 747, + 320 + ], + [ + 764, + 316 + ], + [ + 774, + 309 + ], + [ + 783, + 306 + ], + [ + 792, + 316 + ], + [ + 789, + 346 + ], + [ + 777, + 412 + ], + [ + 769, + 454 + ], + [ + 612, + 458 + ], + [ + 616, + 467 + ], + [ + 611, + 475 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1468, + 327 + ], + [ + 1434, + 319 + ], + [ + 1415, + 306 + ], + [ + 1400, + 310 + ], + [ + 1383, + 314 + ], + [ + 1203, + 272 + ], + [ + 1159, + 250 + ], + [ + 1145, + 239 + ], + [ + 1134, + 252 + ], + [ + 1128, + 261 + ], + [ + 1115, + 262 + ], + [ + 1093, + 269 + ], + [ + 862, + 301 + ], + [ + 855, + 285 + ], + [ + 839, + 278 + ], + [ + 822, + 275 + ], + [ + 814, + 285 + ], + [ + 804, + 301 + ], + [ + 818, + 317 + ], + [ + 1025, + 349 + ], + [ + 1213, + 363 + ], + [ + 1328, + 366 + ], + [ + 1453, + 377 + ] + ] + }, + { + "label": "building", + "polygon": [ + [ + 727, + 374 + ], + [ + 780, + 306 + ], + [ + 893, + 294 + ], + [ + 893, + 287 + ], + [ + 912, + 286 + ], + [ + 913, + 290 + ], + [ + 965, + 279 + ], + [ + 966, + 274 + ], + [ + 982, + 269 + ], + [ + 
986, + 261 + ], + [ + 989, + 269 + ], + [ + 994, + 271 + ], + [ + 995, + 277 + ], + [ + 1028, + 271 + ], + [ + 1030, + 265 + ], + [ + 1050, + 266 + ], + [ + 1083, + 260 + ], + [ + 1083, + 253 + ], + [ + 1096, + 250 + ], + [ + 1116, + 254 + ], + [ + 1116, + 263 + ], + [ + 1135, + 289 + ], + [ + 1163, + 240 + ], + [ + 1218, + 231 + ], + [ + 1217, + 227 + ], + [ + 1233, + 224 + ], + [ + 1256, + 229 + ], + [ + 1294, + 237 + ], + [ + 1366, + 274 + ], + [ + 1398, + 333 + ], + [ + 1395, + 334 + ], + [ + 1396, + 358 + ], + [ + 1442, + 305 + ], + [ + 1559, + 255 + ], + [ + 1696, + 261 + ], + [ + 1785, + 310 + ], + [ + 1816, + 350 + ], + [ + 1817, + 377 + ], + [ + 1794, + 405 + ], + [ + 1746, + 428 + ], + [ + 1681, + 444 + ], + [ + 1613, + 441 + ], + [ + 1573, + 440 + ], + [ + 1403, + 415 + ], + [ + 1384, + 438 + ], + [ + 1325, + 441 + ], + [ + 1261, + 441 + ], + [ + 1218, + 445 + ], + [ + 1021, + 447 + ], + [ + 874, + 451 + ], + [ + 796, + 453 + ], + [ + 738, + 452 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 652, + 491 + ], + [ + 603, + 491 + ], + [ + 557, + 494 + ], + [ + 541, + 496 + ], + [ + 528, + 496 + ], + [ + 513, + 493 + ], + [ + 491, + 497 + ], + [ + 473, + 505 + ], + [ + 362, + 510 + ], + [ + 268, + 510 + ], + [ + 248, + 509 + ], + [ + 223, + 507 + ], + [ + 203, + 506 + ], + [ + 177, + 509 + ], + [ + 149, + 519 + ], + [ + 95, + 519 + ], + [ + 33, + 528 + ], + [ + 0, + 529 + ], + [ + 0, + 561 + ], + [ + 48, + 559 + ], + [ + 220, + 546 + ], + [ + 340, + 534 + ], + [ + 457, + 523 + ], + [ + 551, + 512 + ], + [ + 626, + 502 + ], + [ + 647, + 499 + ], + [ + 651, + 498 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 570, + 354 + ], + [ + 571, + 385 + ], + [ + 531, + 388 + ], + [ + 531, + 381 + ], + [ + 531, + 371 + ], + [ + 531, + 363 + ], + [ + 531, + 357 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 545, + 491 + ], + [ + 548, + 491 + ], + [ + 543, + 356 + ], + [ + 540, + 356 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 528, + 438 + ], + [ + 542, + 436 + ], + [ + 545, + 493 + ], + [ + 534, + 492 + ], + [ + 531, + 487 + ], + [ + 531, + 456 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 362, + 509 + ], + [ + 356, + 160 + ], + [ + 366, + 159 + ], + [ + 371, + 509 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 320, + 408 + ], + [ + 321, + 510 + ], + [ + 335, + 512 + ], + [ + 336, + 489 + ], + [ + 380, + 489 + ], + [ + 382, + 514 + ], + [ + 396, + 513 + ], + [ + 394, + 409 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 713, + 243 + ], + [ + 720, + 442 + ], + [ + 735, + 441 + ], + [ + 729, + 277 + ], + [ + 864, + 269 + ], + [ + 849, + 264 + ], + [ + 729, + 268 + ], + [ + 727, + 240 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 720, + 429 + ], + [ + 612, + 434 + ], + [ + 612, + 462 + ], + [ + 624, + 462 + ], + [ + 721, + 460 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1043, + 436 + ], + [ + 1030, + 435 + ], + [ + 1023, + 432 + ], + [ + 1016, + 430 + ], + [ + 1009, + 426 + ], + [ + 999, + 419 + ], + [ + 990, + 428 + ], + [ + 976, + 432 + ], + [ + 939, + 432 + ], + [ + 901, + 433 + ], + [ + 889, + 436 + ], + [ + 879, + 439 + ], + [ + 843, + 443 + ], + [ + 825, + 443 + ], + [ + 792, + 439 + ], + [ + 779, + 439 + ], + [ + 765, + 439 + ], + [ + 748, + 436 + ], + [ + 738, + 433 + ], + [ + 731, + 435 + ], + [ + 725, + 438 + ], + [ + 721, + 440 + ], + [ + 711, + 442 + ], + [ + 715, + 446 + ], + [ + 718, + 451 + ], + [ + 726, + 455 + ], + [ + 753, + 456 + ], + [ + 920, + 460 + ], + [ + 1033, 
+ 458 + ], + [ + 1044, + 456 + ], + [ + 1053, + 451 + ], + [ + 1055, + 443 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 725, + 294 + ], + [ + 735, + 453 + ], + [ + 741, + 453 + ], + [ + 736, + 294 + ], + [ + 730, + 293 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 705, + 437 + ], + [ + 693, + 439 + ], + [ + 684, + 446 + ], + [ + 682, + 453 + ], + [ + 687, + 455 + ], + [ + 698, + 456 + ], + [ + 714, + 459 + ], + [ + 727, + 458 + ], + [ + 728, + 453 + ], + [ + 728, + 446 + ], + [ + 722, + 438 + ], + [ + 712, + 437 + ] + ] + }, + { + "label": "guard rail", + "polygon": [ + [ + 620, + 463 + ], + [ + 752, + 461 + ], + [ + 833, + 463 + ], + [ + 914, + 465 + ], + [ + 984, + 465 + ], + [ + 1031, + 469 + ], + [ + 1043, + 471 + ], + [ + 1047, + 467 + ], + [ + 1040, + 451 + ], + [ + 1005, + 446 + ], + [ + 858, + 450 + ], + [ + 671, + 453 + ], + [ + 618, + 454 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1205, + 393 + ], + [ + 1196, + 257 + ], + [ + 1204, + 256 + ], + [ + 1208, + 388 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1188, + 219 + ], + [ + 1192, + 261 + ], + [ + 1213, + 261 + ], + [ + 1208, + 216 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 997, + 511 + ], + [ + 1001, + 503 + ], + [ + 1005, + 487 + ], + [ + 1017, + 502 + ], + [ + 1020, + 489 + ], + [ + 1018, + 471 + ], + [ + 1023, + 463 + ], + [ + 1033, + 457 + ], + [ + 1041, + 449 + ], + [ + 1049, + 439 + ], + [ + 1061, + 431 + ], + [ + 1059, + 421 + ], + [ + 1058, + 404 + ], + [ + 1066, + 394 + ], + [ + 1083, + 396 + ], + [ + 1099, + 381 + ], + [ + 1119, + 380 + ], + [ + 1131, + 381 + ], + [ + 1140, + 384 + ], + [ + 1149, + 383 + ], + [ + 1163, + 382 + ], + [ + 1168, + 386 + ], + [ + 1176, + 388 + ], + [ + 1185, + 382 + ], + [ + 1203, + 379 + ], + [ + 1214, + 382 + ], + [ + 1216, + 377 + ], + [ + 1220, + 370 + ], + [ + 1233, + 363 + ], + [ + 1248, + 364 + ], + [ + 1254, + 364 + ], + [ + 1266, + 364 + ], + [ + 1272, + 363 + ], + [ + 1275, + 360 + ], + [ + 1276, + 352 + ], + [ + 1287, + 353 + ], + [ + 1281, + 341 + ], + [ + 1293, + 343 + ], + [ + 1285, + 335 + ], + [ + 1277, + 337 + ], + [ + 1265, + 336 + ], + [ + 1256, + 338 + ], + [ + 1251, + 344 + ], + [ + 1239, + 345 + ], + [ + 1227, + 347 + ], + [ + 1217, + 347 + ], + [ + 1221, + 337 + ], + [ + 1227, + 331 + ], + [ + 1227, + 326 + ], + [ + 1224, + 323 + ], + [ + 1214, + 324 + ], + [ + 1208, + 333 + ], + [ + 1194, + 339 + ], + [ + 1183, + 329 + ], + [ + 1179, + 325 + ], + [ + 1179, + 314 + ], + [ + 1180, + 305 + ], + [ + 1159, + 295 + ], + [ + 1173, + 282 + ], + [ + 1177, + 275 + ], + [ + 1192, + 266 + ], + [ + 1203, + 258 + ], + [ + 1206, + 243 + ], + [ + 1217, + 226 + ], + [ + 1219, + 213 + ], + [ + 1230, + 207 + ], + [ + 1230, + 198 + ], + [ + 1233, + 185 + ], + [ + 1241, + 187 + ], + [ + 1241, + 176 + ], + [ + 1246, + 169 + ], + [ + 1260, + 173 + ], + [ + 1271, + 173 + ], + [ + 1282, + 173 + ], + [ + 1291, + 175 + ], + [ + 1305, + 175 + ], + [ + 1310, + 168 + ], + [ + 1318, + 178 + ], + [ + 1334, + 184 + ], + [ + 1346, + 187 + ], + [ + 1372, + 194 + ], + [ + 1382, + 194 + ], + [ + 1397, + 198 + ], + [ + 1407, + 202 + ], + [ + 1410, + 209 + ], + [ + 1410, + 216 + ], + [ + 1404, + 221 + ], + [ + 1390, + 225 + ], + [ + 1370, + 232 + ], + [ + 1374, + 247 + ], + [ + 1376, + 264 + ], + [ + 1380, + 268 + ], + [ + 1381, + 274 + ], + [ + 1381, + 282 + ], + [ + 1382, + 292 + ], + [ + 1383, + 304 + ], + [ + 1386, + 308 + ], + [ + 1373, + 310 + ], + [ + 1362, + 311 + ], + [ + 1358, + 321 + ], + [ + 1360, + 335 + ], + [ + 
1341, + 340 + ], + [ + 1335, + 342 + ], + [ + 1327, + 347 + ], + [ + 1333, + 351 + ], + [ + 1336, + 355 + ], + [ + 1338, + 359 + ], + [ + 1332, + 371 + ], + [ + 1333, + 391 + ], + [ + 1337, + 395 + ], + [ + 1350, + 412 + ], + [ + 1345, + 423 + ], + [ + 1315, + 430 + ], + [ + 1261, + 436 + ], + [ + 1193, + 440 + ], + [ + 1142, + 448 + ], + [ + 1123, + 497 + ], + [ + 1107, + 504 + ], + [ + 1086, + 504 + ], + [ + 1066, + 506 + ], + [ + 1039, + 506 + ], + [ + 1020, + 508 + ], + [ + 1005, + 509 + ] + ] + }, + { + "label": "wall", + "polygon": [ + [ + 1302, + 424 + ], + [ + 1209, + 425 + ], + [ + 1213, + 448 + ], + [ + 1295, + 447 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 662, + 280 + ], + [ + 669, + 492 + ], + [ + 677, + 492 + ], + [ + 667, + 279 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 755, + 288 + ], + [ + 755, + 245 + ], + [ + 772, + 244 + ], + [ + 772, + 288 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 661, + 380 + ], + [ + 649, + 375 + ], + [ + 645, + 377 + ], + [ + 646, + 402 + ], + [ + 652, + 402 + ], + [ + 658, + 400 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 732, + 259 + ], + [ + 771, + 260 + ], + [ + 771, + 263 + ], + [ + 732, + 262 + ], + [ + 653, + 286 + ], + [ + 645, + 295 + ], + [ + 647, + 346 + ], + [ + 650, + 494 + ], + [ + 644, + 493 + ], + [ + 639, + 295 + ], + [ + 648, + 283 + ], + [ + 664, + 277 + ], + [ + 726, + 259 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 640, + 375 + ], + [ + 640, + 405 + ], + [ + 649, + 405 + ], + [ + 649, + 375 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 631, + 353 + ], + [ + 631, + 365 + ], + [ + 646, + 365 + ], + [ + 646, + 359 + ], + [ + 653, + 358 + ], + [ + 653, + 348 + ], + [ + 642, + 349 + ], + [ + 642, + 354 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1044, + 238 + ], + [ + 1054, + 421 + ], + [ + 1060, + 421 + ], + [ + 1055, + 240 + ], + [ + 1049, + 236 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1056, + 441 + ], + [ + 1054, + 486 + ], + [ + 1060, + 487 + ], + [ + 1060, + 442 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1049, + 419 + ], + [ + 1045, + 424 + ], + [ + 1044, + 431 + ], + [ + 1045, + 438 + ], + [ + 1048, + 443 + ], + [ + 1053, + 447 + ], + [ + 1060, + 448 + ], + [ + 1066, + 447 + ], + [ + 1073, + 440 + ], + [ + 1073, + 432 + ], + [ + 1072, + 422 + ], + [ + 1067, + 417 + ], + [ + 1058, + 414 + ], + [ + 1054, + 414 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1064, + 307 + ], + [ + 1067, + 338 + ], + [ + 1087, + 338 + ], + [ + 1088, + 307 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1074, + 322 + ], + [ + 1077, + 473 + ], + [ + 1082, + 473 + ], + [ + 1078, + 307 + ], + [ + 1074, + 307 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1076, + 355 + ], + [ + 1066, + 354 + ], + [ + 1064, + 357 + ], + [ + 1055, + 357 + ], + [ + 1056, + 382 + ], + [ + 1064, + 386 + ], + [ + 1064, + 388 + ], + [ + 1074, + 389 + ], + [ + 1077, + 379 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1099, + 370 + ], + [ + 1099, + 357 + ], + [ + 1092, + 357 + ], + [ + 1091, + 354 + ], + [ + 1077, + 354 + ], + [ + 1077, + 388 + ], + [ + 1088, + 389 + ], + [ + 1091, + 385 + ], + [ + 1099, + 383 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1083, + 389 + ], + [ + 1072, + 389 + ], + [ + 1071, + 353 + ], + [ + 1082, + 354 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1061, + 344 + ], + [ + 1076, + 315 + ], 
+ [ + 1094, + 343 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1123, + 424 + ], + [ + 1109, + 421 + ], + [ + 1100, + 425 + ], + [ + 1090, + 434 + ], + [ + 1087, + 443 + ], + [ + 1094, + 452 + ], + [ + 1107, + 454 + ], + [ + 1118, + 453 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1221, + 389 + ], + [ + 1222, + 434 + ], + [ + 1224, + 435 + ], + [ + 1224, + 389 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1265, + 411 + ], + [ + 1267, + 428 + ], + [ + 1270, + 428 + ], + [ + 1268, + 409 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1265, + 392 + ], + [ + 1261, + 396 + ], + [ + 1262, + 400 + ], + [ + 1263, + 403 + ], + [ + 1262, + 405 + ], + [ + 1262, + 412 + ], + [ + 1273, + 413 + ], + [ + 1272, + 406 + ], + [ + 1270, + 405 + ], + [ + 1271, + 402 + ], + [ + 1271, + 396 + ], + [ + 1270, + 393 + ] + ] + }, + { + "label": "dynamic", + "polygon": [ + [ + 1234, + 429 + ], + [ + 1235, + 447 + ], + [ + 1280, + 446 + ], + [ + 1279, + 424 + ], + [ + 1275, + 420 + ], + [ + 1241, + 421 + ], + [ + 1237, + 425 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1301, + 387 + ], + [ + 1306, + 412 + ], + [ + 1310, + 413 + ], + [ + 1305, + 386 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1295, + 391 + ], + [ + 1293, + 372 + ], + [ + 1300, + 369 + ], + [ + 1308, + 389 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1354, + 390 + ], + [ + 1347, + 389 + ], + [ + 1349, + 387 + ], + [ + 1350, + 382 + ], + [ + 1349, + 377 + ], + [ + 1345, + 375 + ], + [ + 1341, + 375 + ], + [ + 1338, + 378 + ], + [ + 1338, + 382 + ], + [ + 1339, + 386 + ], + [ + 1341, + 390 + ], + [ + 1334, + 390 + ], + [ + 1343, + 408 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1549, + 409 + ], + [ + 1485, + 411 + ], + [ + 1483, + 379 + ], + [ + 1474, + 378 + ], + [ + 1465, + 378 + ], + [ + 1454, + 383 + ], + [ + 1436, + 391 + ], + [ + 1440, + 404 + ], + [ + 1438, + 407 + ], + [ + 1409, + 407 + ], + [ + 1414, + 400 + ], + [ + 1414, + 390 + ], + [ + 1407, + 381 + ], + [ + 1394, + 381 + ], + [ + 1382, + 390 + ], + [ + 1379, + 402 + ], + [ + 1383, + 405 + ], + [ + 1389, + 407 + ], + [ + 1378, + 409 + ], + [ + 1378, + 421 + ], + [ + 1381, + 440 + ], + [ + 1455, + 441 + ], + [ + 1560, + 439 + ] + ] + }, + { + "label": "terrain", + "polygon": [ + [ + 1697, + 463 + ], + [ + 1620, + 464 + ], + [ + 1586, + 461 + ], + [ + 1527, + 454 + ], + [ + 1487, + 451 + ], + [ + 1430, + 449 + ], + [ + 1235, + 451 + ], + [ + 1243, + 447 + ], + [ + 1384, + 440 + ], + [ + 1418, + 440 + ], + [ + 1462, + 438 + ], + [ + 1489, + 437 + ], + [ + 1516, + 437 + ], + [ + 1551, + 439 + ], + [ + 1575, + 438 + ], + [ + 1606, + 441 + ], + [ + 1635, + 442 + ], + [ + 1672, + 443 + ], + [ + 1692, + 452 + ], + [ + 1695, + 458 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1367, + 103 + ], + [ + 1375, + 445 + ], + [ + 1386, + 444 + ], + [ + 1372, + 103 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1369, + 351 + ], + [ + 1367, + 356 + ], + [ + 1369, + 362 + ], + [ + 1374, + 366 + ], + [ + 1381, + 366 + ], + [ + 1385, + 362 + ], + [ + 1386, + 357 + ], + [ + 1385, + 351 + ], + [ + 1379, + 347 + ], + [ + 1373, + 347 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1369, + 375 + ], + [ + 1378, + 365 + ], + [ + 1388, + 375 + ], + [ + 1379, + 382 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1331, + 405 + ], + [ + 1308, + 407 + ], + [ + 1302, + 411 + ], + [ + 1295, + 422 + ], + [ + 1292, + 428 + ], + [ + 1288, + 437 + ], + [ + 
1285, + 446 + ], + [ + 1286, + 458 + ], + [ + 1289, + 470 + ], + [ + 1293, + 472 + ], + [ + 1300, + 473 + ], + [ + 1307, + 473 + ], + [ + 1309, + 473 + ], + [ + 1310, + 470 + ], + [ + 1311, + 467 + ], + [ + 1321, + 465 + ], + [ + 1327, + 463 + ], + [ + 1354, + 463 + ], + [ + 1355, + 469 + ], + [ + 1360, + 471 + ], + [ + 1368, + 470 + ], + [ + 1374, + 470 + ], + [ + 1377, + 461 + ], + [ + 1378, + 453 + ], + [ + 1377, + 442 + ], + [ + 1373, + 428 + ], + [ + 1362, + 410 + ], + [ + 1356, + 406 + ], + [ + 1345, + 405 + ] + ] + }, + { + "label": "car", + "polygon": [ + [ + 1191, + 412 + ], + [ + 1168, + 412 + ], + [ + 1138, + 414 + ], + [ + 1120, + 417 + ], + [ + 1114, + 429 + ], + [ + 1109, + 442 + ], + [ + 1102, + 453 + ], + [ + 1098, + 462 + ], + [ + 1095, + 476 + ], + [ + 1094, + 493 + ], + [ + 1095, + 505 + ], + [ + 1095, + 519 + ], + [ + 1098, + 528 + ], + [ + 1102, + 530 + ], + [ + 1108, + 531 + ], + [ + 1115, + 531 + ], + [ + 1123, + 528 + ], + [ + 1126, + 526 + ], + [ + 1128, + 523 + ], + [ + 1129, + 520 + ], + [ + 1130, + 516 + ], + [ + 1147, + 516 + ], + [ + 1161, + 513 + ], + [ + 1175, + 513 + ], + [ + 1191, + 514 + ], + [ + 1206, + 514 + ], + [ + 1220, + 514 + ], + [ + 1221, + 522 + ], + [ + 1227, + 529 + ], + [ + 1232, + 530 + ], + [ + 1235, + 529 + ], + [ + 1242, + 528 + ], + [ + 1243, + 517 + ], + [ + 1241, + 482 + ], + [ + 1241, + 467 + ], + [ + 1239, + 456 + ], + [ + 1239, + 452 + ], + [ + 1245, + 450 + ], + [ + 1248, + 448 + ], + [ + 1248, + 443 + ], + [ + 1245, + 439 + ], + [ + 1240, + 437 + ], + [ + 1235, + 437 + ], + [ + 1231, + 439 + ], + [ + 1229, + 443 + ], + [ + 1223, + 429 + ], + [ + 1216, + 417 + ], + [ + 1210, + 413 + ], + [ + 1200, + 413 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 959, + 247 + ], + [ + 959, + 252 + ], + [ + 973, + 253 + ], + [ + 976, + 251 + ], + [ + 973, + 248 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1105, + 171 + ], + [ + 1106, + 180 + ], + [ + 1127, + 180 + ], + [ + 1125, + 170 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1428, + 373 + ], + [ + 1426, + 441 + ], + [ + 1429, + 441 + ], + [ + 1432, + 374 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1415, + 330 + ], + [ + 1415, + 380 + ], + [ + 1449, + 379 + ], + [ + 1448, + 331 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1463, + 361 + ], + [ + 1468, + 439 + ], + [ + 1471, + 439 + ], + [ + 1466, + 361 + ] + ] + }, + { + "label": "vegetation", + "polygon": [ + [ + 1578, + 22 + ], + [ + 1558, + 36 + ], + [ + 1546, + 50 + ], + [ + 1545, + 68 + ], + [ + 1545, + 83 + ], + [ + 1534, + 87 + ], + [ + 1518, + 91 + ], + [ + 1514, + 77 + ], + [ + 1506, + 83 + ], + [ + 1499, + 73 + ], + [ + 1487, + 75 + ], + [ + 1477, + 88 + ], + [ + 1475, + 93 + ], + [ + 1458, + 92 + ], + [ + 1459, + 106 + ], + [ + 1463, + 119 + ], + [ + 1459, + 127 + ], + [ + 1464, + 139 + ], + [ + 1459, + 146 + ], + [ + 1450, + 154 + ], + [ + 1447, + 167 + ], + [ + 1453, + 176 + ], + [ + 1462, + 180 + ], + [ + 1474, + 193 + ], + [ + 1475, + 207 + ], + [ + 1473, + 210 + ], + [ + 1467, + 206 + ], + [ + 1459, + 211 + ], + [ + 1450, + 223 + ], + [ + 1442, + 244 + ], + [ + 1447, + 256 + ], + [ + 1452, + 262 + ], + [ + 1448, + 266 + ], + [ + 1437, + 271 + ], + [ + 1430, + 279 + ], + [ + 1436, + 285 + ], + [ + 1432, + 294 + ], + [ + 1430, + 306 + ], + [ + 1431, + 314 + ], + [ + 1440, + 318 + ], + [ + 1443, + 322 + ], + [ + 1441, + 325 + ], + [ + 1441, + 333 + ], + [ + 1453, + 336 + ], + [ + 1457, + 333 + ], + [ + 1466, + 335 + ], + [ + 1471, + 340 + ], + [ + 1475, + 338 + 
], + [ + 1486, + 336 + ], + [ + 1497, + 334 + ], + [ + 1505, + 338 + ], + [ + 1511, + 417 + ], + [ + 1512, + 439 + ], + [ + 1520, + 439 + ], + [ + 1517, + 408 + ], + [ + 1513, + 383 + ], + [ + 1511, + 366 + ], + [ + 1525, + 335 + ], + [ + 1551, + 342 + ], + [ + 1560, + 334 + ], + [ + 1574, + 328 + ], + [ + 1584, + 327 + ], + [ + 1611, + 333 + ], + [ + 1624, + 334 + ], + [ + 1634, + 335 + ], + [ + 1640, + 337 + ], + [ + 1649, + 444 + ], + [ + 1660, + 445 + ], + [ + 1660, + 397 + ], + [ + 1656, + 372 + ], + [ + 1653, + 344 + ], + [ + 1664, + 353 + ], + [ + 1674, + 355 + ], + [ + 1686, + 359 + ], + [ + 1692, + 353 + ], + [ + 1693, + 347 + ], + [ + 1699, + 341 + ], + [ + 1723, + 363 + ], + [ + 1743, + 387 + ], + [ + 1744, + 398 + ], + [ + 1687, + 403 + ], + [ + 1675, + 405 + ], + [ + 1670, + 422 + ], + [ + 1672, + 445 + ], + [ + 1741, + 445 + ], + [ + 1739, + 446 + ], + [ + 1733, + 461 + ], + [ + 1724, + 476 + ], + [ + 1712, + 488 + ], + [ + 1721, + 498 + ], + [ + 1750, + 506 + ], + [ + 1845, + 532 + ], + [ + 1943, + 544 + ], + [ + 2023, + 560 + ], + [ + 2048, + 570 + ], + [ + 2048, + 0 + ], + [ + 1759, + 0 + ], + [ + 1760, + 5 + ], + [ + 1772, + 20 + ], + [ + 1772, + 33 + ], + [ + 1776, + 42 + ], + [ + 1782, + 45 + ], + [ + 1785, + 52 + ], + [ + 1784, + 63 + ], + [ + 1795, + 63 + ], + [ + 1799, + 63 + ], + [ + 1799, + 72 + ], + [ + 1794, + 81 + ], + [ + 1794, + 87 + ], + [ + 1781, + 95 + ], + [ + 1775, + 100 + ], + [ + 1773, + 105 + ], + [ + 1773, + 114 + ], + [ + 1771, + 122 + ], + [ + 1767, + 122 + ], + [ + 1759, + 109 + ], + [ + 1753, + 101 + ], + [ + 1750, + 112 + ], + [ + 1747, + 119 + ], + [ + 1736, + 109 + ], + [ + 1735, + 100 + ], + [ + 1732, + 95 + ], + [ + 1727, + 101 + ], + [ + 1726, + 89 + ], + [ + 1724, + 73 + ], + [ + 1722, + 73 + ], + [ + 1714, + 83 + ], + [ + 1708, + 92 + ], + [ + 1706, + 77 + ], + [ + 1704, + 69 + ], + [ + 1699, + 70 + ], + [ + 1691, + 77 + ], + [ + 1683, + 89 + ], + [ + 1677, + 81 + ], + [ + 1674, + 85 + ], + [ + 1677, + 99 + ], + [ + 1682, + 111 + ], + [ + 1683, + 120 + ], + [ + 1681, + 128 + ], + [ + 1674, + 127 + ], + [ + 1671, + 117 + ], + [ + 1667, + 108 + ], + [ + 1663, + 105 + ], + [ + 1666, + 94 + ], + [ + 1665, + 92 + ], + [ + 1653, + 92 + ], + [ + 1635, + 93 + ], + [ + 1630, + 99 + ], + [ + 1621, + 101 + ], + [ + 1610, + 92 + ], + [ + 1623, + 82 + ], + [ + 1619, + 72 + ], + [ + 1628, + 66 + ], + [ + 1625, + 55 + ], + [ + 1619, + 64 + ], + [ + 1613, + 70 + ], + [ + 1612, + 64 + ], + [ + 1611, + 52 + ], + [ + 1602, + 40 + ], + [ + 1589, + 24 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1459, + 329 + ], + [ + 1457, + 333 + ], + [ + 1458, + 340 + ], + [ + 1460, + 346 + ], + [ + 1457, + 347 + ], + [ + 1464, + 368 + ], + [ + 1472, + 349 + ], + [ + 1466, + 348 + ], + [ + 1469, + 343 + ], + [ + 1469, + 336 + ], + [ + 1467, + 331 + ], + [ + 1464, + 328 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1600, + 342 + ], + [ + 1534, + 343 + ], + [ + 1535, + 439 + ], + [ + 1599, + 440 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1528, + 443 + ], + [ + 1536, + 442 + ], + [ + 1535, + 372 + ], + [ + 1527, + 370 + ], + [ + 1527, + 372 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1508, + 422 + ], + [ + 1496, + 421 + ], + [ + 1496, + 438 + ], + [ + 1511, + 439 + ] + ] + }, + { + "label": "static", + "polygon": [ + [ + 1512, + 305 + ], + [ + 1551, + 306 + ], + [ + 1557, + 354 + ], + [ + 1535, + 375 + ], + [ + 1527, + 375 + ], + [ + 1510, + 358 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1555, + 447 + 
], + [ + 1546, + 74 + ], + [ + 1542, + 60 + ], + [ + 1527, + 55 + ], + [ + 1501, + 52 + ], + [ + 1488, + 51 + ], + [ + 1455, + 53 + ], + [ + 1424, + 52 + ], + [ + 1423, + 50 + ], + [ + 1427, + 47 + ], + [ + 1449, + 47 + ], + [ + 1484, + 45 + ], + [ + 1509, + 48 + ], + [ + 1540, + 55 + ], + [ + 1547, + 60 + ], + [ + 1552, + 73 + ], + [ + 1551, + 92 + ], + [ + 1560, + 272 + ], + [ + 1565, + 447 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1550, + 333 + ], + [ + 1549, + 336 + ], + [ + 1540, + 337 + ], + [ + 1543, + 366 + ], + [ + 1549, + 367 + ], + [ + 1549, + 368 + ], + [ + 1559, + 368 + ], + [ + 1559, + 341 + ] + ] + }, + { + "label": "traffic light", + "polygon": [ + [ + 1564, + 335 + ], + [ + 1565, + 371 + ], + [ + 1555, + 371 + ], + [ + 1555, + 337 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 1575, + 300 + ], + [ + 1554, + 270 + ], + [ + 1538, + 303 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 2022, + 317 + ], + [ + 2025, + 416 + ], + [ + 2030, + 417 + ], + [ + 2027, + 320 + ] + ] + }, + { + "label": "traffic sign", + "polygon": [ + [ + 2023, + 236 + ], + [ + 2006, + 242 + ], + [ + 1995, + 259 + ], + [ + 1996, + 278 + ], + [ + 2005, + 290 + ], + [ + 2018, + 299 + ], + [ + 2022, + 302 + ], + [ + 2023, + 306 + ], + [ + 2006, + 306 + ], + [ + 2004, + 326 + ], + [ + 2048, + 325 + ], + [ + 2048, + 300 + ], + [ + 2033, + 301 + ], + [ + 2044, + 293 + ], + [ + 2048, + 281 + ], + [ + 2048, + 241 + ], + [ + 2047, + 236 + ], + [ + 2033, + 235 + ] + ] + }, + { + "label": "sidewalk", + "polygon": [ + [ + 2039, + 573 + ], + [ + 1870, + 542 + ], + [ + 1786, + 523 + ], + [ + 1741, + 510 + ], + [ + 1710, + 498 + ], + [ + 1706, + 494 + ], + [ + 1706, + 489 + ], + [ + 1711, + 486 + ], + [ + 1713, + 485 + ], + [ + 1717, + 492 + ], + [ + 1757, + 504 + ], + [ + 1839, + 525 + ], + [ + 1908, + 534 + ], + [ + 2008, + 554 + ], + [ + 2040, + 559 + ], + [ + 2048, + 563 + ], + [ + 2048, + 575 + ] + ] + }, + { + "label": "pole", + "polygon": [ + [ + 1714, + 333 + ], + [ + 1716, + 447 + ], + [ + 1703, + 447 + ], + [ + 1702, + 337 + ] + ] + }, + { + "label": "license plate", + "polygon": [ + [ + 1142, + 483 + ], + [ + 1141, + 493 + ], + [ + 1187, + 493 + ], + [ + 1188, + 482 + ] + ] + }, + { + "label": "ego vehicle", + "polygon": [ + [ + 271, + 1023 + ], + [ + 387, + 1009 + ], + [ + 549, + 993 + ], + [ + 821, + 966 + ], + [ + 1082, + 950 + ], + [ + 1142, + 947 + ], + [ + 1112, + 932 + ], + [ + 1096, + 896 + ], + [ + 1108, + 861 + ], + [ + 1137, + 844 + ], + [ + 1175, + 844 + ], + [ + 1204, + 867 + ], + [ + 1209, + 897 + ], + [ + 1195, + 931 + ], + [ + 1161, + 945 + ], + [ + 1376, + 957 + ], + [ + 1654, + 977 + ], + [ + 1883, + 1002 + ], + [ + 2028, + 1023 + ] + ] + }, + { + "label": "out of roi", + "polygon": [ + [ + 0, + 0 + ], + [ + 2048, + 0 + ], + [ + 2048, + 1024 + ], + [ + 0, + 1024 + ], + [ + 0, + 0 + ], + [ + 5, + 5 + ], + [ + 5, + 1019 + ], + [ + 2043, + 1019 + ], + [ + 2043, + 5 + ], + [ + 5, + 5 + ] + ] + } + ] +} \ No newline at end of file diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000001_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000001_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3141e7d2a9115008cbdfe14d4ca9d704f1a5825e --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000001_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1599e5a68d27e5cf1f0f1e1374c4ba73e7a07897aeb0bb0eaa8fbfc176f561a8 +size 2568049 diff 
--git a/cityscapes/leftImg8bit/train/bremen/bremen_000002_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000002_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..78ec912402d370eef25322701393915db4da0b0f --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000002_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9de3502c227dd5bd82431d05ebcff5bdecedc91962291c54e672c4ff8afac50b +size 2370007 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000003_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000003_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f402ef510c5f2f61913a6f3b25d500d2fc6d497f --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000003_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32cc0cd05049916aa43bf1f19d97301feace343888e6f0a2d361b6a5d46e9926 +size 2001015 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000005_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000005_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d23306bed2a1499e74584f97df6ff50eccf57386 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000005_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66be7e4f92259f7d3e25bc5a283ea8b739758e34a5dff657ae11beedb2cd8bc5 +size 2241614 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000008_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000008_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2c5f5614557b70f6d1d6c9154786a0c3c703eb6b --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000008_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d75f628efefa8799711cb16dee2b60b923f714717a98882914cc9cfb4399ec8 +size 2301924 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000009_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000009_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7d7e1599173c7ad1fbb1b9722d3e863f714e0794 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000009_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27bb2c4e66930c1c149c95f90a66ec9218f53425a9c26d56f1157ea8535bd962 +size 2416894 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000014_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000014_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b1e792d073b43ab9d5d1add84bc45ef76258c1be --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000014_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9513a29d526a8d3d41f3f9938ba701e9d58beee93dd4cb5ebfd0cdca7c3c7d10 +size 2252512 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000015_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000015_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5ee7647ac1c163ae32157a5c14702e870cd25d02 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000015_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:71b7e074dde12f5ccfc944f0f624fdd62c2ea55f9bee3dd289a59946881f6463 +size 2515205 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000026_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000026_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..9a84b0ee0d2626579e18ba42dbe3f73ed63ab502 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000026_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec5044f4fc412ad36b4791c25be707ff7dc86cd24ec1f6f801eae9ad08524234 +size 2252627 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000027_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000027_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..fde5fa25f4d1b84ac2ffd105f8b03b7ef264ffbe --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000027_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:286fe4ee24f766a7e90e5973adf15679aaf550a9ff0b43526aff0b0d048afea2 +size 2562619 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000030_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000030_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6ab5f6447d0be8a232f606e9d9aa04f00a978108 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000030_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57d930f4c217cf12250cc3ea935e39471b37cc8c0294c34fa9027687978179f6 +size 2262517 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000031_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000031_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6a7159da5e207ac8fb6ff198150db5f5a4f7fb90 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000031_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7a80618ccf916e010b9215d4a3d61a2273a728f872915bfc131729bdaca8830 +size 2229757 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000039_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000039_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..44a70dc4e247694655e7cb39fcaf034e8f090987 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000039_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49fe67d4262b4f668c54042888641c51474c73198e12f8910d07e6111e2ad7af +size 2334141 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000042_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000042_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..54e5f8472ab5b4f08ac40422420ad1be7e6d439f --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000042_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e43134e117bea3beb110b5ce150df0b5193a58ca419646778a65729e6d5a62ca +size 2387243 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000043_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000043_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c6bbba6ff37d128fea6203f3cd1ef1c16c876d4d --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000043_000019_leftImg8bit.png @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8f629d08c188779e4d5a14d94ccffd0269c4967b980e53d08e592a34ba8a3a2 +size 2310998 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000048_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000048_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bb5ef1369a83abc052697a2f46f650c9896bc9c7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000048_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61daad9b6c550515a9f34bf58159aa84212d513f1a36e2b7cc4bd0d5fe240314 +size 2274626 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000049_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000049_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..12a3a500a82b45ac948664fb865c483677f432ad --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000049_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f54d9d9acda342fc347d5e4962def892505303447289a1871e2ed1d02ee84da9 +size 2660091 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000054_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000054_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5d1a9e35d499c40bdd4db68914de91a405561587 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000054_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dcb483d08e6e2bbb2136a572a1d6a2e046d90906252ee334ef6528e1a150759 +size 2378431 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000055_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000055_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..77f2687439c4cf26db671c77038c26ae839dc7a8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000055_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d03f1faa58d1a9705f188eee0b31f04af92a9ffe817e0ee776d846be89cbd9b +size 2413673 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000057_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000057_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a3ba6d55cee91914c825b3420ea00ac61c44195a --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000057_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4cda1704d18f5843ae8f1a34fda0feb6d8444f62a575dcc6eb73e1a5964dc60 +size 2443612 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000066_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000066_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b16e029d9a9211a3a51baf7fe4cf6fea46d51501 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000066_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b90545c8969d7b1b0c179ff6b8b269d99a8a90ccb4740a881263818420541ad +size 2478028 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000067_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000067_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0b54b4f3736f953f54dda50277e4ea736d600874 --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/bremen/bremen_000067_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6aa95ed83ca52dff60313d57185def25071038f8cc53962593eec98f1aa1cacc +size 2427745 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000070_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000070_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c3e71d2cfa4f7f21b63eb5a869b467b32ed12e23 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000070_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:336b07727de11beb3327eb742e26ff1881c89797327ab0b2bb5aff1db78f7787 +size 2279780 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000071_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000071_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2c54472125019017b689ebb9c1a97957bcc64642 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000071_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8aed51be3d5326d7a57c328b62c17f29178c4407c213243060b6caa48a93ccfe +size 2392707 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000082_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000082_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bc030ce4df0b25842536b9f4f2ca70bf995d6795 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000082_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c188e55c8e55ca4c68f0411d160dd37f7eacaa7ffd890ce2cde24efceea654d9 +size 2492826 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000088_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000088_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a0951b522538d17c5835f52248f81cf89fdbbf88 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000088_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c45268366f3027f017816835628b69fda4cbd3c13cd6b6d56189e93d1f3b8d1e +size 2361020 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000089_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000089_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..9410b658c1696cde4ee8ee7dacead8fc856e4695 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000089_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30bad2963a0458733b4f3335ce504ecaf310a46550a05ce5c5b8bd9deaa0a735 +size 2415335 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000094_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000094_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7fd5fd8259147b799f245212708a89da511db9d9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000094_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a0eae3fcc89ab2b04d4711205d3298215cf2346f74ca7568e0c5f05a866fa26 +size 2380132 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000095_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000095_000019_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..8906549c4068ed771efb784e14633c169f0f4861 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000095_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec9e5b6a59b4c1a6be38076979cfc4dab069878f5c92036dfae534955fdf84f1 +size 2381857 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000102_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000102_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3d5b6a4f9faea30745131d8c366262cb8311f656 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000102_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b45cf463ec2ccc9f5251cb5891070ee87db5dd9c0bd68a7af676b177e637e677 +size 2336206 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000103_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000103_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7385b4ab5f6e4c051368b4c8eeaf1864b2a46569 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000103_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:627289c7c8af751644713f2be85a309e4bebfe4ebf5fd0259f868bbec0ae259d +size 2349948 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000108_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000108_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6728e9dad40f06b555a87e3d0c1acf33a02a6716 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000108_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:112454976e4b3a1e00068d78347b30a6d489fbcfdc227ba6cb07a2ff2bb321b1 +size 2465284 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000109_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000109_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..fd8329beb4e8803fd2411d45432a536be62b38d4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000109_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95b53583e6c9b3e16c6bf5309468a6bb2547b7c8cd05246ec91b3f55085bbb74 +size 2428679 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000114_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000114_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2a454051e02ee3b940733cbb2e2c4cbdbee386be --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000114_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8c093bba0681bbb85d23619d19f599c2bad2c72d13f2f18e2b269c08edce7dd +size 2230157 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000115_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000115_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..473a0824aaa77ecb540a3f67b10261a8be199b6d --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000115_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55a409de242fb9cb9603256eb01c7d9cd6fa8c08d1347931d4a8ebf26389eac7 +size 2317003 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000126_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/bremen/bremen_000126_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..1703f3c762d2c63fede1a553d9d68f2ce1d49e97 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000126_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5dbdd801f741f389af50d2dd7fccbdba95e6e3fc9d4afd9229e2391bf9c4d093 +size 2851795 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000127_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000127_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c52c6e0729de5846a9b2416868500bff7916180e --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000127_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1788b1796cb545160cd2a762bec4e8b464e9bb965be0ada32237ae6508f0bb6b +size 2422317 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000130_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000130_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..571f1d3829681b5995cce0fc68c6ed2513182535 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000130_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4941984a05aaf72339b0ab8d1fc51dc15f7e003404797660e9927377d458e050 +size 2281599 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000131_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000131_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7938e192563ed3f4cd619cb52b73b4a72f07fb6b --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000131_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73256ca1df7d048eb71581bcd1a7079d5331f2616d3726d6de246cff968cf074 +size 2324929 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000139_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000139_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6aae7c5ded651d8a0aaaa8fa059ca18a70d10346 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000139_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2cc6081628d6f6d438f608171cb5e267604fcdb74bf62aa9aa0e3ad026c4db5 +size 2375854 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000140_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000140_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4e14d20add87d7b1435b5b30c1840829949d420a --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000140_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bf5800bc4b14942d32becb59c17b62033aca582541cb34e4c4b5da644776e6c +size 2350934 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000142_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000142_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7aabb48c87f23c6699668a9dd1b77ec04885dabd --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000142_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:734c6a24acb03f15217fe420902329a0a270e1adf4f9c51fa5891890f7b04781 +size 2331586 diff --git 
a/cityscapes/leftImg8bit/train/bremen/bremen_000143_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000143_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d0149ed262e04652c3b9d1d6e646567d9dc7d8b4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000143_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1563f49e141e9c3d13dfee868e0e764ee08a4196c984b366a3ebd6eb72b9386a +size 2306069 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000148_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000148_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..28e9d613bfa4726d302360d37327504861bcd9f7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000148_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0098eed51b97812e14e4f46680fc14dbc3c1653ace03d5226551339889b99eb7 +size 2326532 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000149_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000149_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2d610b6c2686faf824b5304528f6a3e4c37a44ca --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000149_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ebbccef78df19485b526bc0c873fe65024b7f495f79ea3ae7c2ec1bd395e0bb0 +size 2269461 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000154_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000154_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8af6b018625523a63f466feaadcb9a323a1d382d --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000154_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8bf12baece27539dc1dd84d427c4863568a91863b1a702e78db6990a78452ac7 +size 2235876 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000155_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000155_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3ac00e0eb87c1073b42af4b03ccb3d1e50a4e40c --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000155_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0518413440d489f84e2d09a24ada0f83013174203b5ac9e8865e31caed5be503 +size 2153360 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000166_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000166_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..472b9abbbe2b5ea2b970a3bff5c0374ffdb81307 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000166_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9381e88f2b8b4171441593b3b7d7ce7404431081380bc26c10c6919141f92406 +size 2416450 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000167_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000167_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3a7d71f937f3f89e5ed81f76bc827ea62ca26783 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000167_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4c86ca954f263dadaadd69a5eddbffe5140c5c872b91de6020d4558a3a2742ab +size 2494016 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000170_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000170_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..96370e5820c07231f3cc2820f244199d1f213f26 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000170_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0340803f5688473b45dd9c7b5020990973b8861881591c9ca2115e2208928b70 +size 2059701 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000171_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000171_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7a7bffa040cd60ccf7ad3b33ecc79c20dd48c3a9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000171_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9589fb673a655b22946feede09cb85a0743f9c278d09f33325380bf34b88cc5a +size 2130280 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000182_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000182_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..cc033512536a6cbe46401d2be19affa5870664f5 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000182_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8bd9cc4156ed7f8fb5eaa8c506cdf9e281c51f2df9b5c20b1f3cc173803af14 +size 2005439 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000183_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000183_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..dee998fd4469aa4fff4685f424b00ebb002a088d --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000183_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9253654e9dcd3491aafd2c9432649323c300336e140fbf961766755476d92893 +size 2425386 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000188_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000188_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..00fc94bb6b139390b554a7f995dd3ebd594316ef --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000188_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b58c5b292cae4719f2ed352be6cc7a9879d0ac04a7d44a59f50688cebb8f0789 +size 2188168 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000189_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000189_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d155ca00fd24687f6560880c47b09203cd6f9764 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000189_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e838d4e27eff847ecd3dcdbc9445bbce3f0065da32a5835a813e6b87ee53ae4a +size 2550771 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000195_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000195_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..dc19f2c8126cda6c49facd6afd72f6777f0dafde --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000195_000019_leftImg8bit.png @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c15d2d820bca0ec4216635e437bf9bbbbd3fa605d1cdd5ddcb52130b07379cb +size 2193756 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000202_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000202_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ac0ac635017958638b3592bf2705a30eb215c148 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000202_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f210fa9ed7c9513c0243a8790c0416d1ae6761ef25251e0ddbbbb48dd6047541 +size 2344932 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000208_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000208_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6a89af337a285a7a86dcc2e444a5669ceb4e26b7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000208_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81599289f1ca9767a7d188ace49269408ba8ed53cff8020ebeba6b3e08947a6c +size 2247275 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000209_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000209_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..93367235734c04b55b00739c58438a0255199ce8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000209_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ec98a78941ecd8d9b4de31ea29f533d69676c8be9ff93d169d2b1c9b39319f2 +size 2507299 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000214_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000214_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c1261dc561d66696e07550a1eafcb1d72bd40267 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000214_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c89b36a55206115b780cd6ea3723ef7ec42ba33dd81002dd0d8b5fa768890b71 +size 2540461 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000215_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000215_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bc02cb0de2e0ffa30c151d8fde85a914736bfcbb --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000215_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a33f4cb7c27fa99ff405afa7bb8836521eefabcd875eef3087dac8148783beaf +size 2570693 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000217_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000217_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..466b0c98b18b1206ce9eaf5531da8987d39622b9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000217_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d459addcbecf21b65e5e843af84f0e8cd1760d0a0017eab6dfdd0f19aebbfc2 +size 2199046 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000227_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000227_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..01c23cfaaad044a596ce3616fc3cc8d17cb783e0 --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/bremen/bremen_000227_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e1d56ca165a6484792d9689c8388d6509d4746217b4c0b8a1e9435c78d9576b +size 2233241 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000230_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000230_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a8fc6a9cda209e4bcb5d45d8449dbdfd0fb5c4ca --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000230_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:931352ae696c5b81e1790a42a144606af3bc852f4efafb44b35a200e6349e8be +size 2127903 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000231_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000231_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..be94f6538aac3731fc6fdd8af33aea06d261a70f --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000231_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3644649fdf1f015864cf84640f90eb5f51712bac8cd31271f131b300e03992f +size 2064535 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000238_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000238_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..95f117278fa7bb99afb392a9d11569e9565521aa --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000238_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73f9a78fad22a1e2cf06483de1490c4ef5f209b84693c89f91436f2752e6e8bc +size 2257456 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000242_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000242_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..60b68c96533629e50b1034d102ae6bcf7d966e8a --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000242_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9dba40ffd4f7c960154d3024b4b03fce0ba32449d50334d199d7588d97a60f25 +size 2308339 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000243_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000243_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..705f865ed7815a8f09d784f802ff8c155b9a9706 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000243_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c80b58247abaaa5ba8232ee429421fa2cfe96441fe7ceefe08dd8e9a48fa330c +size 2202470 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000249_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000249_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..978e63a5418da9e7ded41b479e68abb70ba6078c --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000249_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50766eedfc93d248e7bccc7ae5cfb77ca49d187877d14926f189bd2cd15243a8 +size 2209847 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000254_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000254_000019_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..32d9eb84bc840fa39b4f197bc8904e4b0bf8db4e --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000254_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e371a75857cd60978860e5da56f978f5752c52afc511f28be68e718483aa900 +size 2339947 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000255_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000255_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8b0249d52b20f2c4d6c9dd3943163994c88ac5b2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000255_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c290db577d0adf34425934260793af83b13cb7a0919bdc44571cc4a11d42593 +size 2129569 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000265_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000265_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..16fdbb035172cd3b9428c0707a12a2bc55165722 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000265_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0524c7306f5bef4ae03aec3ae3831072ed9e42eae95e9e425d57b69ec28a77d9 +size 2199365 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000266_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000266_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..24a0d92aa96622037c1773ddecf20faa74739ba5 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000266_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2b89f179abec5b39a99dba755f797d4659d94915290dca56e37fff27017f2bf +size 2147262 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000267_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000267_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f33daa6d2e2916f210f9f3e4b967a9edc8982025 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000267_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8a85ed8cb8aaf2a5ad70f51492772347987575fabf05ec45a7d55f5e221f41e +size 2250426 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000270_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000270_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..77b7d6b2b25e9a1f1adc796f968ee6efa0d7ddd8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000270_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78a57ca9632cbe2c6285faefcb6237d93ca63febbcad74d5c95c4095d61a4e99 +size 2148863 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000282_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000282_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5ec56ecb1bd5bdece2a1519fb7b89acd6b7deaab --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000282_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0015943e0d255c00291171a554dfecbd7fdbd244faac399d3d042bbb65cbb926 +size 2397412 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000283_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/bremen/bremen_000283_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..10815d489320a99134f2a929e5d9ec6028c1235e --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000283_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bc7a15e3a130adf0d128b9da16e6c917e5976ef7d5c6593c120132629003bb3 +size 2517429 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000288_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000288_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..eed675198269762d0be5b1ee5874fe12ae582f91 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000288_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:005ee73a3c65e28a7cdd119ec101eee4edf9d54f0ae39b6395f06c507e467de1 +size 2226746 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000289_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000289_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..768c9cf7837e466b0534df2330cf2f3ff2b7a7ad --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000289_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:997718e33e9dbf92fd7945ebd8ac2a90a3855c7fb7c3fc45591ed06468293ba6 +size 2356849 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000294_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000294_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..20832059d2a29172a2d07bf443de660046f5f48c --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000294_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c63d0d8de19c9a08977828b21303f2562946f1bb5708a3af398e8405cc120c9 +size 2469846 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000295_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000295_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..caf5356fd4c431b8ff0e0f28b864af14d49dcfcd --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000295_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b40075402bc52468401ab71a30f381634fe1113251ec98caede877c3d9efa81 +size 2360816 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000296_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000296_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..aee39161b2844154ac884bc2317e06632aa80d87 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000296_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3177c589584747558e4a0c21d76b078fc941d08cd7434ef21a0028501ac231b +size 2429563 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000300_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000300_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..22bc7714220e24560a7aac1b41fb0692f5dee998 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000300_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c30e96f3c07ce6b79e5beb7b76323088d1b1a1b3c833591c2ea42c8fba29a3ce +size 2417988 diff --git 
a/cityscapes/leftImg8bit/train/bremen/bremen_000302_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000302_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f14c258593de192933817b14dea6ad2fc3674cfe --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000302_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95a00916c7985b2223133811cd4f93d83dfc925554a18fb04620f4a73ed6e4bd +size 2019440 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000303_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000303_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8f7097d9892ebb2f6fe0cb5b75c16400933f8d33 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000303_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35dd368acdccdd5406ea5af048b5ca971dba4f48beff4687bf2eed8653f05721 +size 2078949 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000308_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000308_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..29d1cb5452e9b6022acfe99d91274da9c0fa9937 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000308_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd55254be5118119b752a3310db1bb1b94f3e4b831213fc40eeac11ca024be93 +size 2270216 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000309_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000309_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..420c71e003520e146472307bafc03c63c9edc8ae --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000309_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8305f1978f5585478e460b0ee8e8a00ab937dbf9f02e67d2232eaeb75f9199c8 +size 2472011 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000314_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000314_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..805760f87791e8286a962d9d1ed3e9a545369685 --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000314_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bda5c4626e7c885b5e311eb0c0ad94d5047e57f4a5c5108f233b96caa8e72a27 +size 2233050 diff --git a/cityscapes/leftImg8bit/train/bremen/bremen_000315_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/bremen/bremen_000315_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c5c29242b4adcc9c78aeef9dbeea08de445003dc --- /dev/null +++ b/cityscapes/leftImg8bit/train/bremen/bremen_000315_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25da07c6cdc6176419461efc822f51a122b483f0527759a5361c32f3f5488eed +size 2185888 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000004_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000004_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c284c652fe723b660453f4904b055375b41759f8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000004_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f67132e84662a96375365b0c00e2dc208c5a24bddc48c30d2f213f1417e52dea +size 2194756 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000007_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000007_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..93dc0c42358acef7455969c5714f68a6c09eea41 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000007_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74e60075c7e40c24725927800d2a466a41782d4496d70c6c1e533d945d62cf43 +size 2367958 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000010_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000010_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..06ec1ebcfdeb045c88c02033d990d5fa5d642219 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000010_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc2b353702865d97f8c9458f71c460f2a4963eb4255e050cbf80c6f62eddac64 +size 2222863 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000034_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000034_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7aa6c3deb5b376d53d1b0d626d6215be41ce04f7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000034_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:099e4edc157b4048a52f80426fa926e87fd84c5f853ddfb77e60ec99c8d5acc1 +size 2096781 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000035_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000035_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8bf4e416348e4baf63544701cfa5a652db8a2251 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000035_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3663e41b75ff0b409a0448c03dcba9040edd4e357fe91f77592c5de396a1ef4c +size 2160488 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000046_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000046_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..04f43f55b1c30b0d68484780807116504dbd24d0 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000046_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d217168b6e409c2f556e9a2cf29c0ca788cc22c4d5872ac21bed3d2611856779 +size 1954703 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000047_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000047_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d0d7e0d92276e8b632c26d0f01981720b172a522 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000047_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dde0f749e823a28cbb921e3aca75bfc398894cb8b7eddbaa6d50e37f049b7d29 +size 2274508 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000051_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000051_000019_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..377f9a3868ced0c2459ba4d0eeef51a0dfe13125 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000051_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9aeec699a1cc120432bcbe6451c6475206332cf83e7eedd95288d6d339684dd0 +size 2171096 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000075_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000075_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f747bb7e324985ed160aac01128053c5677c1db4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000075_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50971c1cd0ed9db2d7d8e5c2ef76b4e3c940b70f31df3af47c9662ce92f18d98 +size 2055428 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000086_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000086_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..88cb8576c52993abd60f1d40d0e87f1253a04797 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000086_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a07b0ec9826a7d0920591378c0a5a4db2fe4603e7a5c6276382f49f30255f51 +size 2342890 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000091_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000091_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..274441846296e4d6156652c71cb94b308600073f --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000091_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85191ba5f37d261843a0c69da7c94e40102bd91b8e452f3b92a19d09f076e0b0 +size 2016201 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000092_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000092_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7ea861ec07c9e088063b450b64d334fe36d5de42 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000092_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a88f3fdb43c87784c0616f203cb82d33189ea46f6f16e75efbb2b89a2174fdae +size 2060436 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000104_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000104_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..09cc172e6f32c84853687776e108c901685bedfd --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000104_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69388bd2bff210da34b8a486bc3d6c82df825e4ffd3998e4bdb943f95d97d625 +size 1972657 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000110_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000110_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f9a94ffa1e610ce7323b9560eb6a6c5fd42f5c06 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000110_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82b7bade44547112eff8bacde7185c801f9d601dbe0f5f9b53ecc9fb0c3b702d 
+size 2351317 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000113_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000113_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2385c8000e1911697166d24228e764b1d30876e0 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000113_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1f746bc418bdc8573887e14237848496c790a53fdefebb73b7bf11a60e098d49 +size 2447784 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000122_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000122_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..59f6d5d6f0ba192fb8779309d3f2713690ff1ed4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000122_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1be944ac9309be1fefc4667a63870655f8a5cef1f32e007e9f3787df8550c8e6 +size 2231017 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000128_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000128_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d2a5b20f2467afff86ecbd5e310c913b1d41d516 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000128_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:121f105c891bb997aa8a3a40fb68274b0e95b771040d86159e9f5e6216cd208a +size 2093245 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000162_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000162_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..337b45a5330d5fb4b466da94cbd88385d1cca31c --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000162_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b11770868f5da073ab031b28bd0a33402223dc27cf9a1a459b0289a3f48b59a4 +size 2058358 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000175_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000175_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ea81c0bc3141df22e1cea551311f409b611a2cb4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000175_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5aefbcf5375847efaad590188e25b9d726246b55a8bd6cd9562e173e82bb00e8 +size 2070733 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000176_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000176_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..078665c6baf35bcac3df433da289e9eca20edfdf --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000176_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d95855ffb047e4a46547da167f8fba9ffa2655196e5cc063a1d96d1a2bf376b8 +size 1971194 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000187_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000187_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5a8d7d7eed6f350a246cf478876c9a3c69c50edc --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000187_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:176f346c15662f17cce12b8c3e00e3558108d5497ad49f8274900b8e5e95bdd6 +size 2004817 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000207_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000207_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a4ca1218038aad20ae61de79e691aaf46d6714fc --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000207_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f857a85947af9ecae352277384ef0edce2be53bc279fe01e133441b45bb3a0b +size 2272478 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000210_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000210_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e7720950cf7d845c5381d84f14447f19d00bd36f --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000210_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c54e8ce2e7d9b496e234583d73db8235e96e50daf57a72107cab60e5d27a83d2 +size 2140770 diff --git a/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000211_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000211_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7e1a91b0a74f152a237f0da2a06fcbc08668cf3f --- /dev/null +++ b/cityscapes/leftImg8bit/train/dusseldorf/dusseldorf_000211_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10f2922b96a2c1cdc3fa2b2385245aac0735387c8701a7986da4ad2de00e1379 +size 2387336 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_003904_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_003904_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..de75cf62c10b9d97ed755b5198e95e242b44aab2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_003904_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a9f1e16f3c0b816a7af9e4b7dbcdd41f899e3590b6241dd6bd0ce983156c90f +size 2611843 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_005639_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_005639_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e34f64b6ca1738948aa87c1472e2e541c84fa5a4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_005639_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3613424f145e3e7c1c57da3548e81c10a7cdf6881b898dd257b699877f97d5c8 +size 2673308 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_014940_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_014940_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b6cf6c03b433591f941180d49ea3a516e9326c83 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_014940_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffe0897d29d4aa361467d35aad527635938376f2ad7c92bde12353f4c2a96ef4 +size 2834612 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_024251_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_024251_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f6bd03d6c96ad6b6ac1b9c8396bb0fbb627579be --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_024251_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c1dbc02ba2115d69560a3fe299391ebe5b0a7023aafee6ec6d5cc8db66ba566 +size 2670514 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_027857_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_027857_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e26de92bdd7db5c6b1b8a64f8749ba7c4d48faa0 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_027857_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8ddf7dc27b3af1a546db102c87bb47bf2bb6f5e46852bb2f74a465bb353c0a0 +size 2459519 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_036527_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_036527_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..98f7bb5cd07d31358860b43445a22339ab47c168 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_036527_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f21feafe05a1c793e5feeea66d35460269775815a1a0e2665ecb56d6f0c4cfa +size 2261228 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_043944_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_043944_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bf24b684b2b465d66a34f4a27bfb02573b6b3a74 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_043944_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:333be8828a45bd61c34fa20869671e21ddb5e37b13d02425ff14f15c8e71c596 +size 2456179 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_047157_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_047157_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ae8d99929e239ff3475fae7463acd219f6364459 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_047157_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53bc561e050c2eef4c9e940cfccf4776f8197c9534ad88fa1f72da150e39563e +size 2241702 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_052122_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_052122_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a14eb5a3ec7a9c3f94a2171e4a5fe63b7d6e705f --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_052122_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c76ac7133f58f220c9f15ad7382f360b78e738780122da9fb4cfa12e6539721 +size 2698228 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_052904_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_052904_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c704e864083b891edf3719c97bd89cfeb87ed1b2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_052904_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4970f90b0ee2bf9993192d317c2985e0b220d9c8fd0e532f635d14ddfd2662e2 +size 
2650847 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_054029_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_054029_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..1beb398453bf91f286a69f889f76b3aa212e71c8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_054029_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6209d334ab3445edac4d846e924ec55be8ee49fff6ff89797ab4aecb2d1348fb +size 2291413 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_061790_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_061790_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7671f810dd975f6c1927157240079b377ad3eac2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_061790_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e659978fa779c42a8a44a497077d3cf522d388c4a6592adc42b512d16dfe321 +size 2495031 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_062710_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_062710_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..451d819c4e9087522869b93aa9c85f723f16d854 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_062710_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53434a6df91d1970ffe18a257b9ca62d583a4350765460a1635a252cdd4678dc +size 2588755 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_065983_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_065983_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a70e5ec938c4dfbd78dce9687d448ef39dbb1394 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_065983_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4a396bc23f5129491cdde85c7b48da90463a21a53b67cf1533cae921fbc9856 +size 2373787 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_067338_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_067338_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..905858c1c32dd8c1a9f1aa18300ce51901b9e4a4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_067338_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:250f4e6137409a0f28dd0f5c1be6ee5b6edede201f62f9c13da7dde3e39141e5 +size 2375398 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_067799_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_067799_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..15110dea191d53f92b42472e38013902b7baed89 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_067799_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c051f80a95163fca68bdeea453901802edf1ce268d860ed715c7ffae204ac9f5 +size 2426177 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_069096_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_069096_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3149cb43371edb4abbaa4d94e2cedaadbfe377dc --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_069096_leftImg8bit.png @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:7f8fbfaf05c276d58637b6b8f9e2b39ec5b7eb064732b7f8b8b9c4c4b932c389 +size 2253033 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074425_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074425_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..658120e9765766b10ef42fe69d7748f6d7851648 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074425_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67eb93e140aa7c4584986d951be32919383c8cc8796be4f45c20f5b7bd272f59 +size 2560880 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074694_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074694_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7b169e89390caf396d19cfda459a97ffbcd57a30 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_074694_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98566c91fe75d00a32cd1b92ca41b53f97b4de28686216bb956fb01d0b786bd9 +size 2491039 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_077144_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_077144_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..729301184c5ad1bfa5cbdd48f1373f349a8b18de --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_077144_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4eadfd229e9bb7ab56f173e17d82ceb132e0b97f9f039bd763f80381cec7ab97 +size 2472151 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_086636_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_086636_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8ef2fee653969f2befb10a6fb311f295ddf990e7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_086636_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2b984c66b122d3add7f8fd10634cc5597b04b4a5e8b64d8c099cc9db0c01e4a +size 2770359 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_088783_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_088783_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..9c42583585c25a831dc20aee6fb75a1d1d120c8f --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_088783_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:773b4fa6dea3f3e626cc1fb8b8beb1c670141f35f9e0acc47325cb7b844ca934 +size 2390160 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_089491_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_089491_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6adc75c168714c986d0b29aa31ac75403f2e08b9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_089491_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2043bb42fe3bc8bb261bf1ce2bcf34efeb02f0cf236f9215dd7ce6673abdaaa0 +size 2402574 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_089696_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_089696_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0ba3f57839eb22a3ff4968421983f3fd2a1b80e7 --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_089696_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09dd8af65a02ab54b562405ca492be9f755d191595668377bcec0ed5a271584d +size 2349135 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_100300_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_100300_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7edbe9233f511b258488ad7c43135f6a9a0845ca --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_100300_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afab3075a35972c2001509ba977e813b6f0a0ee207901184d624681f64c5ed52 +size 2358605 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_103367_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_103367_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..670c0d3db769f5cf2799fb9efbaee779494440d8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_103367_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:901b5d67465b904a42d1716082ff2ad1c40a85392ea0a360e61e57a9094f5ded +size 2370138 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_103856_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_103856_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..cee26a45d2d99c5530398c8af9b6910deca292ca --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_103856_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd0a672bf700840a65392d4098f3c7a8bd5d8241e854282e1038180c0788c78e +size 2525291 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_104857_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_104857_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c58826ccb2c43980999a0bfb1db6dde6f84cd7da --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_104857_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9499028b08da93a6e20d9e748e9913bdd238fb152d210fada7aad3110b78475d +size 2439938 diff --git a/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_106102_leftImg8bit.png b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_106102_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ec84fbcc13121ee958ee709e70e86ca666f4cf34 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hamburg/hamburg_000000_106102_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0715c536fad30d7be3b65327c70358ad64d53f2e88e787fc9aaad998b7bd9bd +size 2426460 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_000712_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_000712_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a1a39ec53b79294ec78f121f373f130f66525ee6 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_000712_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d276d17bfe0707bb50af5842a1ecea44e701c1001ef3b82fe76c7229b712eb08 +size 2214361 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_001620_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_001620_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..3a449ba3d0eb19d83c4ce093b54f43be6a1f36a4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_001620_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e05bacf8605f3f640755b7bda3f2442b8b5240f7b569eeb8a350c08ab4dfb2be +size 2272234 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_002357_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_002357_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..76f5d445e495c7920f301ca039b45b4a4c21af73 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_002357_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e27ad68c7d366c737ac1069385cf2afc212e4da031bacfb0043fe82d34c183e5 +size 2491608 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_004646_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_004646_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..af71bdaaf1e9067ee84e23b616c34470408721d8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_004646_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72b9c35d12cdf5f1d24d42b1b63decb4b59d22961290ad74a2ee4db557279e02 +size 2258910 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_004752_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_004752_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..470dff31f60fbed49cb15e34ace725f1564a424f --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_004752_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d09dca119be55ce854df94b52d3ec80637e98eca12a180ec378a9e0c3140d4e3 +size 2377789 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_005599_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_005599_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d3cb3a3842724971f3aef51183ea65a1c0312c2d --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_005599_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ddc5fd45a8e651a37d646af0cd7cccf68cee64465ed6a601c634fba98cc2a28 +size 2776655 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_007342_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_007342_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e6be8f34a8dcf6b901858c5fd9c268b69ab71612 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_007342_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64bad66fb641d513e7362ab4b4b358a23c0024aecdf18619113945201e44c51e +size 2439711 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_007780_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_007780_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7fbf38d37910bd7aba0cc13553437635c01b20ff --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_007780_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfcc4b5c258d6f6fcd4b16b34215230325cf9151533614ece93692e4da134d76 +size 2345614 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_008200_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/hanover/hanover_000000_008200_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2bec204e3d9cf9b9df11ba35a2d20336d21c6529 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_008200_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fd6d14b0ce73e7e4c3c2e12c82246230ff968ef9515a3874d97b147baee5a0c +size 2395794 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_009004_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_009004_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..888015d7813c47949f13a05764f9a244e0bfaf2f --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_009004_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae28540c682dda17194fb9874b455a9b3a582224c1b11914a0d8a8cc6c259561 +size 2313489 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_009128_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_009128_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3239470d45f4bbc89e6a490679b94c7e32711a7a --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_009128_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d92716bd9110d78b87a4c9ea02e8aa88406163732bb80af6ac23037fd5cfd7f +size 2498733 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_009420_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_009420_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c3a464ede7553edc7b4bb8a69eb0ebb48a5b0983 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_009420_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84a4197aa6574c1b806417a87a10ee2754a31b3ac86fb19e6df05453db74c762 +size 2495824 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_012675_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_012675_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..9a3f54aac2311798af4aa0ae634e7e1e7652982f --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_012675_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4b2cd8e44eca1b7f8c950a6f7ec18898c5baf9b8433b05a8166054656595ffc +size 2121260 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_013094_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_013094_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f746407421b8b8443f812bb6d890aacb03933af2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_013094_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c74f3063895fcfcdffe34e481c66443396275e25264a2ed978384211ff5c5e9 +size 2452511 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_013205_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_013205_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..51ba7de0feb75f06f816380e0edc4e41e5bd6b98 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_013205_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:978ae5f7a16fde9af4ce3c76feba39306178cd68d2ed49e400d4006d18517baf +size 
2440206 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_014319_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_014319_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c17ba709a71ad764dafa5c944ac00aae2f77931d --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_014319_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60d9f56331f6b9473e522434f6d438fceb36a02f6a96c08fd94bf4bc5613c0d8 +size 2126469 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_014713_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_014713_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..97e7ace22077666cca72992bc46d540ad0e4507c --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_014713_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e658f3225a9e8e23d17b8589362aae997c422ca0b19496d22925f896769b962 +size 2262009 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_016558_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_016558_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..16dcefae329fbe0e3a08d1684e8fc78718fdc186 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_016558_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4684e394d52cecc5a177341d7e1685bbefb9bdaae93a5bd7e25174dcb80caed4 +size 2479772 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_017041_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_017041_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d54dd736a952292192d1d855845d62c587245c6c --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_017041_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d8201e804848b5d5258036dc9a37ee48e22d4c9b3cd542d2bcb08e8ae0822b8 +size 2334902 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_018800_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_018800_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7d9aff1e6ce4022ca5fbc9c33833708d1bf0a8cc --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_018800_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65ae9dd0fc055c8c8eb57958823b2a6e77e90d23333a36a9ac3e64b899b8a419 +size 2307303 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_019672_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_019672_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..74633fbd05ae8cc3b9df7a38df7b0077242338cb --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_019672_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8369d1790af19309847fc514b1028c961a41d4479487fd19a79a735da6692bd +size 2518294 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_020655_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_020655_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..38ad8edac8a5aa9e499044ffcde54902b3ed242d --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_020655_leftImg8bit.png @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:7aa41042dbf39010ea5e06ef9417f26736aa71e2fc8f9e5df801249c4ddc3701 +size 2591318 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_023239_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_023239_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0e810e93832e8d88da9cbb36258500ed63b93750 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_023239_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:091e7abcdcb760404bc8bfc7012a03a655d7a176bde0a9bd3e9acd2b72faff97 +size 2149071 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_023614_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_023614_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e5ed2401d04ea90ca6bbfa885e0f541f10a4499f --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_023614_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:372cbd8d4a7676d8bd30909b65b63d039c1084ffabae1e7aa0549a679b4d90b3 +size 2463179 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_023881_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_023881_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ec93e01890b1517e58b137a25b5f4c47f73370b8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_023881_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:557d22d65f5c7078f4143ee9d44ef421e297efa007bef709319fe543be0720eb +size 2363937 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_024136_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_024136_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..de17352b77466476f0d5e70eeac2b0fe28caefc9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_024136_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38f182067b9eedd12beea620b2336a0b1dc079624f7d15a27de90af6d5e6af67 +size 2218636 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_025335_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_025335_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..706078d5884859cccc70cfe2f1ee4fdafa7b9537 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_025335_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdebae60858cff3a00219236a1628a4b87f48a2357e45f60fdd27cd233d46110 +size 2280038 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_027007_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_027007_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f38f269c92ff587014a0be34a4d9cca57b3293f8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_027007_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fd398a1cfde240627022db2eb9c4fc2a6a2dbd93b56e999f37ab35248a05c2d +size 2501146 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_027481_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_027481_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..51cc9716b86ebc109425d44f06dfed6dbdac6dc3 --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/hanover/hanover_000000_027481_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1dc332ab7f1c0c419f445e40fe7937005a78786aa6e1f2c8c29876d913a817c3 +size 2430648 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_027998_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_027998_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bc364b82cc1f26fccea446843328265f824d5a7d --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_027998_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ca05a16a35b9a125806c3bfae316541afc4262259d7444c05b742e61b5c0b0c4 +size 2402968 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_028202_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_028202_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e65ed9717fcd0ecf55ede9c47566cd86c7bfc25e --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_028202_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c381b629bbb892bba124a1a39e18be075c894721d2a58c8c1b49c6ec0cf4ad0c +size 2332342 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_029325_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_029325_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..66cec5b311c8a129e40fe468a98a8ce2a46f2e4e --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_029325_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fddc8400141e1bad7500be8f03808b7738c3be83ba45e8424f675aecfbf02c4 +size 2231155 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_029455_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_029455_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ff9455ca974e65c9f2b72cc005d0ca38eb19a33c --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_029455_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e95e0a86beab556544d110e7a5c85e1f1841983765b5d0477322102369bed174 +size 2474300 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_030346_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_030346_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e7734a0fe199a36de89c9520e30c19c5d6fea90b --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_030346_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74d5f20541e17cf25554d9a4a23a3504bcc6c43923ad754f00e24248ac2e2430 +size 2342157 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_030546_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_030546_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7ed1bfdafd5d386447a6ee8c3f83fb8903b4fffa --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_030546_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ceead44cc10fc96da07ca232977a2dd2b87cb1114cce5ec3a160ad4b2e6a8ec +size 2362472 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_030889_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_030889_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..807d36bfec7777ce8a87d090764c5490aed06255 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_030889_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5140f5a8b18d9d7f90d1fe7b55bac58916c9e605613cb2e7ba6ae9b1d805cb7 +size 2251841 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_031144_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_031144_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7114cce405f27710534d44be52ebbcf239c2285c --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_031144_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9426dbf1aaea352e1445acd0f0eea627048c73f69a7c526b0607b774d9e1bed +size 2303322 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_031856_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_031856_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..38b51230b8fde86f9e3b3a281c683563e640409d --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_031856_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27166d1803d42a5150cf979e020ff8578c64838e3fe44095bd25b6b5e2dcf1f7 +size 2291802 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_034141_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_034141_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3c0a23f0c28283e5f839e2823d02f40e82ee527b --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_034141_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e37ddec33c0dd6d1949a874589ce61b4bdada288e2c3f6064292619292a27611 +size 2297867 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_034935_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_034935_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..fc7c9ed76ab2270eb523de14c5211a0e737e0265 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_034935_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:843151f4e42f36b47e0c66fc4810c321ba3e7f9a709fe0eca688b71aeed87f38 +size 2339861 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_035491_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_035491_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..746cd03ae35b85b75c95cf9c05e6eb62f5e74b5e --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_035491_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f585be58c9353fb7af044b45b7c88a84267fefa0d44d8d00287a040a40b2e3bf +size 2257583 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_035606_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_035606_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..eca828a84da0205d0cc45fb68ab16b115ba1a2c8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_035606_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0450865cc7ae75112f18bacf3a1a933c6fcb48eff8e06ac8a1570a5b9072df2 +size 2382518 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_035768_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/hanover/hanover_000000_035768_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e2bf375a42299fd133c435ec36b78014216ad7a4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_035768_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f21389065dd70120c83cdc47c0b2d309f561204531f64ea6744b687fbfbfa95a +size 2403240 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_036051_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_036051_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8b4b300e6b47ee9002e22dd7a073d86e8352d8ae --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_036051_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ad9881ea68b1b62578e73a377d7175dbab42435f4f1641712ff51c44bb08563 +size 2425640 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_038855_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_038855_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..273277650d0ca180b117cd9de6b9df508a5e3b99 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_038855_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f8edb8a164a0cd6b8b86a9b66c1051beb6e19f1d0fa1f5851f247a21c172d52 +size 2148820 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_039470_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_039470_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b29f6eb51c4d62a9191dab21254764a9d1cfedb8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_039470_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:128da4a2d721be70e8fc7d6e7bc5ab317b8be0228afbcd8f94a183d42938ecee +size 2368207 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_040051_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_040051_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..cca46b74d0163f01033170fd6f9a8f93ff58a0f8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_040051_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a7f408646517a37c4a17978c100115fb2f42a9f54670cb1e462830695c3ca22 +size 2381305 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_040221_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_040221_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ba420215c7454f7b0bc324a4368d79c241ddc730 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_040221_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4c0d6a564e6ce59367e4f7f86697b7dae376c9bef71db8bef59b66f6c7a6c39 +size 2316513 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_040294_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_040294_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..162ddc8a1c4d541e552965a34c1c4b9f3aca6110 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_040294_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1d70a0233b0351c61183f6fefe7bcda486cb2362f7a43f864e4a9cb54792c51 +size 
2305042 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_040793_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_040793_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..54a24309447eb956985f15086e848dbe667808c9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_040793_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49aa19135f6af052146598f8bb0605f643f0b7ddd6584fbe84055e68d50a3f4b +size 2293809 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_041493_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_041493_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..9948b064ea9abb89416d83f490af13249457ca17 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_041493_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eff3b77df0be99f7d7a5087af7eef0a0555797b24fad51111b26e991781648da +size 2324962 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_042382_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_042382_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..40a17a445f33012a86c7c67563cc1e1855be127f --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_042382_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:178bc878b512c19e4581fd28427a3cc262cbdad21dad7f854b9a4af7df836ea4 +size 2656763 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_042770_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_042770_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..166b5fc0cc3570fe7d9dff8615ba45d2836b25c3 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_042770_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6f67c1481cce5e92e37467ac7e1d0fcffe3625704e3110e68ea6d6b08e4a773 +size 2194802 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_043102_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_043102_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7b496ac9655971d9626b9f871a55fa2ce7209505 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_043102_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c0af455e1b15648ceb1ac1112da8fdacd850c4ecd936c48b98fa527323cababa +size 2069635 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_043653_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_043653_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..991475800661ba0e26be4165211d4434104c1b8e --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_043653_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb5bcc84b959a1049880cf9a24f4950d260464aad042c0837df0148891f4522c +size 2085053 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_044085_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_044085_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..62e4401ab97780f737562ca999e3ae76fb3db899 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_044085_leftImg8bit.png @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:78b70177aceff87c666f1e4935eba46e93ed4c79811094c6351781a99cdf09ac +size 2158946 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_044344_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_044344_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3478444d65fcbbf5ac46e07a2ac76a8703a4d8ae --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_044344_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c504c3665b4f4f2fa04ee14c3fca3dfb76924a7164ff082273879a5f24b7719 +size 2444207 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_046200_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_046200_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..86628110598d88eb638d409988b4fa2109016a04 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_046200_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7160c8c2437782bc11bc01097fb02b5c6d102e1cf968757b0a68a99f1622a907 +size 2338257 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_046398_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_046398_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..16ab23b44fc1bb47e17bf93ffa717c6d64bd13f0 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_046398_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84495a9922cd850bc5837683d20928ee6b727355f6b7105aa730c9622015e69c +size 2377601 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_046646_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_046646_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bae6c9218530661cbd133f7bd420aa5d722f872e --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_046646_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:291a425125986e5ebe83b7c59a34281ee76aa0669940e2281c863596cbea8537 +size 2152964 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_046954_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_046954_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f7330161d6e9c83774746f7cb0ea871a46991f63 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_046954_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7c9be64ba0c48144d53febd199a6c6a9982b9051fcfce0fb995ab9d1b1287ab +size 2346323 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_047870_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_047870_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2e59b5eba8b56d3a665cd81d01769ef31bd3aa22 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_047870_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff3052b44eb0845b1d54b8078216f86cbbde6a092dd74b9f8a61c1c6e3e3af68 +size 2529491 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_049269_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_049269_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..aefa4ac1f5df07f4cfecce46b1ab97ec086ac3e4 --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/hanover/hanover_000000_049269_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c15369a770d544135c8167f01593e984cc2494eb0bbce1dfad5f432a286eaa49 +size 2363218 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_051536_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_051536_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..36fc3d3ef0ad85d7c0ff702ecc0fe9d642983ed3 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_051536_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56fc86893688648461dbd03669a07ff135f91eb463d5b6543305ab1fe98c4118 +size 2621681 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_052512_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_052512_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f615f3cb34250f03caabcda45d3ea36793e5fe5b --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_052512_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8829c6424d4cb842af07f79f97574e1b12c9fdd513b3258d7ccdb3cc71ec0f4a +size 2149616 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_052649_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_052649_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..fd5b57936b689069d04063d8d09108fb2d9e0aed --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_052649_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d17720644448e846cec1fd3b71b3d01880d99d0bd3ca130eba276a58c03b609 +size 2108968 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_052887_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_052887_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..310eadb4f2cea6d8ae179964c73b972e6e9c43e2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_052887_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:807b2751d34952c5a7ef19c0365e9bace06e8372424ded4e07f5c3ff81e860a7 +size 2444167 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_053437_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_053437_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..1157832e1d871c15cf1781884023ca45e4fab3ae --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_053437_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a84effc2301bc73063f405759874c8623672021db7770fafd65e9ad00f9d979 +size 2609128 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_054965_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_054965_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..04e078aec011434beb0b8e35986d9b65ad9129ba --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_054965_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:faba46bb62e5aff24077938d96809fbea6073d9f4384acf71f9f8c719e585f3a +size 2481670 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_055937_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_055937_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..e7c4e711feb092c07a667be34700fe6ff8a6dd7c --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_055937_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c4872e1669bd204a310cdf9ca53b214b595379da5909f7bdf08de0b4a3490ec +size 2525528 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_056457_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_056457_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c20dbcea18aced5ff11d8960c861a9dcd30ad7d9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_056457_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e0b390df10a8460e56174f3f86f05a62c33e8fb94a2b06a7b46be2e3c25ebe4 +size 2483827 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_056601_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_056601_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a66e94e333824effd4ec0cec71573b1ed2dac086 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_056601_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39d55617fd390f63d34b7d818aa0e50f7ac9f9b729537fcccec5041b4dfc6854 +size 2708946 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_056800_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_056800_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5efed30cb2984f3131afb27e0c3c9801faa231f8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_056800_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e00fcaf8d44a0b33033d0782c98e324afa6a42f12ac4d9aeb4036f5a9197ba1 +size 2742909 diff --git a/cityscapes/leftImg8bit/train/hanover/hanover_000000_057710_leftImg8bit.png b/cityscapes/leftImg8bit/train/hanover/hanover_000000_057710_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d38301a4dfebc77a72b089044fe9785d0b51f9ab --- /dev/null +++ b/cityscapes/leftImg8bit/train/hanover/hanover_000000_057710_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0f1a5b52cb9dfd4e112721114b319f67194e9b21841cfb9794269b29d7a3ae1 +size 2699759 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000002_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/jena/jena_000002_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0bf1dafd7656f0c8530d06c37102fecf440f0352 --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000002_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:025df3f4c06b1822f39f46607c7dc1136ba5feea96232b97dc2c86135233ce21 +size 2512239 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000009_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/jena/jena_000009_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2d918d572f82b99806e90166daed9f89742902a4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000009_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7614f039608784b1b2834be7b2bf961f95ab3b6359321fd9d7689e61d9defb59 +size 2080439 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000015_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/jena/jena_000015_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b8c3d905cc154c037d046899e14f094084d91c08 --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000015_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cbf0c10e3b182c057a74a34f2d14f16037505a4b3225ba41312f03cb0a6b070 +size 2246474 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000042_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/jena/jena_000042_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..53c44226d4c5763271e976013020e17319f6bbf7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000042_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0928143640d3c70f14e88531267575c9555948c2c105a8d119edb81dc1c1643f +size 2515449 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000067_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/jena/jena_000067_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..da67a2999443bf14d36fd7ab5e0cd27f36642435 --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000067_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7359b9511bf7b7430f705d61798e390fd6da02eaa647bf86961a66b6cf5b90c +size 2336810 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000070_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/jena/jena_000070_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0239fc80c8e4fcc8a6d29dbc19ce865637a970cc --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000070_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed60972d73913ff4ab41658ae32662efe25dd24ef6d5cf83bf6e777a9a814685 +size 2249751 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000083_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/jena/jena_000083_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..37ac938dc2ca3e19f07525bb0f8c8df143595fb0 --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000083_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd420ff5c26303e8742f018c7884d3bbb58618d60750337a1e49874296a2d7f0 +size 2279333 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000088_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/jena/jena_000088_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6d0481c3d8c0db618c4ab7d12015290db067ae30 --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000088_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c2a33bc4a054c49019ff16dd7b41993c2c2ebf30a458c35a73e07fbed10d12d +size 2176551 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000094_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/jena/jena_000094_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..96da8bc2ba9ffc04d3765ad8618c0e5e440dbcb9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000094_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa2d28b62e994090c08f5922ddfa0b3da51e51b05a1f6a97130950c909448b9d +size 2400391 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000109_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/jena/jena_000109_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a93460b5d40c096c8c9b614bd656b3c5f69ddf52 --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000109_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1149bbd825ec850a8107a814b44a3324ccef2db0ebc88440dcf850ad3e1e2872 +size 2135726 diff --git a/cityscapes/leftImg8bit/train/jena/jena_000115_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/jena/jena_000115_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3708215dc109a4f4dfe97c4e0045fb1659d2d7c5 --- /dev/null +++ b/cityscapes/leftImg8bit/train/jena/jena_000115_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b27f8fd0eadeabe8a2c2c462fe4b3be62c7fa8d59c744bad9f242f32fe526954 +size 2292362 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000000_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000000_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..507203500beff9700b42a44171bd202a82275951 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000000_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6e562824596144eb5c71e8f42d4df09ca8fd41197c06653855a3b3c9ae0e0df +size 2378288 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000002_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000002_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..42f0e5e4ca3dfe081246fa5430df58d48532b3c7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000002_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50fdf1bbfec2fd84769309f9e4d4b5729e9d42f3ac9c8c76e976ef9e4a6689f7 +size 2237346 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000003_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000003_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a22e426ec304ccce8a8f61f1d1ab65585eb6f715 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000003_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89975097b27549c45572f00858cab18a673cab89b741494a3535402e7a3d7326 +size 2322671 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000008_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000008_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c1a9dba654701b283490a893ecc6c817f613c437 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000008_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b600f4a17e4b4b1a828019bb5a98afb295f94ea891865fbf5fbae0d9f57ce83 +size 2382477 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000009_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000009_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2ff7ab55bc247c6c83801a3eda3aaa40686e119d --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000009_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a42e4eb021cf2b2878ca1c5db23772cc49a0b3eafd8d60acad25bce78dc3bcbd +size 2352537 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000014_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000014_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0efe807be642ba666b6e56bcdfd421c2e11ae695 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000014_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6be27fd0e6c0708601ccd446b5e222ac79e5863ca6483d1a6e390c490a980de +size 2308776 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000015_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000015_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..225745e769f1d6259087aa46ee1d5bb85d518d38 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000015_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af6324a1ccf056a6068622e04fbf804fe388ab50a4dbf4b5bab62736fc8197af +size 2365735 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000017_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000017_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0133c73345b9571e3db0f38b303f942015f39bc2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000017_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0bc94432eddbaa32b12ce923295d6e2e2a9078e128dad6435800138437b0bff4 +size 2658233 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000022_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000022_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b2442fca1b2d07136f37ac64369aaa1366c678d5 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000022_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04e528a7f32ebabb053f4b427da75d108693eb61994e734a5ad71bf6f4435839 +size 2116763 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000026_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000026_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ffd3c2d37860476e9db02316f771b610457eb3e9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000026_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b01bc876c7537e8d52c7f5cf58eb501acc6799eba1191bf27a35131392f0d11d +size 2300846 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000027_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000027_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..986b43ec5273af67b2431058411f50664261fdec --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000027_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dbced6fe310199e06a72c28a2976f4c26f354995ed62321871a252ae88809e29 +size 2190858 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000030_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000030_000019_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..13caa19448bf1096de4a5fee439ac24dbc3319ba --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000030_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99eb557c01837c8e09d391f2e66a719afda67525e13fcd7c8a4294c49f23fe51 +size 2505746 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000031_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000031_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ce3318afdafe94821296a3cc7d450785562e2076 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000031_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:250373e457fbea341f5a78b1868003838c74b507fbccd8f8b06cc41c0f6a856a +size 2572519 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000042_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000042_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ef970ea90ba13ea2d7bc9bd84accd2817655635c --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000042_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b63d4c33aa37335d7366cc1e417682ff65ffade9a7c44ca5d918c2ceca00e3f +size 2118088 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000043_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000043_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b73d02d6d8b00fc64f232869d0fb48a1009c2c68 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000043_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d5e008577f0114cdf956de2956f78034e519853b8a81e69cb54eaad1efc5c8a +size 2255021 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000048_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000048_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..914da92398e0f490ab237e4ede717e38c3dce8fe --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000048_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66c63a69ea1c0792c08d2f902aacdd97f75a766ef2e7fcf442967a45a0ea32ba +size 2256857 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000049_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000049_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0e56b2baf89497319feb815a005de18abd001bd9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000049_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ef40e3a220c641db88dbdb4734a1794a5aca84be2fb37b8b16c2c6bc4af0797 +size 2359340 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000054_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000054_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..301907288e196dcca92349841f0c7c7c64dde395 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000054_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:341452756d5eeadaa4efe15e4663eb1f1e1b0b976585c1c969927b97f7b1bf7f +size 2250255 diff --git 
a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000055_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000055_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6ea5d702c6911c2991e33fbb70914c71bcaa1f08 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000055_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32040c738fe7d002eea6ef7dbec99b125dd42b93855266fcb327d962d6683956 +size 2355638 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000065_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000065_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..cb10bfc274a75a71f124b4e998de1dbc6e66cdd2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000065_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25ca1d6f0e10a30d4fd07ff8e67610a0556033de1e4c8d67f11b85c157996772 +size 2248839 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000066_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000066_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ffed4cac13f089f6f457ccc9909338590297d7ad --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000066_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1225f731232c29983c69dfc8b99d997eb4b3df6c53d89e8777073cfbcc73283b +size 2255791 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000067_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000067_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..86f456114941fc55390459cda3d5f87953d3cc37 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000067_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60afcf7da964a6df5d948e1871d2d08ee250dfc43eb5051f6d595427f9c5391a +size 2303385 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000070_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000070_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6b7fbd276ac0711905bc40464da6cbe8b422a397 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000070_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:baaded23c282fb0db56bdd9e3c5ac04906b7dbee2a223f4823c51ff98fcea2ac +size 2233695 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000071_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000071_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e90e90aac5a10000da139e7992f9dc9edc942d72 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000071_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91f9bf5017b5d2e2df4eddda207ace196ebb6650947edcb3cb8c5bdee1edcb57 +size 2223070 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000082_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000082_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..44c5d40e98d7585f8d3e3066231f897054bb207d --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000082_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:411401b23d92afd047e6c42d840a4cf628275dd1a7881440bc4f4e300f015939 +size 2390939 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000083_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000083_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..cf7d87347d3dffb7ebcb88420fede84f399eb65a --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000083_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a9fa7cf9e94a6456e6fa6c2fe4c513091f95047318279bdbb0416a162689a68 +size 2551539 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000088_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000088_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4dfad3d8688f74ba1f2c15bb05cd3c12afba4014 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000088_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5502b57055887f9a33a0c8edb227e1b8c7b156e3a92d26a59252362d04fb3fb4 +size 2346462 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000089_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000089_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..aef6190c14a3ec99ce4e7e02c7ae5ee2a94acbb9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000089_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36a6eef7bb30f4ec9c346ddb0b26babc44f2e1e0b7d1bd79cfa2eb50a9cb8874 +size 2406624 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000094_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000094_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6ca8f2dcb960bc3331e59d7eb6da92538be2df66 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000094_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df3b13321d3efe0ee335c53d9fe205d3c3a9fa40f0a3673ad4f6c49a76182ed3 +size 2370535 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000095_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000095_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5a9a9cc3942c0b2fe39f46eae0cd26419499a08d --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000095_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73e3f9e6df345597d45a021cd290461a4314b5773dbef020c3395cb7f6a1c15a +size 2373501 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000102_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000102_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..86f8473672b747f43392911147e8cb330664d7eb --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000102_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05bca8bbba6e9af78d45c9da445a4ab03de4e38d2158f943df4fc90f862811d3 +size 2282997 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000103_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000103_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..15ce2b7fbf1faa3e92ec78514badfb1bce84cc23 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000103_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25f835263dd80b9830e06000d135ae34ce7f049d7e29364c65e18c79ac79e44e +size 2335956 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000107_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000107_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..9fa7e708c1889b45796be529794bd76b49f1197e --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000107_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ad1d2c050a776e0a9d1ecbb6b4ab93cfb537f0cba8ea786ca8a21ad1bcfa3e7 +size 2450759 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000108_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000108_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5ac1536234983e4042f567efbdf1110659470315 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000108_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4ef5f083ded3f621efab95516fbd73b2603129e10f51e83909f595940663224 +size 2674841 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000109_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000109_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a8ffe9709e8e1161508428866abb2ab548e515e6 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000109_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb9d12c8861c48bf5a5a37d66d046584f0fb87212fcb42aa1c194da3737944c0 +size 2810146 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000114_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000114_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bebe1af22c9312e5eabd9978e90ce63decbae92b --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000114_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49b78eef6b48af5e6539a3edc07d8c81a6b55fdb3b16da31b69bb99337b8d425 +size 2322114 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000115_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000115_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..eb996a806debaeb0a28230cf706a999f7837821f --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000115_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e40d39cbe84aa1103f66e257ab1a520cb270c944939065b78c13fba7bccfa532 +size 2388038 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000126_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000126_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..659cdede7503a916d787a3d6b4594a0038be8061 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000126_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:bec42ad65a78f3f82c9a92deb688ba08523d1610362d730b0c36cf4a0eb24bdb +size 2282417 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000127_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000127_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d5066dbea3c11fdae50179c023e5905b4d68953b --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000127_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6284607c2281875ce7c804964d90c917d2405f9ac03592dc395db4edfe4863a6 +size 2119847 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000130_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000130_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e61803897921114be4acea8f7d08e852133ec080 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000130_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9288444df846f790f85f83cdee87e74ea39601c676352ff661f2fd49275f1290 +size 2170389 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000131_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000131_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3c98d05f3fdf5a60a0306b8ea425e60153932fe4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000131_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:41d3d57ceb454c6f95ffa2353819d2c5a4a3e980511ab69302358c1297f09d7d +size 2120147 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000142_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000142_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8e0c13ac7a2ed8cbc9f5e004643b6054283aaed9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000142_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f37b7ad32b3ef209a6404afb5bd69e41278abb247199c9a22e88e996d128d079 +size 2111597 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000143_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000143_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..961a8ff7a5ddcc9b3e4fe37363b185be6e15d38e --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000143_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d59290355ee9053aaa04606cfdcbd9f12b7f68417212ef28bd726c5446884a02 +size 2185925 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000149_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000149_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f299f034cf03cf4a73c3548611e620e0ae0bd938 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000149_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da183d43cdbf10ff5e753f684405dcfb5c935a4b53e4fdb9d08d8fbc5a843fd2 +size 2054211 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000154_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000154_000019_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..bd6617a62d7c19b4cf0ae345e26dff0ab603f3ad --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000154_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61fba76938990596a13f6148b608b3ea2581d4defb10b2b112d38ab7718ba612 +size 1974838 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000155_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000155_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d273cddbd7b11cd056af6623af311c583178736b --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000155_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a95889f95c4e1b0bfb984fdcfd4315735af6b08d3b6ed1403792fea15d174977 +size 2231789 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000165_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000165_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..197dee331beaa196696987d43af1f8bf72c62f15 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000165_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd741a9d680ca1d67046500c9e866d115753377429942d68aa0fc5352f6e03fb +size 2466145 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000166_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000166_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0ac18ce4528ab6a5617b5b65aaf6c5ce2de7944a --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000166_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afbcc38b7ab14e998286b8834608b94ebf06e790abc5b5af3015c2e3c16c457e +size 2300775 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000167_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000167_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..1a1e0b723be1284de77ec2b6ac41ab1d0582ded3 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000167_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ebba385e3808b3335892cd4b1b018f28b5de0751c65fd1d20ea93684bc466ec +size 2632145 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000170_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000170_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..06eac984d39f96266c4fd356c7baca40f431b7bd --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000170_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e2cb7028db77deaa1764f9efc888da3c89b360af71bdfa7fe4909352712e622 +size 2657372 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000171_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000171_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..68bedefebbe66ad9d8b7589a028f225acaf07e69 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000171_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cf67ab08e903b328b441d556213a5714b8446352411a35656222413cb580d86 +size 2637423 diff --git 
a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000172_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000172_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..514dfc66994ee95e696114008b47628e8133a562 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000172_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23bbf34e5a34d2f72960aa36a1897432aaad348c9bff7e2a1c7947728aff3c5d +size 2570995 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000179_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000179_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..deb952df00474304d78fe9e117b6951b6d04c2b6 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000179_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4e0ae839ba1e14025453b742225f4ee05d18d73d43b6a714876f0509ac77b36 +size 2400529 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000181_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000181_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..1f3e040c498a300355c018fe4f2640b72e07b385 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000181_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a05c3f0d5902985421e0c7aebebdd4789b44bd9fbe9a66d897bda5fc984ea0a5 +size 2402476 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000182_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000182_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..663a009b6597d9c73ee68350fa439a6f07313ddb --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000182_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b6d77191aeed46b630de35d5e57c22af4768d8925677a1831ceee01a1dd41de +size 2393737 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000183_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000183_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b938808a4bc44d8468b160a1cff7399376ca664a --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000183_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2a0a082c8f271240d1237250ae28451a8ffd806c1bc730aba801f35638b157c +size 2333159 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000188_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000188_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..56bdba1a12414701c51665ad9af793150a46f2fc --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000188_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:023be20593803368432f2d4aeb730ef093b2943717e0fb524ab227b87e7c956f +size 2296790 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000189_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000189_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a103cb90ef3d5562dd76013330af168bccb07656 --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000189_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f38619f296ec39f40dd481f84d9cbac4b716482ead55cffc472de4fb4ebeb537 +size 2369850 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000194_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000194_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..73872f115e137fd426e53f58be183b1e959077b3 --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000194_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2aa766c15de62ce1d2cdf4f26f254de7a3727299243537cc3ed4dd611721fea4 +size 2149491 diff --git a/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000195_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000195_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ab67f5e8820d587177a72bc926920233f950506f --- /dev/null +++ b/cityscapes/leftImg8bit/train/stuttgart/stuttgart_000195_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d1faad7bd93abe8fa64394febd34aaa6cc8ad450e9fd77453727362c2596020 +size 2035252 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000005_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000005_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..07c68bf56fdc3a5825850423cd4d8fb01d70949e --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000005_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93e820aa0e0ed4fec5289a1388e45220ca6e08958319a0fed5d81905d526c9fc +size 2469361 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000006_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000006_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..1df1a2b6151c39e247b57f62dca5bfddf061a10a --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000006_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49a918eeb70f7e7592436e965478b3a0cad610c97512e18bfb7fd0a4b2cb66fe +size 3031500 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000007_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000007_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d39cbc3ab6c26b674267e221fa68ecbeeca95f41 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000007_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e694788f673f1fe2c0616bff558f0fa1ea7b0825859e5197c40b69588c898316 +size 2685712 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000010_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000010_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..686a5b9c422a7bcca071f3ca9bbe0583db24ef12 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000010_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d0025aad59722594a845215735870e01e8c98a655719a85a8ed35d3f1ac470d +size 2461377 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000011_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/tubingen/tubingen_000011_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7ff5c1b73a852e4264ed7b13d695ca872176cc8f --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000011_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0623fbe29dcdcd14b43eb93f1816f4652e24c6373744ca4634aebd970c20c40c +size 2648999 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000012_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000012_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..640baaaeb517f01a01688bf5d7426748e5277864 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000012_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2c829a3272ba59fdcbc605cefbbfb2afbb713d0649d6844467bc50edd537b953 +size 2681445 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000019_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000019_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..adbe01f585499f904b4987769b227121607dff34 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000019_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d489455a3ed1fd6520371c1b983c1980c4c6f6aaca906a5ccd3f8b1b73d9aab +size 2869453 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000022_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000022_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6b48ccf5d444a7540d4406add6cddcb6ce93e172 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000022_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17176e608f4d1b10c1d601aff4da1d41e11dfa49db7524a13fc6461332fcc1ca +size 2295998 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000023_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000023_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ecb0dd4a70b9861576514018e6b315edf3467855 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000023_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac423d57e2de5af0fe2a719ea63a7ec33468bbb9d6cca88e1a2ca0e550dcc10b +size 2309783 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000028_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000028_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4c2130affc45938c63efdd3f3afa0a6452bd6e4c --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000028_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6e4951b273fefa39427acbc34f087b566bd1f5a489f38b8806009607e43e96a +size 2602675 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000029_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000029_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..64762026e1198ccc6a2ce81552404bb367b1d1da --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000029_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a0711944ff71e2790b66bf37492e8621738479539ef45b73bad34f5947cb77d5 +size 2390409 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000034_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000034_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..84194dd5b1ff6654489498ea5162989688efabd7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000034_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bf1e1d9a73ab9c20d066d0874687c53dd921614a6a8486e8d8a0f372425cd9e +size 2155084 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000035_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000035_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bd9ab64733bc619b355d84fee6b99e60fa454506 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000035_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e4503d64f5177ae2edbb029d37887182973ccdfedcff09dceb567525d39bec4 +size 2080623 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000046_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000046_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..1e51365d5625643802d74cad607bca092d5ff89e --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000046_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61709627dfeeede281d09aa61014e46cb3a382baf80798cba1cf5cea9b97af33 +size 2332490 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000047_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000047_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8560cfbe61a4587611ade18deeb4c2e4d565fac3 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000047_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d831aaea3330d9f7748e0c56f6849a7c2ee79c7b0f4b85b7193af648495925b +size 2209702 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000050_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000050_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..14f9f8abbb390d1aa4ab4a5f21be7d73a7ed9ae2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000050_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fca10c187480aa548bba25da6a95b9c03e80d65a37b6ed73afb2327a27cc8ab0 +size 2331996 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000051_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000051_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3b3f05143d6a570cf817e85d9e40a9ee87738a9b --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000051_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3a1aa1b04a94efd00a7ad2abd42f2bfcea1184e3549521bc0871f99c92186804 +size 2232902 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000062_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000062_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5f907172d64c04628e1678e69683d6ca840e941e --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/tubingen/tubingen_000062_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39446bb3337b4c77e8226be3d7e8fd8aaa0667b7e3e7db7620b1ac28e486aadf +size 2482803 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000063_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000063_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..11836351dfccc2d33db9b39bf5301f47be9dd395 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000063_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0eb2aaa4531baf87337dbdda72b7b9a25d6e73778f229a583a7bfe6a6f53fe1d +size 2469055 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000068_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000068_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8708cdd21d1b33c18bf1b044e8b090609d03af42 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000068_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c30c785a5616fd73aa34b38b7798e68ee90dbcfbf7553578ba375c2ab9dc84b +size 2628334 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000069_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000069_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ca43d6a680d4f8c543ac9db8f0775cb7710ef0db --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000069_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1002487acc27d0d8e9ee67b580c8f656b332620bf8fe53f7478cd634459155a2 +size 2431873 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000074_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000074_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..88d5d200e9121ec992a4ce24c1296be6a2acdce9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000074_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:338c6a692ac0f3e3a8aa435a37d9ccfbfac7352cbb0c416fe10e0f39d40163b4 +size 2608201 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000075_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000075_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d21b6936d9de3516d30db4c378538b40e2724b1c --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000075_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2aba2fc332abe51ea95413e1136c44efe6675db9ace14f12d59c5c7803491ef4 +size 2543238 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000077_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000077_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c9766682b23c4da82f85b92c85483ea153bfaba7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000077_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b68d9fd27d288d4900bdf27b96a83a47ef6c284f7d0d36fd778ec310630a9dd6 +size 2170607 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000084_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/tubingen/tubingen_000084_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..15846e45c15f25d279106cf9e1250b3a885f9b97 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000084_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:232768fbce27cf12ea6962ca3e8646860c9155b0aa902b24dee85eedffef99da +size 2285595 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000086_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000086_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..cdbffccbdeebef3ab403bf364552e8720fbb4b78 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000086_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:239ab09ebb9c61b823d0e2c46506f9f3b83b552fef5bc715ca94986de4eea4b0 +size 2336059 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000087_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000087_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2cf369f7a7f300a2edaf8954a5f10376f73ae8d3 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000087_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:096094b91a6cc43c2490650466f01075af937eed196edfa149e3eea4336b420e +size 2456133 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000090_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000090_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e7d900eb0c5bddc439c20d5ddf024624a2331364 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000090_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77ecb75b43bb78122752c59c6b1ee021286f79555cc7261be36edbe4bd1f64c8 +size 2359089 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000091_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000091_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..33f8c41d2a0eca53d86b7aaeaf98641b38157e91 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000091_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d1f90fbc21478f83d3ea0c57c84469597facf5c79816f31d93e3f1cbc025781 +size 2296241 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000097_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000097_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..90def085dbd1c3a147f2cc2843ef0cbf6bb88751 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000097_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:67b8b5a61ab3b8a9939cb246f3e82e1f78ff974be2ad778b6a4985593bb87d62 +size 2688766 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000098_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000098_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..04f10b82c0e337623012c6f13d31eef512203952 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000098_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ce46e43f3e5e11e301c78efd582d64b2375edc42a12d1f15bf19feb7484ccc4c +size 2370848 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000106_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000106_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..35d2b694fe5445cacef9f08719dca34c83c457e6 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000106_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb725a04440fd18b809d9c828f4f5a89e1ec273ad7b020d8931393fd9443a97f +size 2758183 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000107_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000107_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ed940559182b703fcc0e2f052c7a7e6b1073f72b --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000107_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b78ea0949c9af021066975ace1a50eb051707aca471b1453a07b38e3040fa13 +size 2248021 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000110_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000110_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..73e9974c9d76443bb4808ac97a51aaf2d3a422c9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000110_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d6d0dc1d20bdf8fd405caa3f42b89f5caae070cb09f6ce19409ee7ba5810a29 +size 2638736 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000111_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000111_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b9253149c05ed70c5cb7ede985b6ce7ab4ee6be6 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000111_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e1b5a5e209e8aaa5bb1d216699992d01e7685f23af40c4bf78b755272157f34 +size 2655259 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000112_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000112_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2cec77d1f8977fc808714d2fb911c41328a4dee9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000112_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7bde50643d1b03cf734060ac49520f4ff786f4952065b20e73d2db93e575e65 +size 2631378 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000122_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000122_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..59d3f2caf4f10ff58919d03826521fbc9ba0fb10 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000122_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ffb8282093260f00d7dfb98c62ea92e4b5d51a8a6c14b1af969b73014a58a78 +size 2400358 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000123_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000123_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b54feeac650bc965c7b268b33fc5603963a78f82 --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/tubingen/tubingen_000123_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0de6f8bed261cb4836aba010e144ecbf45b32ca999cc76f5a1c3c85fdfc990bd +size 2219699 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000127_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000127_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7d90958553308c783eb124339d2a6b9f3f34de91 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000127_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21567d8e8ac50e629f73ed950dd6f69fa70db63a93458954c52ad335879571df +size 2643201 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000128_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000128_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bb017f33349dbbb26d969be7816bb057ec74a6d2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000128_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2713e2cf3992e01c8725382eab81fc62fe71b0b0526aaf99e60e7c15932c5e4c +size 2770482 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000129_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000129_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5b5834d3c25843bc470362425ea70049db594f72 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000129_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d56ef122b36fa98c73fecd051a9708881e6b9ea6f9c0f3999d0bd70a06227ee6 +size 2575661 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000134_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000134_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d946cc6164dbb31399d77cb0f2aa5e35ae7a7ea7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000134_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8508087c9cb9871d31239459134372b31cd72fa1f0121465c9e2b6f6deb73885 +size 2626322 diff --git a/cityscapes/leftImg8bit/train/tubingen/tubingen_000135_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/tubingen/tubingen_000135_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d4450d32beab5972b850bf28b70bf9e02e86192b --- /dev/null +++ b/cityscapes/leftImg8bit/train/tubingen/tubingen_000135_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c7544191f10d584685c9f6effb644ae26f22de3d382c791e04df76a01c22870 +size 2754900 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000000_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000000_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..87bd5066cc91bcd96ae0cf25709531e756af6ab7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000000_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50b743de9c85de7f1fb4894e202906ef2df62711204f9091f6bd92662949a09e +size 2189273 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000001_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000001_000019_leftImg8bit.png new file mode 
100644 index 0000000000000000000000000000000000000000..c1cfe5b3f0c735122d4d3b013c3bebef5d4a8c1b --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000001_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56bd9a43d275c4bf2277e12077adfd7b8693659cefb95fc2b1cffc8e826a1f2f +size 2178944 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000002_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000002_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3af911d3115602510fd085ba8efcf44d784f04e8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000002_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b4fa1f8a7d742fad6b813f1a3569a3bf6612e05e695dc6cac429bf79f3f6925 +size 2449927 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000003_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000003_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6fde1e7d94fe5106f6d861950ee2a2c992ef4648 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000003_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6f7b2978f1befb70a4bfd8102b6e01d515cabed6dd5d8998209bcefb062373d +size 2181752 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000004_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000004_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2c72db492e37f62a657b14744580bfca6483dd32 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000004_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2c18edf10e49e691410a32ad15593319aa01c10e6ef1986b793038e0edf4787 +size 2730581 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000005_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000005_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ebd015793ad78eb2dff7571ea2f6ff332f46b1b7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000005_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:623c26d586a733ee0945c97bf5e25b41d4be261335c18b22e80b927d34e535a7 +size 2453892 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000006_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000006_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..31bfe69d5d0b3ef0bdfe75b7b1048254ffc18f1d --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000006_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4def1dceeb6dc2620d44a7d7cc7796dea4baca43670fc5ff73ad6a26097c678 +size 2703618 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000007_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000007_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..206a9f3c87c275cd86744a8998def21c36f12b3d --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000007_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0a14f9fce7b6e318d259dad5bccdddc5dd8b7bea2622c188ec009f9ab26735ee +size 2639669 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000008_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/weimar/weimar_000008_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d8d16c3a487f5add4aadda762c45dc7320cbcfc2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000008_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaa8bca4ed3188fa4d25ae3bf5f0926d5fad6288f65362a8f17447642c183418 +size 2185130 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000009_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000009_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2cc9ba499ccfc03ce8336bcf772bfd033c99699a --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000009_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78bae62271244d303bf4d725f59b89410e15443a0fb3eea9532a924314ddfdb7 +size 2496358 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000010_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000010_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..14ee9af14ec0658e3f95de4005a1ab8a3d69cd82 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000010_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f06591bf0ddfa3ea1fd32ab72bee5a3910f06d22970efeed24ea380c1a04beb +size 2464646 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000011_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000011_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e7227902fb0403f0bfe0d7ed66ee0998c5b37c8e --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000011_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:649b81be52c251689edffb08b52b0a76b9e9e3b27db07c33c7fe9bc5a438845a +size 2470768 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000012_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000012_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ea748fee8f26bac60c2e9c059d2ddf09e6ad24a7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000012_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e6c959683ee3797e7b85cb213b7088ce44765b8494cc71bc25b6ba20a11366d5 +size 2493810 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000013_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000013_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..02b530be6c56e0e4aeafb6d184edb551924eff61 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000013_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:595c15f8dd9f8fd447599dd4d67ea0ac0b95296519f5e8f7f466aaabf37571fc +size 2988480 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000014_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000014_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ebfc49bf57428ad1babcb6bcef501ab348d1fe2f --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000014_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1b57366cafb777e9787f8b159bd58350e70f69ca8b4b722c5bfad6db69191be +size 2535338 diff --git 
a/cityscapes/leftImg8bit/train/weimar/weimar_000015_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000015_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..982e266ad970ff231001892a75f03156fccaad32 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000015_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:219cb668ac1941f31a78fe73fe88d5e9423d55bc5afa48b91942fb257a37dd4a +size 2048116 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000016_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000016_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2d7dc914b6b523d7be5f11936b4fc55fa4d687ef --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000016_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1b3337292a1e0e000c864fa75b5758ff7de5de1828cf4fb17b74c793890c99e +size 2409515 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000017_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000017_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..c1c1aefe8d7e13aee26456dd22d23bce1e92efa3 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000017_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdae19f8ed8d62130f1c4fdfb19b8718a071013d576e826d5e4c70d2e4739c3e +size 2640858 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000018_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000018_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8febaa93fcdea494a217240cc657137621a3c500 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000018_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ccf575bf2f947eb5e31b9e85d645cbeb89278f5cf30e75e9cc7355579e39941 +size 2399825 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000019_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000019_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..46d7bb62cfbac45d6aea1c92cfc38cc512377d5f --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000019_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3da1089280cf41b1fe17ce2a37dde64dc138b148f4d120845ded2f5030fbf0a0 +size 2487169 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000020_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000020_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7195f5532948431835f99b9c56a2795a47df20cf --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000020_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be3f87c43cc8b284183055a50e013f451a5ee55859301761824975729500f790 +size 2588041 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000021_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000021_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6c71ceb882eef348c1b92fc0a713ccdf4c864090 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000021_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4f992ccf2dacf09339753ed96b838a3f7d3829e60a1a839f96ae42aaaffe1fde +size 3096487 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000022_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000022_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8977c2c15584ecaed2173705d4b5f318135a07e7 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000022_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d02514351604ed62a90a416606dc3712aadfee780a321a12c216b636e0b7a0a1 +size 2216955 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000023_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000023_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..44339dc755101e86d923d2e890041ec0f1d7aafc --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000023_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d773891b1137a2316cf05dfb352c21cb0a62faeefae175a33f0d0407fc8ee581 +size 2402782 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000024_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000024_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..cc7b02c5f401240e3d6094b184581579ca5c913e --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000024_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7774bc5142b813c5109520fb2fd5cbfb4168cbf591c12675805ccbb8c9417fe6 +size 2473732 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000025_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000025_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..145c786361b3cc16b5c1592548caf4232cad8f25 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000025_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ad123f5cdc3c1795bf6c96509e313ada76a131b7f9dabb0ca77bd45c00aadcf +size 2304712 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000026_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000026_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..979827c4a5958cbaf6e6fe79e512bd0833f93403 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000026_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:280f94ed8f3454366fdefb2e1419f96ef9675fecbf6c13017ee25eb30fcfd263 +size 2322199 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000027_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000027_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..491e3f588a1e122340a1d74b1c052a903cbf5683 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000027_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bf8d11f3f9c8ef0c8a24f71bd8917dabb5330bd6da50254672ae68f3f5477af +size 2473813 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000028_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000028_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..931099f08daadc35a08534340ce5b8ce56eac0ef --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000028_000019_leftImg8bit.png @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a314a4cf9a21d08f972efd5efd6ea8f843df2110c1f2488d78a04569e70fa624 +size 2675445 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000029_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000029_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..93c244acea15225f7f1f596bfa553fc5d22c55fb --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000029_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a15d733568c53cd8813c7addea3151fb62373914ad0494b06a7fa221d4a16bb0 +size 2439259 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000030_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000030_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e0e5dfa7c9324754fce438f2757f8bddad347216 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000030_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db5a294d3bcc6ca2d28695e77d56f6547368606fd6a99cb26a575444e866ca8c +size 2309759 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000031_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000031_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4fe288871d33f051e7968d2e19349178a81b38a3 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000031_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d57a3222b7bdf071aa8f78c3d492098c5e365ef1f2e3ff4760bb6ccd04f2c71b +size 2126304 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000032_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000032_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bf876861b87b1f6236311cc1b2a8072b6f2e83b1 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000032_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d167110dd5e16300bb322bcc4e771877637a29e1ad35d709b7d2ed8c551ccacd +size 2791916 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000033_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000033_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f16cbd347bbe3663397215a88c270f4026c7fca9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000033_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff72b485fb3c5683f4e5403059de76b1faf55726661d2031235e46a4b5c52f3a +size 2480850 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000034_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000034_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..600ac78d938bdf348b9eacb6b241e99211f1efdc --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000034_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bd21dcdffe063682bf6612453e6829d208f151a5cfd2dad3a67b48f4d459e53 +size 2299311 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000035_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000035_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..aa8f45f0c0e136d950ee03557a4e6c36b9e8cd18 --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/weimar/weimar_000035_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:116fec883846a81107b78df25894092e8954b5114a8b1fd007aa2453e33aea99 +size 2315385 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000036_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000036_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a8937f93be692a57a56f39319621f9cfb9c375fe --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000036_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50996bab6c97768ba490a66fbf56b9eff2bf361f098479b638d66d0d1209df21 +size 2547502 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000037_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000037_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2a2a6ff468b771d3268d93830c7c31fc3d2792a9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000037_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50afcc61f8149e95c23d162d11c3f88e4548e916cf6d172d20b0fb653ceeedc0 +size 2360357 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000038_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000038_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..32816eb64ab881a7724f85b390a3c9661fb8a6e2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000038_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84333ca8d374c3259d7ff2ec8bc63d4da74b19b8c9cc03893221b170e63be12b +size 2553880 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000041_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000041_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a763c0b912db63d6eff7f5d35bcda71b333cd4d4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000041_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85e2cc3941cf5dfa8cf0c9abf6e296d38e30eed87c2b46aad1c86ee3e9844cdd +size 2291035 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000042_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000042_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b24b639ec30ed082bf404ec2ea4aac7581e56100 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000042_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a922adf9fc6d0ed86f4288193a608366e61b39bc2117d837a11fb131be494684 +size 2441219 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000043_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000043_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5adac6091a7764e0522af1f88c4fcd588a33399c --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000043_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcf9bf97bbbedd174121c7ba3d04cdb988256db3f75e8de5970722217894ed8d +size 2770197 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000044_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000044_000019_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..ae88c2879bfa38c1044003e41373a8545c84e971 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000044_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ecda4fefd9710a61ec04c1f51a923d13887124c70b1e55c2a6f34de9aac4356 +size 2524848 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000045_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000045_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f2a9c534506ec6ce8bd4ce4bc2189e2c9b8137d8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000045_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaa659fe823cb713664d417dd87bd104f29ee3014c693c2bb2b8cd464c4c73b0 +size 2186885 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000046_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000046_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..51c4dc42d00ea646de2dec038122343cf293719a --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000046_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88f0adfe7120232421b65f1b879cec8291d13a9bfffb6a03a58a23fb0467c4e2 +size 2606964 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000047_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000047_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..363426b53e1116db9aeec2c498e43d360672e369 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000047_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d9e2196c95d5b7f5d0090b784f58b6ff1fed1a1f0b3aa43ee20a18b8ae7cbeb9 +size 2148078 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000048_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000048_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..bb3a836d5642a97e0c2b1156cb1899e2463b6259 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000048_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0790b1e8397dc2fe7a619b81f43cff1aa74bae4ff55f8fe4254553d5c7134d9f +size 2314252 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000049_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000049_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..10e88e50b661f9a9ed350aa8d987447c02357981 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000049_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b3f63e5106232f0fcce326a854a29a769f25c0fbd95a3866486630456e6735c +size 2491856 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000050_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000050_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d327ca63a7d99fe3fda1a7043ef689aa392c9500 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000050_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:257fa36a2f938e3bd8b493b953f2c42d021ab356e886384cb8e4b7ee29804d89 +size 2445239 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000051_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/weimar/weimar_000051_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..cf6439e1537f17abdd364e790ac2db77eac264da --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000051_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:010650fa6cac1f0716c1c7385e242688ebfef5f73fb0e1c4ab0cbdeaea43f321 +size 2537708 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000052_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000052_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4f66b9b551a59565005b4359bb07a907ca9ab483 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000052_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df7c6c1edceed08ab0e71e5c7d53e44438195316119b214b6a831f130c7a5886 +size 2411728 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000053_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000053_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..76a4492b974950a70df7a1e9c98712e2685e0dd4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000053_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4ab3c5d88b0471ff781d917a279f4f67b9643ded91f6227c5ac1752fabd688f +size 2561096 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000054_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000054_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..1260054cbd64f034e47bbf0f2ba87e78c6eb8e8a --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000054_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da101bc7c979d2857d0c8cac46cf792269a765d53e5829870db4d18a88534163 +size 2634766 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000055_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000055_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..6e668c011d58b6aac125abbe4c1728c814c06bae --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000055_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1102308ec08a13ecd2ecfdffde5755a466c62ae408f3a22b372f714ee5e28243 +size 2357619 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000056_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000056_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ed18b56b88cddb5dd58f4813fa61400189625fc6 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000056_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:400f6a05b4ea08f169c1c033495d89df3be5636afb0485c26d1f039fcbee3ac5 +size 2526583 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000058_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000058_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..951d33c3f55fa6ef4530510d61143056ea5a1889 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000058_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ccd33e435cfbfd21c7bbb511c75e8d91f66d3f0f96d47cba43bce95f54a7af1 +size 2168994 diff --git 
a/cityscapes/leftImg8bit/train/weimar/weimar_000059_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000059_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8f1298eee67867324ec6efdc24afceffb0bbf0be --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000059_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d13a1605f4bf16c1f466dd79ed4a6fe1de59d319b094b5d33e248dfd8b1a8ff +size 2188958 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000060_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000060_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a8a58121226955764f2db65ac84078e7b5a98712 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000060_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1d65ed9292a458308635750fa73d7f6dcc97684d22c5447dcd0dff7b7ef6260 +size 2299231 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000061_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000061_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e4fe92106465159b17055097807b30580f19307d --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000061_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4cfe4f45c50e1d8c29d5191c8ec7a112fa75c58633a2ea5573a634a01813b660 +size 2362828 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000062_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000062_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..db462c42450b7307e13e92a8428ade322b66f4b1 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000062_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b29c30a50ed897cf5330a1826bc3207c996a9be8d69944dd9d126bf1708ca619 +size 2264454 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000063_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000063_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ad2d78e3ba5b56024e41caaf58706f35e7c4f6ae --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000063_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab23c27dd2f269dae41ce2e2223f96442cf23464362f90db5f63d3464ebaf89c +size 2482458 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000065_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000065_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7113af46cfd74c4e9ad5a33a196ea0f1f62d0275 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000065_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82998433dce47b4d6375eaa8302866427e841c8ab7e13456fb5e882f4679b42a +size 2546019 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000066_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000066_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..59776f9928fd2ce7f333d27e62b456a123a75a8b --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000066_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:183563bef80d301195431682043b40b5fe10a61ca01b0ec6275ea3268d791dc4 +size 2356670 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000067_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000067_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..67aab45432f7bdc632fbc1a1c2fbd31760d15807 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000067_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a534b414c49815c257a174d3cfd9df5531469d10aefbd2b6652e2aa8e2b9634c +size 2402322 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000068_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000068_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a4d219a1b185c7bcf6bed10e67c7841942f4363c --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000068_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9302cbd9776b6ac4171da59a7afe102e077844b2026d53a9d7d29b041665eeb9 +size 2204100 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000069_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000069_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2ed8fcf8b5297732ac4377904c1669b4b589bfe4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000069_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88280cf2d4ae387795c1a0f5177fba480234ff59679586e77098ae1eeae53002 +size 2763704 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000070_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000070_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7632ff56c880781f701f6da48299435195c22e6f --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000070_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc0de8e1c6b1f8d31a64d52979d2122d2509e86d9d699ced0e982f73e43385eb +size 2386485 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000071_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000071_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e2f67c2ed60f0152da56194c4ec8b783aa371abd --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000071_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6402acffae05624f7d75f29630aa983f6bc66f3580353228ed62c54850fbea16 +size 2277356 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000072_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000072_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..44056edebb5cf5fabd655c2d35d8a0c1f9f22b53 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000072_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:405a243e7e95bd2a17ec393b83c9447736c9d95102e41d954b177b05adfaf78f +size 2364425 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000073_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000073_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5515d5dd02d6f8ea9f26c1d69cdeab2457f7b664 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000073_000019_leftImg8bit.png @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51e6cd82486f93ca96318955b219ee233be58befb6722d463e14c4768adab5df +size 2112905 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000074_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000074_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..db2b704f747696b8d534c5b281e719fcd38b0780 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000074_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b30d0862bb61405ac86aebd099a7fda593fd2d497428f23392708816625d200f +size 2125111 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000075_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000075_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..46be87defaa381d5560afac93767e1ab4603bd3c --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000075_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9050988af27bad369a2b9a9b126056e50213d3c1b995e2c75abb066e3c2b399a +size 2522621 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000076_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000076_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d952c0f3322a0f01b084702586c561044b46ff7b --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000076_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:feec71c2645e68235d05e30053c4a0ebf179272d48ac728459447de5381e36d9 +size 2411499 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000077_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000077_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..31b66fed300f8b0a932283db15be21e67cc51d7e --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000077_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d162d4d79b073fbd6de1a07a5b01e030ee7e7b6afe20ba45025cc8fd0305c164 +size 2149603 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000078_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000078_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..61e30118046e9e4fb85fbb709b83a9dd8637e7d9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000078_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab7dd52c5dcada503107d65680abb5668b11c85454d07110bf895b0f04342516 +size 2327777 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000079_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000079_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..90a47e4b4edf452eaab0e25f648b9ecb4d6b81f9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000079_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d588191e0405e5e044611b2ef54adf8fd664250a2a39e8f2c3b090b6668dc83 +size 2265025 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000080_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000080_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..9ab38b0dc1bb506684a87f2cd1c99fcf48e32947 --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/weimar/weimar_000080_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:832431be3cc7e1315c5a2d1dade871d5b39e44678c08315c9cfcb0773e2d8ecb +size 2165466 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000081_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000081_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b68d80506cbb87605ada02f6fa2da4973b32104c --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000081_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cf1562c680af9cba2e93378b7456df5ae13584cfb6ed9d9722c12bab06ad1f4 +size 2201949 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000082_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000082_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7a676fe15ec3cac13ecc86b6a97a4cb4c5bab7e8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000082_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ecff8e1c67dbd401866bdeec94dbf2b7f82ffe22ffe6b7d14ca7ee608fa2b62 +size 2392835 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000083_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000083_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a8f375ecda5e417ae4a869bc3bfec131a8f9b6ef --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000083_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:549b6bed05755e65fa76b39c95d48bae037a03c486eec260f1a6afd3ce4bb0db +size 2518406 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000084_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000084_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..afbdf9e63eff9a93a1894c5e549888cf49790901 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000084_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b55aaa50dc32753ab4257eb36dca62ecbf0a386c5bcdfbee3263228cbc8e6eb7 +size 2400416 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000085_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000085_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..89e945955feec0f5732ba956e791cdd8aa86dbb1 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000085_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6874d82f2d75da8c870f2a8622f81c590efc27e5f1f4f861aa296ab83c7d7994 +size 2197766 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000086_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000086_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..54dff485e5619cb6b38a7a2a3f6472abe49d1450 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000086_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e8026308a7937074d120a0115e4c9193342eca8286f268433c9de4368cef99b +size 2280710 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000087_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000087_000019_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..65a1bf50e7a698f30dd89a24e39ab8e67f76af63 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000087_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4aad7d2701b9812afaac784404093c3abde5d71b884097ab9230629ecec7921b +size 2331185 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000088_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000088_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e2795dff98cff5d697d811c5964fcb995a131d22 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000088_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1491ea18a6299af77fe5949ae1ec31cfa72684bdbb4d5eebfa592d17ab35b011 +size 2304836 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000089_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000089_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2a4242daae54775f891e75442f0d834f4da2c6a4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000089_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96ad55a6712a00e6f1c58188e30c623f4ba316f16087d1f6b52a7bd6bdb1443d +size 2563399 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000090_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000090_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7d8f18b0cd0a5bc60dbfccecda91866aeb2e166a --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000090_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:602edae55d8e63e1b81b6940380a19d08988286a077933ae90d37495711e45dc +size 2424637 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000091_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000091_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4b3cbe6ac9e919706d5bf61ff4600b2468cd9b2b --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000091_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42833192fe122603b6fadcf50dfc6b7e654350bc1e1316c456efab90082c3d2b +size 2538026 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000092_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000092_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..a84fd117330ce19970679aced88ac7770e6bdab5 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000092_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:05b94c4aecbc438ed79a78eec74a0f7236c96bf68816a929754140c1f5c84c58 +size 2273046 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000093_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000093_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..49ce0a4c1809efea842bb0a9d4569dc832cf4dd6 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000093_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44f9836cc4f6183964eeebefc5df26ffdcd3e2f93f72d4888a6a56624be94b59 +size 2412430 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000094_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/weimar/weimar_000094_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4e47392f54e876da352a2583162d9a8c27606dbf --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000094_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:562ad20d7b3175f2b5ef5b75dca6f399afdd4be08ebf377fd052956621d57cfd +size 2166163 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000095_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000095_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0edba188882e112f351339a5ceb7514205503706 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000095_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7dc3f5876e3ac92a02946d7b8a0e1529d8bc3db8e1b58448fac3fd9511ccd0fc +size 2425227 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000096_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000096_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e925188f9b05ae2ead27fbcb9c7e7b777e9bcb97 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000096_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf45e26ea85524da719b8a7756965521a776d4a77dffde04b975f6137f0997c3 +size 2354663 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000097_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000097_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..8bd66cfdfd3d3ba6157cebb2b1e8cb40f7e6eda4 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000097_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8159bc2820870ba6efb70102c6be6395fb5db8402640af2f6fc5bbfc81691daf +size 2085600 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000098_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000098_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..dd85c9e87fc3c3d653e491f196473d4bf756c743 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000098_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa843e821fb7fca38d1b2ffed87ad63775633c2be86cd1197df966c29ad418de +size 2265719 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000099_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000099_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5b28f9009ecc91b666f267c3dca3427ec6688ed1 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000099_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3d0e573a48ae2db44abb3cc183a5a402c632dc2d5f9c411fd9e357372d9a767 +size 2284152 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000100_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000100_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..544c7ea33bbfe6df00aafc0ab00ec207d42d5a7f --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000100_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf263e1ff44a3d75ad0ae364dba441658625304cacbc3a8792719a4e5c75c57b +size 2573287 diff --git 
a/cityscapes/leftImg8bit/train/weimar/weimar_000101_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000101_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2e6622d4302842f5655a21bc402f69fdf3698c96 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000101_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:adce8ead63f604b93b5990cf1edbd7c41efe50cd0539e273e134bd9a9ac670ca +size 2177355 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000102_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000102_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..2bf459e9e72dc9183aa3a2985588a13ab5edab66 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000102_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66535b20272327e3e48004d3c802d3955bb43765d1223c4757f03c51894172bd +size 2102520 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000103_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000103_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3789b1aca5cba0783922cb278fb37d1f0627de6f --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000103_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a14bc60941eb582a15d7c7fae523b58e625a203227935bd0d7b89345c70a4cdb +size 2347854 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000104_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000104_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..cd1c618dd248bbea66353ed90a48e5e106402ae9 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000104_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3800073ad3a18acea0cf4f563f207d8b4879472bc368b976f9efcd3e6e536b78 +size 2157989 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000105_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000105_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..003711127d4015d3a478a03d4a4fd1cd6080b4d0 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000105_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc581c25806e18828730d484c545de42908596ba4e964efbd7c5d1c68836a3a4 +size 2381542 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000106_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000106_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..469a3abc414d3e468d4fae2f3c66923d36ad5cf2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000106_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33540eeb6721c809fe8d9aff7796aaa1c1272bbc40be7b2556cd0d2985da18fc +size 2517457 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000107_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000107_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ed74811a67536c96aaa273675c2936bfb96f8608 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000107_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c9015bc9b61521b80e0230cb6a021826aed4fcc8dde835c275ed2136195a0b90 +size 2545258 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000108_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000108_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..f9eed1ae49ebfe2fed7316695c9f378d73e3f1b2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000108_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90356b1ac351f1cc0abda4c7602d9a2eced55ddee6703841f5d2325fe6cc1a7e +size 2439504 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000109_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000109_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..1a85dee811b9b1f52ef619e69fa5217a260dba4a --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000109_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f450f29213c21f8cccc2c5503842deb27280a6a32a471a548f4db5e547bbd2d +size 2244443 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000110_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000110_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..81a718802685934042a004d2c5ce3e62e3591f98 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000110_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0db761d27d6430ee6d9c46b9d400c4ed025fa7b4052d47681c93805386a639a0 +size 2362044 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000111_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000111_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..5f2b48304c3ae9015b5db1a8a58bd86d8a2429cb --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000111_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd21ba0dc69f14575f0b50b403f482d3c9a8c07014b93dd9650fbddd663c9740 +size 2289496 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000112_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000112_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b05f6bd24b87863a602894ac35212a817597dd34 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000112_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d9f4f31ec0f147ae609398990b12ea240492ec24e2cdbdf9d1620a00ecf3939 +size 2307078 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000113_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000113_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..fe5e89e1b10eae16966b32eec34e1dfbe9303b8f --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000113_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b96801554cd39dc1442c70ca6ed156500f8848af6263318e90be1796e5f37798 +size 2166869 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000114_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000114_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..9c3c9fd1e71d8f6d9b247ef4ae1f668c249d49e2 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000114_000019_leftImg8bit.png @@ -0,0 +1,3 
@@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c0577643237a07feac620cfe7f29b8c7b7245839e051247fc9a1bfb52cf13ec +size 2232329 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000115_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000115_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..24e3bcf55d9a3d012f7d9009aaece3717a600143 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000115_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:553f5314f94ae17e878e01e87b517794f869ed0d55ed23821638fd063c104f12 +size 2218324 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000116_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000116_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b3e107d219a6f2345a87143cb3e52e63c0cb8c35 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000116_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9d41f84fe8925592416949ea5e419d5d10a36c04637aa1dead87b83d9e529d1 +size 2423938 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000117_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000117_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b057114705ee5633d6efebc60c914d18c599d042 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000117_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f92a77716d738b2af3c99ed6ee7f4314970eeb17cbb795a35d31de62bfb4854 +size 2356522 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000118_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000118_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4e5603577e34bb781e31392ef52e923da9f75e9d --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000118_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2de1fe285704d78d2c048565eef82395e2a88a796ed3c6a3cef1334a05cc2fb1 +size 2180778 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000119_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000119_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7c0eeb3c144950e743869a8018253c641cbf9f1d --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000119_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e53151c112e37305f3dce7cda0044dc4617376b5440c2c14dc581f3258eb53c0 +size 2396317 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000120_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000120_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..48ff832691dacdf5ae887482e3e346af1d365861 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000120_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:875ac90f3fe7532aaf8921c388fb5a80301dc1d924a742aa1ee154dad2c94012 +size 2419105 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000121_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000121_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d914b1bba480dcece39d16006726d37dc8f28bd3 --- /dev/null +++ 
b/cityscapes/leftImg8bit/train/weimar/weimar_000121_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf4995c5e04916103477e16f66cfcf41fadc31616127f6990645e7bea933b7a0 +size 2186220 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000122_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000122_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..dc3b8f164623c11e434342f94d3772aeceb10eff --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000122_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8f9822d7ae6b498f4c55cf072f2fcdd617406241b95d54ad5a3a7b9b1c88011 +size 2269235 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000123_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000123_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..3facff11fe06a2b28ee4db1833eb45d2d077b583 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000123_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14d0ae00a658f94f64d9d20c7b044b6a6f15d56c509aa6a020eca0fe791ef854 +size 2478346 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000124_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000124_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..e03605eeb51fe80d5abb211c620642482febb3f0 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000124_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d048b4a9ea2bc6e5a85bd7a8c6659d43be5d0ea6b84f3b5701278660e398531 +size 2019305 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000125_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000125_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..22501590576156834399031455ebc4086143e247 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000125_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f866d2b17877024d8c917695089b41ee4b53070051b2c5ba03343d63844b539 +size 2410833 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000126_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000126_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..97a595128ffda185dd1779e17b666e0027bcf83b --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000126_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:860c831dea784118f9ba695c9b08ba699f1f704912705fa811c2dc689a15dafd +size 2165956 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000127_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000127_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..703f8fcd8b6d69b3ac8b3d77bc67cbd5430e3c68 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000127_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2253bd94d30d265cc87fd1d64ba8fd0bc2672e6811be0ce02d0436cb896ba243 +size 2224561 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000128_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000128_000019_leftImg8bit.png new file mode 100644 index 
0000000000000000000000000000000000000000..7f2f2107f170080aa6e77a502282fd3176767c6c --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000128_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45bba0cdccc63295dd1229d47e66bc6bc224821175401bfc652b8768e845b45d +size 2211361 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000129_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000129_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4acb5ecbb497e724315f8e94412dcd2c3d15b99b --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000129_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:83f88f06b59e861c36d377303b06e82f7a7f56add413211a242b60741407d387 +size 2426914 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000130_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000130_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..66822a1cabd8995773817e50f65c8d6cf2be02e3 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000130_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d83b769ba05d3aaaec7d7ec25631bb2535386ce212a55edd9a2a9d651634243a +size 2283892 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000131_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000131_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ae3463238863afb9c12a04c280c18e01e32c3909 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000131_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f948294baee79f1d0ba879d16d880dfcf7892120f16147cebb57854156c0d6e +size 2263681 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000133_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000133_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..4cf030bc37070bfb95f2e40ff445fe13b200d16b --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000133_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f88814e3e51c99affaf4a2d5d9fc405bf76b9b54c1ca24ca04cc132089419f7 +size 2272755 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000134_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000134_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..b256f303516b192da5a0857da93284aa063708c8 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000134_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:598c3af30cdb8de835886bd30507027552c4e74acfcdb4d51e7ee1b36e0cca58 +size 2488641 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000135_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000135_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..7277b0d352371cc53e85529f91188159292c869c --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000135_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fbf7322d9b93859587afdad2a8e804db88dbc5a8ee8c0ce54a754530ff0d03e +size 2594357 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000136_000019_leftImg8bit.png 
b/cityscapes/leftImg8bit/train/weimar/weimar_000136_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0d3cef0a227a440a7aa50d744339a2e50936226d --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000136_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eda1a10348ad43787599f2c5b83f90943863d514476e08af090e37570092c8b +size 2446423 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000137_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000137_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..25ff6dd9c10de77cd62fe8bd732c556588947904 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000137_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd1eb04ad736f05b718218de4cb22d24ccfd6ae184911e06c0ac0847cf9d51ee +size 2311922 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000138_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000138_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..d46e04f962cc5ca1c013d1d77ceee0f4b41864e5 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000138_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dbd9661ede3086ace10311a71c8fccc8e4ebb225af1d6ca2ca72d1f2afaf72e +size 2384122 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000140_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000140_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..0dfccba9cd96397a3aea84711e35ea4cc0b44394 --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000140_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e943bbfee7ff4c59a153a1ee8c886a78a6aa997de0d9c5c913f60d2f982ceaf7 +size 2206608 diff --git a/cityscapes/leftImg8bit/train/weimar/weimar_000141_000019_leftImg8bit.png b/cityscapes/leftImg8bit/train/weimar/weimar_000141_000019_leftImg8bit.png new file mode 100644 index 0000000000000000000000000000000000000000..ef3f94473752e0fabd977cff7996315c9c5f08fe --- /dev/null +++ b/cityscapes/leftImg8bit/train/weimar/weimar_000141_000019_leftImg8bit.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25ae649e81b376a31fc3629f7cdd5ab82b7cf08794d79c4da79266cbd3a35dbf +size 2242452 diff --git a/segmentation/configs/_base_/datasets/ade20k.py b/segmentation/configs/_base_/datasets/ade20k.py new file mode 100644 index 0000000000000000000000000000000000000000..30f501a3036e08ba26e8ef35ae164f8e4c333cf3 --- /dev/null +++ b/segmentation/configs/_base_/datasets/ade20k.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'ADE20KDataset' +data_root = 'data/ADEChallengeData2016' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + 
dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/chase_db1.py b/segmentation/configs/_base_/datasets/chase_db1.py new file mode 100644 index 0000000000000000000000000000000000000000..298594ea925f87f22b37094a2ec50e370aec96a0 --- /dev/null +++ b/segmentation/configs/_base_/datasets/chase_db1.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'ChaseDB1Dataset' +data_root = 'data/CHASE_DB1' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (960, 999) +crop_size = (128, 128) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/cityscapes.py b/segmentation/configs/_base_/datasets/cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..f21867c63e1835f6fceb61f066e802fd8fd2a735 --- /dev/null +++ b/segmentation/configs/_base_/datasets/cityscapes.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 
2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/train', + ann_dir='gtFine/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/cityscapes_1024x1024.py b/segmentation/configs/_base_/datasets/cityscapes_1024x1024.py new file mode 100644 index 0000000000000000000000000000000000000000..f98d929723b4539323ba6c9db867dfa4b01ffb22 --- /dev/null +++ b/segmentation/configs/_base_/datasets/cityscapes_1024x1024.py @@ -0,0 +1,35 @@ +_base_ = './cityscapes.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/cityscapes_extra.py b/segmentation/configs/_base_/datasets/cityscapes_extra.py new file mode 100644 index 0000000000000000000000000000000000000000..031df1bbfe8a939325464d92b2236fd23472c1ca --- /dev/null +++ b/segmentation/configs/_base_/datasets/cityscapes_extra.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + 
dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir=['leftImg8bit/train', 'leftImg8bit/train_extra'], + ann_dir=['gtFine/train', 'refinement_final/train_extra'], + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/cityscapes_extra_1024x1024.py b/segmentation/configs/_base_/datasets/cityscapes_extra_1024x1024.py new file mode 100644 index 0000000000000000000000000000000000000000..280cf1c6e88bae6a5b12dd73ea349b6ffb94c5cb --- /dev/null +++ b/segmentation/configs/_base_/datasets/cityscapes_extra_1024x1024.py @@ -0,0 +1,35 @@ +_base_ = './cityscapes_extra.py' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/coco-stuff10k.py b/segmentation/configs/_base_/datasets/coco-stuff10k.py new file mode 100644 index 0000000000000000000000000000000000000000..ec0496928b9464406a4013023c553cf3e7da526b --- /dev/null +++ b/segmentation/configs/_base_/datasets/coco-stuff10k.py @@ -0,0 +1,57 @@ +# dataset settings +dataset_type = 'COCOStuffDataset' +data_root = 'data/coco_stuff10k' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + 
dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + reduce_zero_label=True, + img_dir='images/train2014', + ann_dir='annotations/train2014', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + reduce_zero_label=True, + img_dir='images/test2014', + ann_dir='annotations/test2014', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + reduce_zero_label=True, + img_dir='images/test2014', + ann_dir='annotations/test2014', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/coco-stuff164k.py b/segmentation/configs/_base_/datasets/coco-stuff164k.py new file mode 100644 index 0000000000000000000000000000000000000000..a6a38f2ac4d1a39d4a89bb101462d3ec805c3aff --- /dev/null +++ b/segmentation/configs/_base_/datasets/coco-stuff164k.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'COCOStuffDataset' +data_root = 'data/coco_stuff164k' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/train2017', + ann_dir='annotations/train2017', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/val2017', + ann_dir='annotations/val2017', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/val2017', + ann_dir='annotations/val2017', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/drive.py b/segmentation/configs/_base_/datasets/drive.py new file mode 100644 index 0000000000000000000000000000000000000000..06e8ff606e0d2a4514ec8b7d2c6c436a32efcbf4 --- /dev/null +++ b/segmentation/configs/_base_/datasets/drive.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'DRIVEDataset' +data_root = 'data/DRIVE' +img_norm_cfg = dict( + mean=[123.675, 
116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (584, 565) +crop_size = (64, 64) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/hrf.py b/segmentation/configs/_base_/datasets/hrf.py new file mode 100644 index 0000000000000000000000000000000000000000..242d790eb1b83e75cf6b7eaa7a35c674099311ad --- /dev/null +++ b/segmentation/configs/_base_/datasets/hrf.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'HRFDataset' +data_root = 'data/HRF' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (2336, 3504) +crop_size = (256, 256) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git 
a/segmentation/configs/_base_/datasets/loveda.py b/segmentation/configs/_base_/datasets/loveda.py new file mode 100644 index 0000000000000000000000000000000000000000..e5533569594601f52a4a4f164ba361dd0ed16da7 --- /dev/null +++ b/segmentation/configs/_base_/datasets/loveda.py @@ -0,0 +1,54 @@ +# dataset settings +dataset_type = 'LoveDADataset' +data_root = 'data/loveDA' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(1024, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/train', + ann_dir='ann_dir/train', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/val', + ann_dir='ann_dir/val', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='img_dir/val', + ann_dir='ann_dir/val', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/mapillary.py b/segmentation/configs/_base_/datasets/mapillary.py new file mode 100644 index 0000000000000000000000000000000000000000..3e62497b6ead8cdc0446713043b4bced4cf281c9 --- /dev/null +++ b/segmentation/configs/_base_/datasets/mapillary.py @@ -0,0 +1,55 @@ +# dataset settings +dataset_type = 'MapillaryDataset' +data_root = 'data/Mapillary/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='MapillaryHack'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 1.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root='data/Mapillary/', + img_dir=['training/images', 'validation/images'], + ann_dir=['training/labels', 'validation/labels'], + 
pipeline=train_pipeline), + val=dict( + type='CityscapesDataset', + data_root='data/cityscapes/', + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline), + test=dict( + type='CityscapesDataset', + data_root='data/cityscapes/', + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/mapillary_1024x1024.py b/segmentation/configs/_base_/datasets/mapillary_1024x1024.py new file mode 100644 index 0000000000000000000000000000000000000000..1c81ea2fb82cbc41b16208dfb66766368f459a99 --- /dev/null +++ b/segmentation/configs/_base_/datasets/mapillary_1024x1024.py @@ -0,0 +1,55 @@ +# dataset settings +dataset_type = 'MapillaryDataset' +data_root = 'data/Mapillary/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='MapillaryHack'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 1.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root='data/Mapillary/', + img_dir=['training/images', 'validation/images'], + ann_dir=['training/labels', 'validation/labels'], + pipeline=train_pipeline), + val=dict( + type='CityscapesDataset', + data_root='data/cityscapes/', + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline), + test=dict( + type='CityscapesDataset', + data_root='data/cityscapes/', + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/mapillary_896x896.py b/segmentation/configs/_base_/datasets/mapillary_896x896.py new file mode 100644 index 0000000000000000000000000000000000000000..48657319fec021eed2c9f293301521694e28fe9e --- /dev/null +++ b/segmentation/configs/_base_/datasets/mapillary_896x896.py @@ -0,0 +1,55 @@ +# dataset settings +dataset_type = 'MapillaryDataset' +data_root = 'data/Mapillary/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (896, 896) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='MapillaryHack'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 1.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 
1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type=dataset_type, + data_root='data/Mapillary/', + img_dir=['training/images', 'validation/images'], + ann_dir=['training/labels', 'validation/labels'], + pipeline=train_pipeline), + val=dict( + type='CityscapesDataset', + data_root='data/cityscapes/', + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline), + test=dict( + type='CityscapesDataset', + data_root='data/cityscapes/', + img_dir='leftImg8bit/val', + ann_dir='gtFine/val', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/nyu_depth_v2.py b/segmentation/configs/_base_/datasets/nyu_depth_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..bac9443c2613472a1d90ef6c26b4d94249ab54f1 --- /dev/null +++ b/segmentation/configs/_base_/datasets/nyu_depth_v2.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'NYUDepthV2Dataset' +data_root = 'data/nyu_depth_v2/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +crop_size = (480, 480) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=(640, 480), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(640, 480), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='image', + ann_dir='label40', + split='train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='image', + ann_dir='label40', + split='test.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='image', + ann_dir='label40', + split='test.txt', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/pascal_context.py b/segmentation/configs/_base_/datasets/pascal_context.py new file mode 100644 index 0000000000000000000000000000000000000000..ff65bad1b86d7e3a5980bb5b9fc55798dc8df5f4 --- /dev/null +++ b/segmentation/configs/_base_/datasets/pascal_context.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'PascalContextDataset' +data_root = 'data/VOCdevkit/VOC2010/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +img_scale = (520, 520) +crop_size = (480, 480) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', 
crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/pascal_context_59.py b/segmentation/configs/_base_/datasets/pascal_context_59.py new file mode 100644 index 0000000000000000000000000000000000000000..37585abab89834b95cd5bdd993b994fca1db65f6 --- /dev/null +++ b/segmentation/configs/_base_/datasets/pascal_context_59.py @@ -0,0 +1,60 @@ +# dataset settings +dataset_type = 'PascalContextDataset59' +data_root = 'data/VOCdevkit/VOC2010/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +img_scale = (520, 520) +crop_size = (480, 480) + +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations', reduce_zero_label=True), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClassContext', + split='ImageSets/SegmentationContext/val.txt', + pipeline=test_pipeline)) diff --git 
a/segmentation/configs/_base_/datasets/pascal_voc12.py b/segmentation/configs/_base_/datasets/pascal_voc12.py new file mode 100644 index 0000000000000000000000000000000000000000..ba1d42d0c5781f56dc177d860d856bb34adce555 --- /dev/null +++ b/segmentation/configs/_base_/datasets/pascal_voc12.py @@ -0,0 +1,57 @@ +# dataset settings +dataset_type = 'PascalVOCDataset' +data_root = 'data/VOCdevkit/VOC2012' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 512) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']), +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 512), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClass', + split='ImageSets/Segmentation/train.txt', + pipeline=train_pipeline), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClass', + split='ImageSets/Segmentation/val.txt', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='JPEGImages', + ann_dir='SegmentationClass', + split='ImageSets/Segmentation/val.txt', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/datasets/pascal_voc12_aug.py b/segmentation/configs/_base_/datasets/pascal_voc12_aug.py new file mode 100644 index 0000000000000000000000000000000000000000..3f23b6717d53ad29f02dd15046802a2631a5076b --- /dev/null +++ b/segmentation/configs/_base_/datasets/pascal_voc12_aug.py @@ -0,0 +1,9 @@ +_base_ = './pascal_voc12.py' +# dataset settings +data = dict( + train=dict( + ann_dir=['SegmentationClass', 'SegmentationClassAug'], + split=[ + 'ImageSets/Segmentation/train.txt', + 'ImageSets/Segmentation/aug.txt' + ])) diff --git a/segmentation/configs/_base_/datasets/potsdam.py b/segmentation/configs/_base_/datasets/potsdam.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/segmentation/configs/_base_/datasets/stare.py b/segmentation/configs/_base_/datasets/stare.py new file mode 100644 index 0000000000000000000000000000000000000000..3f71b25488cc11a6b4d582ac52b5a24e1ad1cf8e --- /dev/null +++ b/segmentation/configs/_base_/datasets/stare.py @@ -0,0 +1,59 @@ +# dataset settings +dataset_type = 'STAREDataset' +data_root = 'data/STARE' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +img_scale = (605, 700) +crop_size = (128, 128) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + 
dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] + +data = dict( + samples_per_gpu=4, + workers_per_gpu=4, + train=dict( + type='RepeatDataset', + times=40000, + dataset=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/training', + ann_dir='annotations/training', + pipeline=train_pipeline)), + val=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline), + test=dict( + type=dataset_type, + data_root=data_root, + img_dir='images/validation', + ann_dir='annotations/validation', + pipeline=test_pipeline)) diff --git a/segmentation/configs/_base_/default_runtime.py b/segmentation/configs/_base_/default_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..8f33f451b53c90c73b236a90705133d8f86e4719 --- /dev/null +++ b/segmentation/configs/_base_/default_runtime.py @@ -0,0 +1,14 @@ +# yapf:disable +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +# yapf:enable +dist_params = dict(backend="nccl") +log_level = "INFO" +load_from = None +resume_from = None +workflow = [("train", 1)] +cudnn_benchmark = True diff --git a/segmentation/configs/_base_/models/mask2former_beit.py b/segmentation/configs/_base_/models/mask2former_beit.py new file mode 100644 index 0000000000000000000000000000000000000000..2474dfa76c23a8fa199a4c06f80c4bd53c07308e --- /dev/null +++ b/segmentation/configs/_base_/models/mask2former_beit.py @@ -0,0 +1,138 @@ +# model_cfg +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = num_things_classes + num_stuff_classes +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='XCiT', + patch_size=16, + embed_dim=384, + depth=12, + num_heads=8, + mlp_ratio=4, + qkv_bias=True, + use_abs_pos_emb=True, + use_rel_pos_bias=False, + ), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[256, 512, 1024, 2048], # pass to pixel_decoder inside + # strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=num_things_classes, + num_stuff_classes=num_stuff_classes, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', 
inplace=True)), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0)), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + # For now, the dataset does not support + # evaluating semantic segmentation metric. + semantic_on=False, + instance_on=True, + # max_per_image is for instance segmentation. + max_per_image=100, + iou_thr=0.8, + # In Mask2Former's panoptic postprocessing, + # it will filter mask area where score is less than 0.5 . 
+ filter_low_score=True), + init_cfg=None) + +# find_unused_parameters = True diff --git a/segmentation/configs/_base_/models/segformer_mit-b0.py b/segmentation/configs/_base_/models/segformer_mit-b0.py new file mode 100644 index 0000000000000000000000000000000000000000..5b3e07331d0b42961e2969b75ca364f7b535b43f --- /dev/null +++ b/segmentation/configs/_base_/models/segformer_mit-b0.py @@ -0,0 +1,34 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='MixVisionTransformer', + in_channels=3, + embed_dims=32, + num_stages=4, + num_layers=[2, 2, 2, 2], + num_heads=[1, 2, 5, 8], + patch_sizes=[7, 3, 3, 3], + sr_ratios=[8, 4, 2, 1], + out_indices=(0, 1, 2, 3), + mlp_ratio=4, + qkv_bias=True, + drop_rate=0.0, + attn_drop_rate=0.0, + drop_path_rate=0.1), + decode_head=dict( + type='SegformerHead', + in_channels=[32, 64, 160, 256], + in_index=[0, 1, 2, 3], + channels=256, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/segmentation/configs/_base_/models/upernet_r50.py b/segmentation/configs/_base_/models/upernet_r50.py new file mode 100644 index 0000000000000000000000000000000000000000..10974962fdd7136031fd06de1700f497d355ceaa --- /dev/null +++ b/segmentation/configs/_base_/models/upernet_r50.py @@ -0,0 +1,44 @@ +# model settings +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(0, 1, 2, 3), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True), + decode_head=dict( + type='UPerHead', + in_channels=[256, 512, 1024, 2048], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=19, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) diff --git a/segmentation/configs/_base_/schedules/schedule_160k.py b/segmentation/configs/_base_/schedules/schedule_160k.py new file mode 100644 index 0000000000000000000000000000000000000000..39630f215bd5b952407f8bbf1f0e6f91038767a5 --- /dev/null +++ b/segmentation/configs/_base_/schedules/schedule_160k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=160000) +checkpoint_config = dict(by_epoch=False, interval=16000) +evaluation = dict(interval=16000, metric='mIoU', pre_eval=True) diff --git a/segmentation/configs/_base_/schedules/schedule_20k.py b/segmentation/configs/_base_/schedules/schedule_20k.py new file mode 100644 index 
0000000000000000000000000000000000000000..73c7021972bb8e955440b5afc8eaf0a4853b98a7 --- /dev/null +++ b/segmentation/configs/_base_/schedules/schedule_20k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=20000) +checkpoint_config = dict(by_epoch=False, interval=2000) +evaluation = dict(interval=2000, metric='mIoU', pre_eval=True) diff --git a/segmentation/configs/_base_/schedules/schedule_320k.py b/segmentation/configs/_base_/schedules/schedule_320k.py new file mode 100644 index 0000000000000000000000000000000000000000..a0b230626f638fc2072dbc78cca834edce3fdc23 --- /dev/null +++ b/segmentation/configs/_base_/schedules/schedule_320k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=320000) +checkpoint_config = dict(by_epoch=False, interval=32000) +evaluation = dict(interval=32000, metric='mIoU') diff --git a/segmentation/configs/_base_/schedules/schedule_40k.py b/segmentation/configs/_base_/schedules/schedule_40k.py new file mode 100644 index 0000000000000000000000000000000000000000..d2c502325944a1c5aa894283f28c610d67ab4da8 --- /dev/null +++ b/segmentation/configs/_base_/schedules/schedule_40k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=40000) +checkpoint_config = dict(by_epoch=False, interval=4000) +evaluation = dict(interval=4000, metric='mIoU', pre_eval=True) diff --git a/segmentation/configs/_base_/schedules/schedule_80k.py b/segmentation/configs/_base_/schedules/schedule_80k.py new file mode 100644 index 0000000000000000000000000000000000000000..8365a878e9e19fff1080b0268ee26405eea34e43 --- /dev/null +++ b/segmentation/configs/_base_/schedules/schedule_80k.py @@ -0,0 +1,9 @@ +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) +# runtime settings +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=8000) +evaluation = dict(interval=8000, metric='mIoU', pre_eval=True) diff --git a/segmentation/configs/cityscapes/README.md b/segmentation/configs/cityscapes/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f416d560d1cd467389f154ad3ba3d60db8c9c5df --- /dev/null +++ b/segmentation/configs/cityscapes/README.md @@ -0,0 +1,46 @@ +# Cityscapes + +Introduced by Cordts et al. in [The Cityscapes Dataset for Semantic Urban Scene Understanding](https://paperswithcode.com/paper/the-cityscapes-dataset-for-semantic-urban). + +Cityscapes is a large-scale database which focuses on semantic understanding of urban street scenes. It provides semantic, instance-wise, and dense pixel annotations for 30 classes grouped into 8 categories (flat surfaces, humans, vehicles, constructions, objects, nature, sky, and void). 
The dataset consists of around 5000 finely annotated images and 20000 coarsely annotated ones. Data was captured in 50 cities over several months, during daytime, and in good weather conditions. It was originally recorded as video, so the frames were manually selected to feature a large number of dynamic objects, varying scene layouts, and varying backgrounds.
+
+## Model Zoo
+
+### UperNet + InternImage
+
+| backbone | resolution | mIoU (ss/ms) | train speed | train time | #params | FLOPs | Config | Download |
+| :------------: | :--------: | :-----------: | :----------: | :--------: | :-----: | :---: | :---: | :---: |
+| InternImage-T | 512x1024 | 82.58 / 83.40 | 0.32s / iter | 14.5h | 59M | 1889G | [config](./upernet_internimage_t_512x1024_160k_cityscapes.py) | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_t_512x1024_160k_cityscapes.pth) \| [log](https://huggingface.co/OpenGVLab/InternImage/raw/main/upernet_internimage_t_512x1024_160k_cityscapes.log.json) |
+| InternImage-S | 512x1024 | 82.74 / 83.45 | 0.36s / iter | 16.5h | 80M | 2035G | [config](./upernet_internimage_s_512x1024_160k_cityscapes.py) | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_s_512x1024_160k_cityscapes.pth) \| [log](https://huggingface.co/OpenGVLab/InternImage/raw/main/upernet_internimage_s_512x1024_160k_cityscapes.log.json) |
+| InternImage-B | 512x1024 | 83.18 / 83.97 | 0.39s / iter | 17h | 128M | 2369G | [config](./upernet_internimage_b_512x1024_160k_cityscapes.py) | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_b_512x1024_160k_cityscapes.pth) \| [log](https://huggingface.co/OpenGVLab/InternImage/raw/main/upernet_internimage_b_512x1024_160k_cityscapes.log.json) |
+| InternImage-L | 512x1024 | 83.68 / 84.41 | 0.50s / iter | 23h | 256M | 3234G | [config](./upernet_internimage_l_512x1024_160k_cityscapes.py) | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_l_512x1024_160k_cityscapes.pth) \| [log](https://huggingface.co/OpenGVLab/InternImage/raw/main/upernet_internimage_l_512x1024_160k_cityscapes.log.json) |
+| InternImage-XL | 512x1024 | 83.62 / 84.28 | 0.56s / iter | 26h | 368M | 4022G | [config](./upernet_internimage_xl_512x1024_160k_cityscapes.py) | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_xl_512x1024_160k_cityscapes.pth) \| [log](https://huggingface.co/OpenGVLab/InternImage/raw/main/upernet_internimage_xl_512x1024_160k_cityscapes.log.json) |
+
+- Training speed is measured with an A100 GPU.
+- Please set `with_cp=True` to save memory if you encounter `out-of-memory` issues.
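+
+As a reference, `with_cp` can be flipped through the usual config-inheritance mechanism used throughout these configs; the sketch below is only illustrative (the derived config filename is made up), while the inherited config and the `with_cp` option are the ones shipped in this folder:
+
+```python
+# upernet_internimage_l_512x1024_160k_cityscapes_with_cp.py (hypothetical derived config)
+# Inherit a released Cityscapes config and only enable activation
+# checkpointing on the backbone, trading extra compute for lower GPU memory.
+_base_ = ['./upernet_internimage_l_512x1024_160k_cityscapes.py']
+
+model = dict(
+    backbone=dict(with_cp=True),  # checkpoint InternImage backbone blocks
+)
+```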
+ +### UperNet + InternImage (with additional data) + +Mapillary 80k + Cityscapes (w/ coarse data) 160k + +| backbone | resolution | mIoU (ss/ms) | train speed | train time | #params | FLOPs | Config | Download | +| :------------: | :--------: | :-----------: | :----------: | :--------: | :-----: | :---: | :----------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| InternImage-L | 512x1024 | 85.94 / 86.22 | 0.50s / iter | 23h | 256M | 3234G | [config](./upernet_internimage_l_512x1024_160k_mapillary2cityscapes.py) | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_l_512x1024_160k_mapillary2cityscapes.pth) \| [log](https://huggingface.co/OpenGVLab/InternImage/raw/main/upernet_internimage_l_512x1024_160k_mapillary2cityscapes.log.json) | +| InternImage-XL | 512x1024 | 86.20 / 86.42 | 0.56s / iter | 26h | 368M | 4022G | [config](./upernet_internimage_xl_512x1024_160k_mapillary2cityscapes.py) | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/upernet_internimage_xl_512x1024_160k_mapillary2cityscapes.pth) \| [log](https://huggingface.co/OpenGVLab/InternImage/raw/main/upernet_internimage_xl_512x1024_160k_mapillary2cityscapes.log.json) | + +### SegFormerHead + InternImage (with additional data) + +Mapillary 80k + Cityscapes (w/ coarse data) 160k + +| backbone | resolution | mIoU (ss/ms) | train speed | train time | #params | FLOPs | Config | Download | +| :------------: | :--------: | :-----------: | :----------: | :--------: | :-----: | :---: | :------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| InternImage-L | 512x1024 | 85.16 / 85.67 | 0.37s / iter | 17h | 220M | 1580G | [config](./segformer_internimage_l_512x1024_160k_mapillary2cityscapes.py) | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/segformer_internimage_l_512x1024_160k_mapillary2cityscapes.pth) \| [log](https://huggingface.co/OpenGVLab/InternImage/raw/main/segformer_internimage_l_512x1024_160k_mapillary2cityscapes.log.json) | +| InternImage-XL | 512x1024 | 85.41 / 85.93 | 0.43s / iter | 19.5h | 330M | 2364G | [config](./segformer_internimage_xl_512x1024_160k_mapillary2cityscapes.py) | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/segformer_internimage_xl_512x1024_160k_mapillary2cityscapes.pth) \| [log](https://huggingface.co/OpenGVLab/InternImage/raw/main/segformer_internimage_xl_512x1024_160k_mapillary2cityscapes.log.json) | + +### Mask2Former + InternImage (with additional data) + +Mapillary 80k + Cityscapes (w/ coarse data) 80k + +| backbone | resolution | mIoU (ss/ms) | #params | FLOPs | Config | Download | +| :-----------: | :--------: | :-----------: | :-----: | :---: | :-------------------------------------------------------------------------: | 
:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| InternImage-H | 1024x1024 | 86.37 / 86.96 | 1094M | 7878G | [config](./mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py) | [ckpt](https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.pth) \| [log](https://huggingface.co/OpenGVLab/InternImage/raw/main/mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.log.json) | diff --git a/segmentation/configs/cityscapes/mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py b/segmentation/configs/cityscapes/mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..8efeb2e514206b25379d2805419ea7691346882d --- /dev/null +++ b/segmentation/configs/cityscapes/mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py @@ -0,0 +1,163 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +_base_ = [ + '../_base_/models/mask2former_beit.py', '../_base_/datasets/cityscapes_extra_1024x1024.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py' +] +num_classes = 19 +crop_size = (1024, 1024) +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +model = dict( + type='EncoderDecoderMask2Former', + backbone=dict( + _delete_=True, + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4., + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, # for InternImage-H/G + res_post_norm=True, # for InternImage-H/G + level2_post_norm=True, # for InternImage-H/G + level2_post_norm_block_ids=[5, 11, 17, 23, 29], # for InternImage-H/G + center_feature_scale=True, # for InternImage-H/G + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + num_classes=num_classes, + num_queries=100, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + with_cp=False, # set with_cp=True to save memory + act_cfg=dict(type='ReLU', inplace=True)), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + 
transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + with_cp=False, # set with_cp=True to save memory + add_identity=True), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[1.0] * num_classes + [0.1]) + ), + test_cfg=dict(mode='slide', crop_size=crop_size, stride=(512, 512))) +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']), + ]) +] +optimizer = dict( + _delete_=True, type='AdamW', lr=1e-5, betas=(0.9, 0.999), weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=50, layer_decay_rate=0.95, + depths=[6, 6, 32, 6], offset_lr_scale=1.0)) +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2, + train=dict(pipeline=train_pipeline), + val=dict(pipeline=test_pipeline), + test=dict(pipeline=test_pipeline)) +runner = dict(type='IterBasedRunner') +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict(interval=2000, metric='mIoU', save_best='mIoU') +# fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/segmentation/configs/cityscapes/segformer_internimage_l_512x1024_160k_mapillary2cityscapes.py b/segmentation/configs/cityscapes/segformer_internimage_l_512x1024_160k_mapillary2cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..c5647fcc5a83211cc5d703479265fc32e56c2296 --- /dev/null +++ b/segmentation/configs/cityscapes/segformer_internimage_l_512x1024_160k_mapillary2cityscapes.py @@ -0,0 +1,46 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +_base_ = [ + '../_base_/models/segformer_mit-b0.py', 
'../_base_/datasets/cityscapes_extra.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/segformer_internimage_l_512x1024_80k_mapillary.pth' +model = dict( + backbone=dict( + _delete_=True, + type='InternImage', + core_op='DCNv3', + channels=160, + depths=[5, 5, 22, 5], + groups=[10, 20, 40, 80], + mlp_ratio=4., + drop_path_rate=0.4, + norm_layer='LN', + layer_scale=1.0, + offset_scale=2.0, + post_norm=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict(num_classes=150, in_channels=[160, 320, 640, 1280]), + test_cfg=dict(mode='whole')) +optimizer = dict( + _delete_=True, type='AdamW', lr=0.00002, betas=(0.9, 0.999), weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=37, layer_decay_rate=0.94, + depths=[5, 5, 22, 5], offset_lr_scale=1.0)) +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +runner = dict(type='IterBasedRunner') +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict(interval=4000, metric='mIoU', save_best='mIoU') +# fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/segmentation/configs/cityscapes/segformer_internimage_xl_512x1024_160k_mapillary2cityscapes.py b/segmentation/configs/cityscapes/segformer_internimage_xl_512x1024_160k_mapillary2cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..6004d72b64b8fbbfaf777f5bc8b94ff59318a36c --- /dev/null +++ b/segmentation/configs/cityscapes/segformer_internimage_xl_512x1024_160k_mapillary2cityscapes.py @@ -0,0 +1,46 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +_base_ = [ + '../_base_/models/segformer_mit-b0.py', '../_base_/datasets/cityscapes_extra.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/segformer_internimage_xl_512x1024_80k_mapillary.pth' +model = dict( + backbone=dict( + _delete_=True, + type='InternImage', + core_op='DCNv3', + channels=192, + depths=[5, 5, 24, 5], + groups=[12, 24, 48, 96], + mlp_ratio=4., + drop_path_rate=0.4, + norm_layer='LN', + layer_scale=1.0, + offset_scale=2.0, + post_norm=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict(num_classes=150, in_channels=[192, 384, 768, 1536]), + test_cfg=dict(mode='whole')) +optimizer = dict( + _delete_=True, type='AdamW', lr=0.00002, betas=(0.9, 0.999), weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=39, layer_decay_rate=0.94, + depths=[5, 5, 24, 5], offset_lr_scale=1.0)) +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +runner = dict(type='IterBasedRunner') +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) 
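For reference, the configs added in this diff are plain mmcv/mmseg config files, so the derived settings can be inspected before launching a run. A minimal sketch, assuming an mmcv 1.x install, the repo root as the working directory, and that the `_base_` files referenced above exist in the checkout:

```python
# Sketch only: load the SegFormer InternImage-L config added above and check
# that the layer-decay setting matches the backbone, since paramwise_cfg's
# num_layers is expected to equal sum(depths).
from mmcv import Config

cfg = Config.fromfile(
    'segmentation/configs/cityscapes/'
    'segformer_internimage_l_512x1024_160k_mapillary2cityscapes.py')

depths = cfg.model.backbone.depths                    # [5, 5, 22, 5]
num_layers = cfg.optimizer.paramwise_cfg.num_layers   # 37
assert num_layers == sum(depths)

print(cfg.model.backbone.channels, cfg.optimizer.lr)  # 160 2e-05
```

The same relation holds for the other variants in this diff, e.g. 39 for depths [5, 5, 24, 5] (XL) and 50 for [6, 6, 32, 6] (H).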
+checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict(interval=4000, metric='mIoU', save_best='mIoU') +# fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/segmentation/configs/cityscapes/upernet_internimage_b_512x1024_160k_cityscapes.py b/segmentation/configs/cityscapes/upernet_internimage_b_512x1024_160k_cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..8d6cd39826e8644c7469584d94a8689b0bd8569b --- /dev/null +++ b/segmentation/configs/cityscapes/upernet_internimage_b_512x1024_160k_cityscapes.py @@ -0,0 +1,47 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +pretrained = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_b_1k_224.pth' +model = dict( + backbone=dict( + _delete_=True, + type='InternImage', + core_op='DCNv3', + channels=112, + depths=[4, 4, 21, 4], + groups=[7, 14, 28, 56], + mlp_ratio=4., + drop_path_rate=0.4, + norm_layer='LN', + layer_scale=1.0, + offset_scale=1.0, + post_norm=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict(num_classes=150, in_channels=[112, 224, 448, 896]), + auxiliary_head=dict(num_classes=150, in_channels=448), + test_cfg=dict(mode='whole') +) +optimizer = dict( + _delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=33, layer_decay_rate=1.0, + depths=[4, 4, 21, 4])) +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) +# By default, models are trained on 8 GPUs with 2 images per GPU +data=dict(samples_per_gpu=2) +runner = dict(type='IterBasedRunner') +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict(interval=16000, metric='mIoU', save_best='mIoU') +# fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/segmentation/configs/cityscapes/upernet_internimage_l_512x1024_160k_cityscapes.py b/segmentation/configs/cityscapes/upernet_internimage_l_512x1024_160k_cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..ff43dfc9036d1c06d67e57bec8cd5237503c7f49 --- /dev/null +++ b/segmentation/configs/cityscapes/upernet_internimage_l_512x1024_160k_cityscapes.py @@ -0,0 +1,47 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +pretrained = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_l_22k_192to384.pth' +model = dict( + backbone=dict( + _delete_=True, + type='InternImage', + core_op='DCNv3', + channels=160, + depths=[5, 5, 22, 5], + groups=[10, 20, 40, 80], + mlp_ratio=4., + drop_path_rate=0.4, + norm_layer='LN', + layer_scale=1.0, + offset_scale=2.0, + post_norm=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + 
init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict(num_classes=150, in_channels=[160, 320, 640, 1280]), + auxiliary_head=dict(num_classes=150, in_channels=640), + test_cfg=dict(mode='whole')) +optimizer = dict( + _delete_=True, type='AdamW', lr=0.00002, betas=(0.9, 0.999), weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=37, layer_decay_rate=0.94, + depths=[5, 5, 22, 5], offset_lr_scale=1.0)) +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +runner = dict(type='IterBasedRunner') +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict(interval=16000, metric='mIoU', save_best='mIoU') +# fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/segmentation/configs/cityscapes/upernet_internimage_l_512x1024_160k_mapillary2cityscapes.py b/segmentation/configs/cityscapes/upernet_internimage_l_512x1024_160k_mapillary2cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..80eca838f4b5d88ad8e2ddf18198d75cc615fa4e --- /dev/null +++ b/segmentation/configs/cityscapes/upernet_internimage_l_512x1024_160k_mapillary2cityscapes.py @@ -0,0 +1,47 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes_extra.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/segformer_internimage_l_512x1024_80k_mapillary.pth' +model = dict( + backbone=dict( + _delete_=True, + type='InternImage', + core_op='DCNv3', + channels=160, + depths=[5, 5, 22, 5], + groups=[10, 20, 40, 80], + mlp_ratio=4., + drop_path_rate=0.4, + norm_layer='LN', + layer_scale=1.0, + offset_scale=2.0, + post_norm=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict(num_classes=150, in_channels=[160, 320, 640, 1280]), + auxiliary_head=dict(num_classes=150, in_channels=640), + test_cfg=dict(mode='whole')) +optimizer = dict( + _delete_=True, type='AdamW', lr=0.00002, betas=(0.9, 0.999), weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=37, layer_decay_rate=0.94, + depths=[5, 5, 22, 5], offset_lr_scale=1.0)) +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +runner = dict(type='IterBasedRunner') +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict(interval=4000, metric='mIoU', save_best='mIoU') +# fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/segmentation/configs/cityscapes/upernet_internimage_s_512x1024_160k_cityscapes.py b/segmentation/configs/cityscapes/upernet_internimage_s_512x1024_160k_cityscapes.py new file mode 100644 index 
0000000000000000000000000000000000000000..d5c87d33e52b0586e460a4911983d0c2c91777f5 --- /dev/null +++ b/segmentation/configs/cityscapes/upernet_internimage_s_512x1024_160k_cityscapes.py @@ -0,0 +1,46 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +pretrained = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_s_1k_224.pth' +model = dict( + backbone=dict( + _delete_=True, + type='InternImage', + core_op='DCNv3', + channels=80, + depths=[4, 4, 21, 4], + groups=[5, 10, 20, 40], + mlp_ratio=4., + drop_path_rate=0.3, + norm_layer='LN', + layer_scale=1.0, + offset_scale=1.0, + post_norm=True, + with_cp=False, + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict(num_classes=150, in_channels=[80, 160, 320, 640]), + auxiliary_head=dict(num_classes=150, in_channels=320), + test_cfg=dict(mode='whole') +) +optimizer = dict( + _delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=33, layer_decay_rate=1.0, + depths=[4, 4, 21, 4])) +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) +# By default, models are trained on 8 GPUs with 2 images per GPU +data=dict(samples_per_gpu=2) +runner = dict(type='IterBasedRunner') +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict(interval=16000, metric='mIoU', save_best='mIoU') +# fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/segmentation/configs/cityscapes/upernet_internimage_t_512x1024_160k_cityscapes.py b/segmentation/configs/cityscapes/upernet_internimage_t_512x1024_160k_cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..b082ef0ef942fa80261c3f6082ca80059b7deccc --- /dev/null +++ b/segmentation/configs/cityscapes/upernet_internimage_t_512x1024_160k_cityscapes.py @@ -0,0 +1,47 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +pretrained = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_t_1k_224.pth' +model = dict( + backbone=dict( + _delete_=True, + type='InternImage', + core_op='DCNv3', + channels=64, + depths=[4, 4, 18, 4], + groups=[4, 8, 16, 32], + mlp_ratio=4., + drop_path_rate=0.2, + norm_layer='LN', + layer_scale=1.0, + offset_scale=1.0, + post_norm=False, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict(num_classes=150, in_channels=[64, 128, 256, 512]), + auxiliary_head=dict(num_classes=150, in_channels=256), + test_cfg=dict(mode='whole') +) +optimizer = dict( + _delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=30, layer_decay_rate=1.0, + 
depths=[4, 4, 18, 4])) +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) +# By default, models are trained on 8 GPUs with 2 images per GPU +data=dict(samples_per_gpu=2) +runner = dict(type='IterBasedRunner') +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict(interval=16000, metric='mIoU', save_best='mIoU') +# fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/segmentation/configs/cityscapes/upernet_internimage_xl_512x1024_160k_cityscapes.py b/segmentation/configs/cityscapes/upernet_internimage_xl_512x1024_160k_cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..328c28a008e54c5f4da70023bbae1f0265bc2b0b --- /dev/null +++ b/segmentation/configs/cityscapes/upernet_internimage_xl_512x1024_160k_cityscapes.py @@ -0,0 +1,47 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes.py', + '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py' +] +pretrained = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/internimage_xl_22k_192to384.pth' +model = dict( + backbone=dict( + _delete_=True, + type='InternImage', + core_op='DCNv3', + channels=192, + depths=[5, 5, 24, 5], + groups=[12, 24, 48, 96], + mlp_ratio=4., + drop_path_rate=0.4, + norm_layer='LN', + layer_scale=1.0, + offset_scale=2.0, + post_norm=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=dict(type='Pretrained', checkpoint=pretrained)), + decode_head=dict(num_classes=150, in_channels=[192, 384, 768, 1536]), + auxiliary_head=dict(num_classes=150, in_channels=768), + test_cfg=dict(mode='whole')) +optimizer = dict( + _delete_=True, type='AdamW', lr=0.00002, betas=(0.9, 0.999), weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=39, layer_decay_rate=0.94, + depths=[5, 5, 24, 5], offset_lr_scale=1.0)) +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +runner = dict(type='IterBasedRunner') +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict(interval=16000, metric='mIoU', save_best='mIoU') +# fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/segmentation/configs/cityscapes/upernet_internimage_xl_512x1024_160k_mapillary2cityscapes.py b/segmentation/configs/cityscapes/upernet_internimage_xl_512x1024_160k_mapillary2cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..ddd90d4c0a7e84933292c9fb3bb3db96aa8c6df1 --- /dev/null +++ b/segmentation/configs/cityscapes/upernet_internimage_xl_512x1024_160k_mapillary2cityscapes.py @@ -0,0 +1,47 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +_base_ = [ + '../_base_/models/upernet_r50.py', '../_base_/datasets/cityscapes_extra.py', + '../_base_/default_runtime.py', 
'../_base_/schedules/schedule_160k.py' +] +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/segformer_internimage_xl_512x1024_80k_mapillary.pth' +model = dict( + backbone=dict( + _delete_=True, + type='InternImage', + core_op='DCNv3', + channels=192, + depths=[5, 5, 24, 5], + groups=[12, 24, 48, 96], + mlp_ratio=4., + drop_path_rate=0.4, + norm_layer='LN', + layer_scale=1.0, + offset_scale=2.0, + post_norm=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict(num_classes=150, in_channels=[192, 384, 768, 1536]), + auxiliary_head=dict(num_classes=150, in_channels=768), + test_cfg=dict(mode='whole')) +optimizer = dict( + _delete_=True, type='AdamW', lr=0.00002, betas=(0.9, 0.999), weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict(num_layers=39, layer_decay_rate=0.94, + depths=[5, 5, 24, 5], offset_lr_scale=1.0)) +lr_config = dict(_delete_=True, policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-6, + power=1.0, min_lr=0.0, by_epoch=False) +# By default, models are trained on 8 GPUs with 2 images per GPU +data = dict(samples_per_gpu=2) +runner = dict(type='IterBasedRunner') +optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2)) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict(interval=4000, metric='mIoU', save_best='mIoU') +# fp16 = dict(loss_scale=dict(init_scale=512)) diff --git a/segmentation/mmcv_custom/__init__.py b/segmentation/mmcv_custom/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..22f0aee98ec56e1ba86a4ca6a0e9d2324d53cf9c --- /dev/null +++ b/segmentation/mmcv_custom/__init__.py @@ -0,0 +1,11 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +# -*- coding: utf-8 -*- +from .custom_layer_decay_optimizer_constructor import \ + CustomLayerDecayOptimizerConstructor + +__all__ = ['CustomLayerDecayOptimizerConstructor',] diff --git a/segmentation/mmcv_custom/__pycache__/__init__.cpython-39.pyc b/segmentation/mmcv_custom/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..871b698b59a9d1f08a2e7752af04d01fdb0b2ef1 Binary files /dev/null and b/segmentation/mmcv_custom/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmcv_custom/__pycache__/custom_layer_decay_optimizer_constructor.cpython-39.pyc b/segmentation/mmcv_custom/__pycache__/custom_layer_decay_optimizer_constructor.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69aa6d68018f8f34ba213a4081906bc6052e5239 Binary files /dev/null and b/segmentation/mmcv_custom/__pycache__/custom_layer_decay_optimizer_constructor.cpython-39.pyc differ diff --git a/segmentation/mmcv_custom/custom_layer_decay_optimizer_constructor.py b/segmentation/mmcv_custom/custom_layer_decay_optimizer_constructor.py new file mode 100644 index 0000000000000000000000000000000000000000..f536e124c523f6e937933dac21bb45af09b6085c --- /dev/null +++ b/segmentation/mmcv_custom/custom_layer_decay_optimizer_constructor.py @@ -0,0 +1,154 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +""" +Mostly 
copy-paste from BEiT library: +https://github.com/microsoft/unilm/blob/master/beit/semantic_segmentation/mmcv_custom/layer_decay_optimizer_constructor.py +""" + +import json + +from mmcv.runner import (OPTIMIZER_BUILDERS, DefaultOptimizerConstructor, + get_dist_info) +from mmseg.utils import get_root_logger + + +def get_num_layer_for_swin(var_name, num_max_layer, depths): + if var_name.startswith('backbone.patch_embed'): + return 0 + elif var_name.startswith('decode_head.mask_embed'): + return 0 + elif var_name.startswith('decode_head.cls_embed'): + return 0 + elif var_name.startswith('decode_head.level_embed'): + return 0 + elif var_name.startswith('decode_head.query_embed'): + return 0 + elif var_name.startswith('decode_head.query_feat'): + return 0 + if var_name.startswith('backbone.cb_modules.0.patch_embed'): + return 0 + elif 'level_embeds' in var_name: + return 0 + elif var_name.startswith('backbone.layers') or var_name.startswith( + 'backbone.levels'): + if var_name.split('.')[3] not in ['downsample', 'norm']: + stage_id = int(var_name.split('.')[2]) + layer_id = int(var_name.split('.')[4]) + # layers for Swin-Large: [2, 2, 18, 2] + if stage_id == 0: + return layer_id + 1 + elif stage_id == 1: + return layer_id + 1 + depths[0] + elif stage_id == 2: + return layer_id + 1 + depths[0] + depths[1] + else: + return layer_id + 1 + depths[0] + depths[1] + depths[2] + else: + stage_id = int(var_name.split('.')[2]) + if stage_id == 0: + return 1 + depths[0] + elif stage_id == 1: + return 1 + depths[0] + depths[1] + elif stage_id == 2: + return 1 + depths[0] + depths[1] + depths[2] + else: + return 1 + depths[0] + depths[1] + depths[2] + else: + return num_max_layer - 1 + + +@OPTIMIZER_BUILDERS.register_module() +class CustomLayerDecayOptimizerConstructor(DefaultOptimizerConstructor): + + def add_params(self, params, module, prefix='', is_dcn_module=None): + """Add all parameters of module to the params list. + The parameters of the given module will be added to the list of param + groups, with specific rules defined by paramwise_cfg. + Args: + params (list[dict]): A list of param groups, it will be modified + in place. + module (nn.Module): The module to be added. + prefix (str): The prefix of the module + is_dcn_module (int|float|None): If the current module is a + submodule of DCN, `is_dcn_module` will be passed to + control conv_offset layer's learning rate. Defaults to None. + """ + parameter_groups = {} + logger = get_root_logger() + logger.info(self.paramwise_cfg) + backbone_small_lr = self.paramwise_cfg.get('backbone_small_lr', False) + dino_head = self.paramwise_cfg.get('dino_head', False) + num_layers = self.paramwise_cfg.get('num_layers') + 2 + layer_decay_rate = self.paramwise_cfg.get('layer_decay_rate') + depths = self.paramwise_cfg.get('depths') + offset_lr_scale = self.paramwise_cfg.get('offset_lr_scale', 1.0) + + logger.info('Build CustomLayerDecayOptimizerConstructor %f - %d' % + (layer_decay_rate, num_layers)) + weight_decay = self.base_wd + + for name, param in module.named_parameters(): + if not param.requires_grad: + continue # frozen weights + if len(param.shape) == 1 or name.endswith('.bias') or \ + 'relative_position' in name or \ + 'norm' in name or\ + 'sampling_offsets' in name: + group_name = 'no_decay' + this_weight_decay = 0. 
+ else: + group_name = 'decay' + this_weight_decay = weight_decay + + layer_id = get_num_layer_for_swin(name, num_layers, depths) + if layer_id == num_layers - 1 and dino_head and \ + ('sampling_offsets' in name or 'reference_points' in name): + group_name = 'layer_%d_%s_0.1x' % (layer_id, group_name) + elif ('sampling_offsets' in name or 'reference_points' in name) and 'backbone' in name: + group_name = 'layer_%d_%s_offset_lr_scale' % (layer_id, + group_name) + else: + group_name = 'layer_%d_%s' % (layer_id, group_name) + + if group_name not in parameter_groups: + scale = layer_decay_rate ** (num_layers - layer_id - 1) + if scale < 1 and backbone_small_lr == True: + scale = scale * 0.1 + if '0.1x' in group_name: + scale = scale * 0.1 + if 'offset_lr_scale' in group_name: + scale = scale * offset_lr_scale + + parameter_groups[group_name] = { + 'weight_decay': this_weight_decay, + 'params': [], + 'param_names': [], + 'lr_scale': scale, + 'group_name': group_name, + 'lr': scale * self.base_lr, + } + + parameter_groups[group_name]['params'].append(param) + parameter_groups[group_name]['param_names'].append(name) + rank, _ = get_dist_info() + if rank == 0: + to_display = {} + for key in parameter_groups: + to_display[key] = { + 'param_names': parameter_groups[key]['param_names'], + 'lr_scale': parameter_groups[key]['lr_scale'], + 'lr': parameter_groups[key]['lr'], + 'weight_decay': parameter_groups[key]['weight_decay'], + } + logger.info('Param groups = %s' % json.dumps(to_display, indent=2)) + + # state_dict = module.state_dict() + # for group_name in parameter_groups: + # group = parameter_groups[group_name] + # for name in group["param_names"]: + # group["params"].append(state_dict[name]) + + params.extend(parameter_groups.values()) diff --git a/segmentation/mmseg_custom/__init__.py b/segmentation/mmseg_custom/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a25c0f2380985a84d965f55212a185f1f3181df7 --- /dev/null +++ b/segmentation/mmseg_custom/__init__.py @@ -0,0 +1,9 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from .core import * # noqa: F401,F403 +from .datasets import * # noqa: F401,F403 +from .models import * # noqa: F401,F403 diff --git a/segmentation/mmseg_custom/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67015910b82294b7b9ada6a8d317001c37080e36 Binary files /dev/null and b/segmentation/mmseg_custom/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/__init__.py b/segmentation/mmseg_custom/core/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e6e4e906387e7f841284868c9d10d2c5bb516bdf --- /dev/null +++ b/segmentation/mmseg_custom/core/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Shanghai AI Lab. All rights reserved. 
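To make the grouping rule in `CustomLayerDecayOptimizerConstructor.add_params` above concrete: the constructor adds 2 to the configured `num_layers` (layer 0 collects the patch embedding and the decode-head embeddings, and the last id collects everything that matches no backbone block, such as the rest of the decode head), then scales each group's learning rate by `layer_decay_rate ** (num_layers - layer_id - 1)`. A small standalone sketch of that arithmetic, using the SegFormer-L values from the configs above (`num_layers=37`, `layer_decay_rate=0.94`); the helper name is illustrative only:

```python
# Illustrative re-implementation of the lr-scale rule from add_params() above.
def lr_scale(layer_id, num_layers_cfg=37, layer_decay_rate=0.94,
             offset_lr_scale=1.0, is_backbone_offset=False):
    num_layers = num_layers_cfg + 2                      # same "+ 2" as the constructor
    scale = layer_decay_rate ** (num_layers - layer_id - 1)
    if is_backbone_offset:                               # 'sampling_offsets'/'reference_points'
        scale *= offset_lr_scale                         # in the backbone get an extra factor
    return scale

for layer_id in (0, 1, 20, 38):                          # patch_embed, first block, mid block, head
    print(layer_id, round(lr_scale(layer_id), 4))
# layer_id 38 (== num_layers - 1, i.e. decode head and other non-backbone params)
# keeps the base lr (scale 1.0); earlier layers decay geometrically toward the stem.
```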
+from mmseg.core.evaluation import * # noqa: F401, F403 +from mmseg.core.seg import * # noqa: F401, F403 + +from .anchor import * # noqa: F401,F403 +from .box import * # noqa: F401,F403 +from .evaluation import * # noqa: F401,F403 +from .mask import * # noqa: F401,F403 +from .utils import * # noqa: F401, F403 diff --git a/segmentation/mmseg_custom/core/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/core/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21719dcc51f2f1b161a0cd76d32bdddb53c547c2 Binary files /dev/null and b/segmentation/mmseg_custom/core/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/anchor/__init__.py b/segmentation/mmseg_custom/core/anchor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3be0ee764684db5add1231dd165be00095bb38cd --- /dev/null +++ b/segmentation/mmseg_custom/core/anchor/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Shanghai AI Lab. All rights reserved. +from .point_generator import MlvlPointGenerator # noqa: F401,F403 diff --git a/segmentation/mmseg_custom/core/anchor/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/core/anchor/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19321ac7fb155bdec50e23e9e402349d55292630 Binary files /dev/null and b/segmentation/mmseg_custom/core/anchor/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/anchor/__pycache__/builder.cpython-39.pyc b/segmentation/mmseg_custom/core/anchor/__pycache__/builder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3391e44fbd23aae8760f4292ecd6f28742bb88c6 Binary files /dev/null and b/segmentation/mmseg_custom/core/anchor/__pycache__/builder.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/anchor/__pycache__/point_generator.cpython-39.pyc b/segmentation/mmseg_custom/core/anchor/__pycache__/point_generator.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d2c6e74c8838fc82276a6a1f500924156e930be Binary files /dev/null and b/segmentation/mmseg_custom/core/anchor/__pycache__/point_generator.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/anchor/builder.py b/segmentation/mmseg_custom/core/anchor/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb25ad37937bcf227832e37469a0e31cae77826 --- /dev/null +++ b/segmentation/mmseg_custom/core/anchor/builder.py @@ -0,0 +1,19 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +from mmcv.utils import Registry, build_from_cfg + +PRIOR_GENERATORS = Registry('Generator for anchors and points') + +ANCHOR_GENERATORS = PRIOR_GENERATORS + + +def build_prior_generator(cfg, default_args=None): + return build_from_cfg(cfg, PRIOR_GENERATORS, default_args) + + +def build_anchor_generator(cfg, default_args=None): + warnings.warn( + '``build_anchor_generator`` would be deprecated soon, please use ' + '``build_prior_generator`` ') + return build_prior_generator(cfg, default_args=default_args) diff --git a/segmentation/mmseg_custom/core/anchor/point_generator.py b/segmentation/mmseg_custom/core/anchor/point_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..34dd51a9597d63271c86d04d41b09611bb52c161 --- /dev/null +++ b/segmentation/mmseg_custom/core/anchor/point_generator.py @@ -0,0 +1,260 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
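The `PRIOR_GENERATORS` registry and `build_prior_generator` helper above follow the usual OpenMMLab pattern: a config dict names the class and the builder instantiates it. A small usage sketch, assuming the `mmseg_custom` package and its mmcv/mmdet dependencies are importable:

```python
# Sketch: building the point generator from a config dict via the registry.
from mmseg_custom.core.anchor import MlvlPointGenerator        # import registers the class
from mmseg_custom.core.anchor.builder import build_prior_generator

gen = build_prior_generator(dict(type='MlvlPointGenerator', strides=[8, 16, 32]))
print(gen.num_levels, gen.num_base_priors)                     # 3 [1, 1, 1]
```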
+import numpy as np +import torch +from torch.nn.modules.utils import _pair + +from .builder import PRIOR_GENERATORS + + +@PRIOR_GENERATORS.register_module() +class PointGenerator: + def _meshgrid(self, x, y, row_major=True): + xx = x.repeat(len(y)) + yy = y.view(-1, 1).repeat(1, len(x)).view(-1) + if row_major: + return xx, yy + else: + return yy, xx + + def grid_points(self, featmap_size, stride=16, device='cuda'): + feat_h, feat_w = featmap_size + shift_x = torch.arange(0., feat_w, device=device) * stride + shift_y = torch.arange(0., feat_h, device=device) * stride + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + stride = shift_x.new_full((shift_xx.shape[0], ), stride) + shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) + all_points = shifts.to(device) + return all_points + + def valid_flags(self, featmap_size, valid_size, device='cuda'): + feat_h, feat_w = featmap_size + valid_h, valid_w = valid_size + assert valid_h <= feat_h and valid_w <= feat_w + valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) + valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) + valid_x[:valid_w] = 1 + valid_y[:valid_h] = 1 + valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) + valid = valid_xx & valid_yy + return valid + + +@PRIOR_GENERATORS.register_module() +class MlvlPointGenerator: + """Standard points generator for multi-level (Mlvl) feature maps in 2D + points-based detectors. + + Args: + strides (list[int] | list[tuple[int, int]]): Strides of anchors + in multiple feature levels in order (w, h). + offset (float): The offset of points, the value is normalized with + corresponding stride. Defaults to 0.5. + """ + def __init__(self, strides, offset=0.5): + self.strides = [_pair(stride) for stride in strides] + self.offset = offset + + @property + def num_levels(self): + """int: number of feature levels that the generator will be applied""" + return len(self.strides) + + @property + def num_base_priors(self): + """list[int]: The number of priors (points) at a point + on the feature grid""" + return [1 for _ in range(len(self.strides))] + + def _meshgrid(self, x, y, row_major=True): + yy, xx = torch.meshgrid(y, x) + if row_major: + # warning .flatten() would cause error in ONNX exporting + # have to use reshape here + return xx.reshape(-1), yy.reshape(-1) + + else: + return yy.reshape(-1), xx.reshape(-1) + + def grid_priors(self, + featmap_sizes, + dtype=torch.float32, + device='cuda', + with_stride=False): + """Generate grid points of multiple feature levels. + + Args: + featmap_sizes (list[tuple]): List of feature map sizes in + multiple feature levels, each size arrange as + as (h, w). + dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. + device (str): The device where the anchors will be put on. + with_stride (bool): Whether to concatenate the stride to + the last dimension of points. + + Return: + list[torch.Tensor]: Points of multiple feature levels. + The sizes of each tensor should be (N, 2) when with stride is + ``False``, where N = width * height, width and height + are the sizes of the corresponding feature level, + and the last dimension 2 represent (coord_x, coord_y), + otherwise the shape should be (N, 4), + and the last dimension 4 represent + (coord_x, coord_y, stride_w, stride_h). 
+ """ + + assert self.num_levels == len(featmap_sizes) + multi_level_priors = [] + for i in range(self.num_levels): + priors = self.single_level_grid_priors(featmap_sizes[i], + level_idx=i, + dtype=dtype, + device=device, + with_stride=with_stride) + multi_level_priors.append(priors) + return multi_level_priors + + def single_level_grid_priors(self, + featmap_size, + level_idx, + dtype=torch.float32, + device='cuda', + with_stride=False): + """Generate grid Points of a single level. + + Note: + This function is usually called by method ``self.grid_priors``. + + Args: + featmap_size (tuple[int]): Size of the feature maps, arrange as + (h, w). + level_idx (int): The index of corresponding feature map level. + dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. + device (str, optional): The device the tensor will be put on. + Defaults to 'cuda'. + with_stride (bool): Concatenate the stride to the last dimension + of points. + + Return: + Tensor: Points of single feature levels. + The shape of tensor should be (N, 2) when with stride is + ``False``, where N = width * height, width and height + are the sizes of the corresponding feature level, + and the last dimension 2 represent (coord_x, coord_y), + otherwise the shape should be (N, 4), + and the last dimension 4 represent + (coord_x, coord_y, stride_w, stride_h). + """ + feat_h, feat_w = featmap_size + stride_w, stride_h = self.strides[level_idx] + shift_x = (torch.arange(0, feat_w, device=device) + + self.offset) * stride_w + # keep featmap_size as Tensor instead of int, so that we + # can convert to ONNX correctly + shift_x = shift_x.to(dtype) + + shift_y = (torch.arange(0, feat_h, device=device) + + self.offset) * stride_h + # keep featmap_size as Tensor instead of int, so that we + # can convert to ONNX correctly + shift_y = shift_y.to(dtype) + shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) + if not with_stride: + shifts = torch.stack([shift_xx, shift_yy], dim=-1) + else: + # use `shape[0]` instead of `len(shift_xx)` for ONNX export + stride_w = shift_xx.new_full((shift_xx.shape[0], ), + stride_w).to(dtype) + stride_h = shift_xx.new_full((shift_yy.shape[0], ), + stride_h).to(dtype) + shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], + dim=-1) + all_points = shifts.to(device) + return all_points + + def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): + """Generate valid flags of points of multiple feature levels. + + Args: + featmap_sizes (list(tuple)): List of feature map sizes in + multiple feature levels, each size arrange as + as (h, w). + pad_shape (tuple(int)): The padded shape of the image, + arrange as (h, w). + device (str): The device where the anchors will be put on. + + Return: + list(torch.Tensor): Valid flags of points of multiple levels. + """ + assert self.num_levels == len(featmap_sizes) + multi_level_flags = [] + for i in range(self.num_levels): + point_stride = self.strides[i] + feat_h, feat_w = featmap_sizes[i] + h, w = pad_shape[:2] + valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h) + valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w) + flags = self.single_level_valid_flags((feat_h, feat_w), + (valid_feat_h, valid_feat_w), + device=device) + multi_level_flags.append(flags) + return multi_level_flags + + def single_level_valid_flags(self, + featmap_size, + valid_size, + device='cuda'): + """Generate the valid flags of points of a single feature map. + + Args: + featmap_size (tuple[int]): The size of feature maps, arrange as + as (h, w). 
+ valid_size (tuple[int]): The valid size of the feature maps. + The size arrange as as (h, w). + device (str, optional): The device where the flags will be put on. + Defaults to 'cuda'. + + Returns: + torch.Tensor: The valid flags of each points in a single level \ + feature map. + """ + feat_h, feat_w = featmap_size + valid_h, valid_w = valid_size + assert valid_h <= feat_h and valid_w <= feat_w + valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) + valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) + valid_x[:valid_w] = 1 + valid_y[:valid_h] = 1 + valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) + valid = valid_xx & valid_yy + return valid + + def sparse_priors(self, + prior_idxs, + featmap_size, + level_idx, + dtype=torch.float32, + device='cuda'): + """Generate sparse points according to the ``prior_idxs``. + + Args: + prior_idxs (Tensor): The index of corresponding anchors + in the feature map. + featmap_size (tuple[int]): feature map size arrange as (w, h). + level_idx (int): The level index of corresponding feature + map. + dtype (obj:`torch.dtype`): Date type of points. Defaults to + ``torch.float32``. + device (obj:`torch.device`): The device where the points is + located. + Returns: + Tensor: Anchor with shape (N, 2), N should be equal to + the length of ``prior_idxs``. And last dimension + 2 represent (coord_x, coord_y). + """ + height, width = featmap_size + x = (prior_idxs % width + self.offset) * self.strides[level_idx][0] + y = ((prior_idxs // width) % height + + self.offset) * self.strides[level_idx][1] + prioris = torch.stack([x, y], 1).to(dtype) + prioris = prioris.to(device) + return prioris diff --git a/segmentation/mmseg_custom/core/box/__init__.py b/segmentation/mmseg_custom/core/box/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a6a127f85f7088f44a574efc9d92ef9d9faed2a8 --- /dev/null +++ b/segmentation/mmseg_custom/core/box/__init__.py @@ -0,0 +1,3 @@ +# Copyright (c) Shanghai AI Lab. All rights reserved. +from .builder import * # noqa: F401,F403 +from .samplers import MaskPseudoSampler # noqa: F401,F403 diff --git a/segmentation/mmseg_custom/core/box/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/core/box/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c5696c4ec20a4a53119fcdbf80f43f0a63ce443 Binary files /dev/null and b/segmentation/mmseg_custom/core/box/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/box/__pycache__/builder.cpython-39.pyc b/segmentation/mmseg_custom/core/box/__pycache__/builder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef4fe9d8004c8ba0fce366a9fc68a330709d968c Binary files /dev/null and b/segmentation/mmseg_custom/core/box/__pycache__/builder.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/box/builder.py b/segmentation/mmseg_custom/core/box/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..af4b8a8352560bc9196b1590ec8f6876920c5d46 --- /dev/null +++ b/segmentation/mmseg_custom/core/box/builder.py @@ -0,0 +1,15 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
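`MlvlPointGenerator.grid_priors` defined above returns one tensor of points per feature level. A quick CPU-only usage sketch (overriding the default `device='cuda'`), assuming strides 8/16/32 and a 512x512 input:

```python
# Usage sketch for grid_priors(): one (h*w, 4) tensor per level when
# with_stride=True, columns are (coord_x, coord_y, stride_w, stride_h).
from mmseg_custom.core.anchor import MlvlPointGenerator

gen = MlvlPointGenerator(strides=[8, 16, 32], offset=0.5)
featmap_sizes = [(64, 64), (32, 32), (16, 16)]   # (h, w) for a 512x512 image
priors = gen.grid_priors(featmap_sizes, device='cpu', with_stride=True)
for level, p in enumerate(priors):
    print(level, tuple(p.shape))                 # (4096, 4), (1024, 4), (256, 4)
print(priors[0][0])                              # first point: tensor([4., 4., 8., 8.])
```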
+from mmcv.utils import Registry, build_from_cfg + +BBOX_SAMPLERS = Registry('bbox_sampler') +BBOX_CODERS = Registry('bbox_coder') + + +def build_sampler(cfg, **default_args): + """Builder of box sampler.""" + return build_from_cfg(cfg, BBOX_SAMPLERS, default_args) + + +def build_bbox_coder(cfg, **default_args): + """Builder of box coder.""" + return build_from_cfg(cfg, BBOX_CODERS, default_args) diff --git a/segmentation/mmseg_custom/core/box/samplers/__init__.py b/segmentation/mmseg_custom/core/box/samplers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d30f99954a31ff4dc8eb77a18a7d141d176edef1 --- /dev/null +++ b/segmentation/mmseg_custom/core/box/samplers/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Shanghai AI Lab. All rights reserved. +from .mask_pseudo_sampler import MaskPseudoSampler # noqa: F401,F403 diff --git a/segmentation/mmseg_custom/core/box/samplers/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/core/box/samplers/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8aec7d65a47a09d24eb05f0df48959970d8a51a Binary files /dev/null and b/segmentation/mmseg_custom/core/box/samplers/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/box/samplers/__pycache__/base_sampler.cpython-39.pyc b/segmentation/mmseg_custom/core/box/samplers/__pycache__/base_sampler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5034e03d544102f0eece5e2263b41890316f16b Binary files /dev/null and b/segmentation/mmseg_custom/core/box/samplers/__pycache__/base_sampler.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/box/samplers/__pycache__/mask_pseudo_sampler.cpython-39.pyc b/segmentation/mmseg_custom/core/box/samplers/__pycache__/mask_pseudo_sampler.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ab059f5dab90374c69f06009d20a7d7186c57fb Binary files /dev/null and b/segmentation/mmseg_custom/core/box/samplers/__pycache__/mask_pseudo_sampler.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/box/samplers/__pycache__/mask_sampling_result.cpython-39.pyc b/segmentation/mmseg_custom/core/box/samplers/__pycache__/mask_sampling_result.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fa15054aab3cb37feb81a5c989815a7b7ab0323 Binary files /dev/null and b/segmentation/mmseg_custom/core/box/samplers/__pycache__/mask_sampling_result.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/box/samplers/__pycache__/sampling_result.cpython-39.pyc b/segmentation/mmseg_custom/core/box/samplers/__pycache__/sampling_result.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8dcde04b83971d8c3ecceb55e97ce4862d533b8e Binary files /dev/null and b/segmentation/mmseg_custom/core/box/samplers/__pycache__/sampling_result.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/box/samplers/base_sampler.py b/segmentation/mmseg_custom/core/box/samplers/base_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..dee649739e03013050089d831a28e4c549b06768 --- /dev/null +++ b/segmentation/mmseg_custom/core/box/samplers/base_sampler.py @@ -0,0 +1,105 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
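`BBOX_SAMPLERS` above is the registry that train-time configs use to select a sampler by name, typically a mask-based pseudo sampler such as the `MaskPseudoSampler` defined further down in this diff. A minimal sketch, assuming `mmseg_custom` is importable:

```python
# Sketch: instantiating a sampler from a config dict via the BBOX_SAMPLERS registry.
from mmseg_custom.core.box import MaskPseudoSampler            # import registers the class
from mmseg_custom.core.box.builder import build_sampler

sampler = build_sampler(dict(type='MaskPseudoSampler'))
print(type(sampler).__name__)                                  # MaskPseudoSampler
```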
+from abc import ABCMeta, abstractmethod + +import torch + +from .sampling_result import SamplingResult + + +class BaseSampler(metaclass=ABCMeta): + """Base class of samplers.""" + def __init__(self, + num, + pos_fraction, + neg_pos_ub=-1, + add_gt_as_proposals=True, + **kwargs): + self.num = num + self.pos_fraction = pos_fraction + self.neg_pos_ub = neg_pos_ub + self.add_gt_as_proposals = add_gt_as_proposals + self.pos_sampler = self + self.neg_sampler = self + + @abstractmethod + def _sample_pos(self, assign_result, num_expected, **kwargs): + """Sample positive samples.""" + pass + + @abstractmethod + def _sample_neg(self, assign_result, num_expected, **kwargs): + """Sample negative samples.""" + pass + + def sample(self, + assign_result, + bboxes, + gt_bboxes, + gt_labels=None, + **kwargs): + """Sample positive and negative bboxes. + + This is a simple implementation of bbox sampling given candidates, + assigning results and ground truth bboxes. + + Args: + assign_result (:obj:`AssignResult`): Bbox assigning results. + bboxes (Tensor): Boxes to be sampled from. + gt_bboxes (Tensor): Ground truth bboxes. + gt_labels (Tensor, optional): Class labels of ground truth bboxes. + + Returns: + :obj:`SamplingResult`: Sampling result. + + Example: + >>> from mmdet.core.bbox import RandomSampler + >>> from mmdet.core.bbox import AssignResult + >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes + >>> rng = ensure_rng(None) + >>> assign_result = AssignResult.random(rng=rng) + >>> bboxes = random_boxes(assign_result.num_preds, rng=rng) + >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng) + >>> gt_labels = None + >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1, + >>> add_gt_as_proposals=False) + >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels) + """ + if len(bboxes.shape) < 2: + bboxes = bboxes[None, :] + + bboxes = bboxes[:, :4] + + gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) + if self.add_gt_as_proposals and len(gt_bboxes) > 0: + if gt_labels is None: + raise ValueError( + 'gt_labels must be given when add_gt_as_proposals is True') + bboxes = torch.cat([gt_bboxes, bboxes], dim=0) + assign_result.add_gt_(gt_labels) + gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) + gt_flags = torch.cat([gt_ones, gt_flags]) + + num_expected_pos = int(self.num * self.pos_fraction) + pos_inds = self.pos_sampler._sample_pos(assign_result, + num_expected_pos, + bboxes=bboxes, + **kwargs) + # We found that sampled indices have duplicated items occasionally. 
+ # (may be a bug of PyTorch) + pos_inds = pos_inds.unique() + num_sampled_pos = pos_inds.numel() + num_expected_neg = self.num - num_sampled_pos + if self.neg_pos_ub >= 0: + _pos = max(1, num_sampled_pos) + neg_upper_bound = int(self.neg_pos_ub * _pos) + if num_expected_neg > neg_upper_bound: + num_expected_neg = neg_upper_bound + neg_inds = self.neg_sampler._sample_neg(assign_result, + num_expected_neg, + bboxes=bboxes, + **kwargs) + neg_inds = neg_inds.unique() + + sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, + assign_result, gt_flags) + return sampling_result diff --git a/segmentation/mmseg_custom/core/box/samplers/mask_pseudo_sampler.py b/segmentation/mmseg_custom/core/box/samplers/mask_pseudo_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..501a2010d4b75fa5e1427f6010ab2ceed8407b1e --- /dev/null +++ b/segmentation/mmseg_custom/core/box/samplers/mask_pseudo_sampler.py @@ -0,0 +1,43 @@ +# Copyright (c) OpenMMLab. All rights reserved. +"""copy from +https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" + +import torch + +from ..builder import BBOX_SAMPLERS +from .base_sampler import BaseSampler +from .mask_sampling_result import MaskSamplingResult + + +@BBOX_SAMPLERS.register_module() +class MaskPseudoSampler(BaseSampler): + """A pseudo sampler that does not do sampling actually.""" + def __init__(self, **kwargs): + pass + + def _sample_pos(self, **kwargs): + """Sample positive samples.""" + raise NotImplementedError + + def _sample_neg(self, **kwargs): + """Sample negative samples.""" + raise NotImplementedError + + def sample(self, assign_result, masks, gt_masks, **kwargs): + """Directly returns the positive and negative indices of samples. + + Args: + assign_result (:obj:`AssignResult`): Assigned results + masks (torch.Tensor): Bounding boxes + gt_masks (torch.Tensor): Ground truth boxes + Returns: + :obj:`SamplingResult`: sampler results + """ + pos_inds = torch.nonzero(assign_result.gt_inds > 0, + as_tuple=False).squeeze(-1).unique() + neg_inds = torch.nonzero(assign_result.gt_inds == 0, + as_tuple=False).squeeze(-1).unique() + gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8) + sampling_result = MaskSamplingResult(pos_inds, neg_inds, masks, + gt_masks, assign_result, gt_flags) + return sampling_result diff --git a/segmentation/mmseg_custom/core/box/samplers/mask_sampling_result.py b/segmentation/mmseg_custom/core/box/samplers/mask_sampling_result.py new file mode 100644 index 0000000000000000000000000000000000000000..f6c500b5be21120552564a85235bfbc5451e78c5 --- /dev/null +++ b/segmentation/mmseg_custom/core/box/samplers/mask_sampling_result.py @@ -0,0 +1,59 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
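A toy walk-through of `MaskPseudoSampler.sample` above: it performs no actual sampling and only splits queries into positives and negatives according to the assignment. `AssignResult` comes from mmdet, and all values here are made up for illustration:

```python
# Toy example: two of four query masks are assigned to the two ground-truth masks.
import torch
from mmdet.core.bbox import AssignResult
from mmseg_custom.core.box import MaskPseudoSampler

num_queries, h, w = 4, 8, 8
masks = torch.rand(num_queries, h, w)                    # fake predicted masks
gt_masks = (torch.rand(2, h, w) > 0.5).float()           # two fake binary gt masks
gt_inds = torch.tensor([1, 0, 2, 0])                     # query 0 -> gt 1, query 2 -> gt 2
labels = torch.tensor([5, 0, 11, 0])                     # made-up class labels
assign_result = AssignResult(num_gts=2, gt_inds=gt_inds,
                             max_overlaps=None, labels=labels)

result = MaskPseudoSampler().sample(assign_result, masks, gt_masks)
print(result.pos_inds.tolist(), result.neg_inds.tolist())   # [0, 2] [1, 3]
print(result.pos_gt_masks.shape, result.pos_gt_labels)      # torch.Size([2, 8, 8]) tensor([5, 11])
```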
+"""copy from +https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" + +import torch + +from .sampling_result import SamplingResult + + +class MaskSamplingResult(SamplingResult): + """Mask sampling result.""" + def __init__(self, pos_inds, neg_inds, masks, gt_masks, assign_result, + gt_flags): + self.pos_inds = pos_inds + self.neg_inds = neg_inds + self.pos_masks = masks[pos_inds] + self.neg_masks = masks[neg_inds] + self.pos_is_gt = gt_flags[pos_inds] + + self.num_gts = gt_masks.shape[0] + self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 + + if gt_masks.numel() == 0: + # hack for index error case + assert self.pos_assigned_gt_inds.numel() == 0 + self.pos_gt_masks = torch.empty_like(gt_masks) + else: + self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :] + + if assign_result.labels is not None: + self.pos_gt_labels = assign_result.labels[pos_inds] + else: + self.pos_gt_labels = None + + @property + def masks(self): + """torch.Tensor: concatenated positive and negative boxes""" + return torch.cat([self.pos_masks, self.neg_masks]) + + def __nice__(self): + data = self.info.copy() + data['pos_masks'] = data.pop('pos_masks').shape + data['neg_masks'] = data.pop('neg_masks').shape + parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] + body = ' ' + ',\n '.join(parts) + return '{\n' + body + '\n}' + + @property + def info(self): + """Returns a dictionary of info about the object.""" + return { + 'pos_inds': self.pos_inds, + 'neg_inds': self.neg_inds, + 'pos_masks': self.pos_masks, + 'neg_masks': self.neg_masks, + 'pos_is_gt': self.pos_is_gt, + 'num_gts': self.num_gts, + 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, + } diff --git a/segmentation/mmseg_custom/core/box/samplers/sampling_result.py b/segmentation/mmseg_custom/core/box/samplers/sampling_result.py new file mode 100644 index 0000000000000000000000000000000000000000..d1ac5785b8df0b5335b61cc64d78c39aa46cfe25 --- /dev/null +++ b/segmentation/mmseg_custom/core/box/samplers/sampling_result.py @@ -0,0 +1,150 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmdet.utils import util_mixins + + +class SamplingResult(util_mixins.NiceRepr): + """Bbox sampling result. + + Example: + >>> # xdoctest: +IGNORE_WANT + >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA + >>> self = SamplingResult.random(rng=10) + >>> print(f'self = {self}') + self = + """ + def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, + gt_flags): + self.pos_inds = pos_inds + self.neg_inds = neg_inds + self.pos_bboxes = bboxes[pos_inds] + self.neg_bboxes = bboxes[neg_inds] + self.pos_is_gt = gt_flags[pos_inds] + + self.num_gts = gt_bboxes.shape[0] + self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 + + if gt_bboxes.numel() == 0: + # hack for index error case + assert self.pos_assigned_gt_inds.numel() == 0 + self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4) + else: + if len(gt_bboxes.shape) < 2: + gt_bboxes = gt_bboxes.view(-1, 4) + + self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long(), :] + + if assign_result.labels is not None: + self.pos_gt_labels = assign_result.labels[pos_inds] + else: + self.pos_gt_labels = None + + @property + def bboxes(self): + """torch.Tensor: concatenated positive and negative boxes""" + return torch.cat([self.pos_bboxes, self.neg_bboxes]) + + def to(self, device): + """Change the device of the data inplace. 
+ + Example: + >>> self = SamplingResult.random() + >>> print(f'self = {self.to(None)}') + >>> # xdoctest: +REQUIRES(--gpu) + >>> print(f'self = {self.to(0)}') + """ + _dict = self.__dict__ + for key, value in _dict.items(): + if isinstance(value, torch.Tensor): + _dict[key] = value.to(device) + return self + + def __nice__(self): + data = self.info.copy() + data['pos_bboxes'] = data.pop('pos_bboxes').shape + data['neg_bboxes'] = data.pop('neg_bboxes').shape + parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] + body = ' ' + ',\n '.join(parts) + return '{\n' + body + '\n}' + + @property + def info(self): + """Returns a dictionary of info about the object.""" + return { + 'pos_inds': self.pos_inds, + 'neg_inds': self.neg_inds, + 'pos_bboxes': self.pos_bboxes, + 'neg_bboxes': self.neg_bboxes, + 'pos_is_gt': self.pos_is_gt, + 'num_gts': self.num_gts, + 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, + } + + @classmethod + def random(cls, rng=None, **kwargs): + """ + Args: + rng (None | int | numpy.random.RandomState): seed or state. + kwargs (keyword arguments): + - num_preds: number of predicted boxes + - num_gts: number of true boxes + - p_ignore (float): probability of a predicted box assigned to \ + an ignored truth. + - p_assigned (float): probability of a predicted box not being \ + assigned. + - p_use_label (float | bool): with labels or not. + + Returns: + :obj:`SamplingResult`: Randomly generated sampling result. + + Example: + >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA + >>> self = SamplingResult.random() + >>> print(self.__dict__) + """ + from mmdet.core.bbox import demodata + from mmdet.core.bbox.assigners.assign_result import AssignResult + from mmdet.core.bbox.samplers.random_sampler import RandomSampler + rng = demodata.ensure_rng(rng) + + # make probabalistic? + num = 32 + pos_fraction = 0.5 + neg_pos_ub = -1 + + assign_result = AssignResult.random(rng=rng, **kwargs) + + # Note we could just compute an assignment + bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng) + gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng) + + if rng.rand() > 0.2: + # sometimes algorithms squeeze their data, be robust to that + gt_bboxes = gt_bboxes.squeeze() + bboxes = bboxes.squeeze() + + if assign_result.labels is None: + gt_labels = None + else: + gt_labels = None # todo + + if gt_labels is None: + add_gt_as_proposals = False + else: + add_gt_as_proposals = True # make probabalistic? + + sampler = RandomSampler(num, + pos_fraction, + neg_pos_ub=neg_pos_ub, + add_gt_as_proposals=add_gt_as_proposals, + rng=rng) + self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) + return self diff --git a/segmentation/mmseg_custom/core/evaluation/__init__.py b/segmentation/mmseg_custom/core/evaluation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3f0282978a3aa9447bdde9ec473478b60cf9c541 --- /dev/null +++ b/segmentation/mmseg_custom/core/evaluation/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Shanghai AI Lab. All rights reserved. 
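`SamplingResult.random()` above (backed by mmdet's `demodata` helpers) is convenient as a smoke test; a short sketch, assuming mmdet is installed, with arbitrary sizes:

```python
# Smoke test for SamplingResult: random assignment, random boxes, one RandomSampler run.
from mmseg_custom.core.box.samplers.sampling_result import SamplingResult

res = SamplingResult.random(rng=0, num_gts=3, num_preds=20)
print(res.num_gts, res.pos_inds.numel(), res.neg_inds.numel())
print(res.bboxes.shape)          # concatenated positive + negative boxes, (N, 4)
```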
+from .panoptic_utils import INSTANCE_OFFSET # noqa: F401,F403 diff --git a/segmentation/mmseg_custom/core/evaluation/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/core/evaluation/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32e1bc53942fab702ebd788bfab558ec656edd1f Binary files /dev/null and b/segmentation/mmseg_custom/core/evaluation/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/evaluation/__pycache__/panoptic_utils.cpython-39.pyc b/segmentation/mmseg_custom/core/evaluation/__pycache__/panoptic_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6239d1a438143e02ab318b2482f8f9c7a5973146 Binary files /dev/null and b/segmentation/mmseg_custom/core/evaluation/__pycache__/panoptic_utils.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/evaluation/panoptic_utils.py b/segmentation/mmseg_custom/core/evaluation/panoptic_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..10c9ad934e0c9047ccdcfbf0d429ab13b8527d88 --- /dev/null +++ b/segmentation/mmseg_custom/core/evaluation/panoptic_utils.py @@ -0,0 +1,6 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# A custom value to distinguish instance ID and category ID; need to +# be greater than the number of categories. +# For a pixel in the panoptic result map: +# pan_id = ins_id * INSTANCE_OFFSET + cat_id +INSTANCE_OFFSET = 1000 diff --git a/segmentation/mmseg_custom/core/mask/__init__.py b/segmentation/mmseg_custom/core/mask/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d226c23dc17bb0481854336f118f973c0af51f67 --- /dev/null +++ b/segmentation/mmseg_custom/core/mask/__init__.py @@ -0,0 +1,2 @@ +# Copyright (c) Shanghai AI Lab. All rights reserved. +from .utils import mask2bbox # noqa: F401,F403 diff --git a/segmentation/mmseg_custom/core/mask/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/core/mask/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86a4803aa9d0fa0ddc0e99d08e9655f91dcd307c Binary files /dev/null and b/segmentation/mmseg_custom/core/mask/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/mask/__pycache__/utils.cpython-39.pyc b/segmentation/mmseg_custom/core/mask/__pycache__/utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adea342e80b5ef55be6312f57e55a4db853f882d Binary files /dev/null and b/segmentation/mmseg_custom/core/mask/__pycache__/utils.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/mask/utils.py b/segmentation/mmseg_custom/core/mask/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..90544b34f49aa60ac2a1abae10f1a89cc9fe43f0 --- /dev/null +++ b/segmentation/mmseg_custom/core/mask/utils.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +import pycocotools.mask as mask_util +import torch + + +def split_combined_polys(polys, poly_lens, polys_per_mask): + """Split the combined 1-D polys into masks. + + A mask is represented as a list of polys, and a poly is represented as + a 1-D array. In dataset, all masks are concatenated into a single 1-D + tensor. Here we need to split the tensor into original representations. 
+ + Args: + polys (list): a list (length = image num) of 1-D tensors + poly_lens (list): a list (length = image num) of poly length + polys_per_mask (list): a list (length = image num) of poly number + of each mask + + Returns: + list: a list (length = image num) of list (length = mask num) of \ + list (length = poly num) of numpy array. + """ + mask_polys_list = [] + for img_id in range(len(polys)): + polys_single = polys[img_id] + polys_lens_single = poly_lens[img_id].tolist() + polys_per_mask_single = polys_per_mask[img_id].tolist() + + split_polys = mmcv.slice_list(polys_single, polys_lens_single) + mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single) + mask_polys_list.append(mask_polys) + return mask_polys_list + + +# TODO: move this function to more proper place +def encode_mask_results(mask_results): + """Encode bitmap mask to RLE code. + + Args: + mask_results (list | tuple[list]): bitmap mask results. + In mask scoring rcnn, mask_results is a tuple of (segm_results, + segm_cls_score). + + Returns: + list | tuple: RLE encoded mask. + """ + if isinstance(mask_results, tuple): # mask scoring + cls_segms, cls_mask_scores = mask_results + else: + cls_segms = mask_results + num_classes = len(cls_segms) + encoded_mask_results = [[] for _ in range(num_classes)] + for i in range(len(cls_segms)): + for cls_segm in cls_segms[i]: + encoded_mask_results[i].append( + mask_util.encode( + np.array( + cls_segm[:, :, np.newaxis], order='F', + dtype='uint8'))[0]) # encoded with RLE + if isinstance(mask_results, tuple): + return encoded_mask_results, cls_mask_scores + else: + return encoded_mask_results + + +def mask2bbox(masks): + """Obtain tight bounding boxes of binary masks. + + Args: + masks (Tensor): Binary mask of shape (n, h, w). + + Returns: + Tensor: Bboxe with shape (n, 4) of \ + positive region in binary mask. + """ + N = masks.shape[0] + bboxes = masks.new_zeros((N, 4), dtype=torch.float32) + x_any = torch.any(masks, dim=1) + y_any = torch.any(masks, dim=2) + for i in range(N): + x = torch.where(x_any[i, :])[0] + y = torch.where(y_any[i, :])[0] + if len(x) > 0 and len(y) > 0: + bboxes[i, :] = bboxes.new_tensor( + [x[0], y[0], x[-1] + 1, y[-1] + 1]) + + return bboxes diff --git a/segmentation/mmseg_custom/core/utils/__init__.py b/segmentation/mmseg_custom/core/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..26ff24d358b061e1c09029c56681d9116ef62a8c --- /dev/null +++ b/segmentation/mmseg_custom/core/utils/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
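+# A hedged usage sketch for the helpers imported below (toy values only;
+# the package path mmseg_custom.core.utils is assumed):
+#   >>> from mmseg_custom.core.utils import add_prefix, multi_apply
+#   >>> add_prefix({'loss_ce': 1.0}, 'decode')
+#   {'decode.loss_ce': 1.0}
+#   >>> multi_apply(lambda x, y: (x + y, x * y), [1, 2], [3, 4])
+#   ([4, 6], [3, 8])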
+from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, + reduce_mean) +from .misc import add_prefix, multi_apply + +__all__ = [ + 'add_prefix', 'multi_apply', 'DistOptimizerHook', 'allreduce_grads', + 'all_reduce_dict', 'reduce_mean' +] diff --git a/segmentation/mmseg_custom/core/utils/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/core/utils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42949bb6890cf16a770131074f9d41f0317af2cf Binary files /dev/null and b/segmentation/mmseg_custom/core/utils/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/utils/__pycache__/dist_utils.cpython-39.pyc b/segmentation/mmseg_custom/core/utils/__pycache__/dist_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d0e2c7deb18f266925070de388008a4cb6c50c1 Binary files /dev/null and b/segmentation/mmseg_custom/core/utils/__pycache__/dist_utils.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/utils/__pycache__/misc.cpython-39.pyc b/segmentation/mmseg_custom/core/utils/__pycache__/misc.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a728fdd44a14c70682a8dc78520fd18c7e56ce0 Binary files /dev/null and b/segmentation/mmseg_custom/core/utils/__pycache__/misc.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/core/utils/dist_utils.py b/segmentation/mmseg_custom/core/utils/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..88e519f72ba270b56aa4e04d136ca66abbb8e517 --- /dev/null +++ b/segmentation/mmseg_custom/core/utils/dist_utils.py @@ -0,0 +1,148 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import functools +import pickle +import warnings +from collections import OrderedDict + +import torch +import torch.distributed as dist +from mmcv.runner import OptimizerHook, get_dist_info +from torch._utils import (_flatten_dense_tensors, _take_tensors, + _unflatten_dense_tensors) + + +def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): + if bucket_size_mb > 0: + bucket_size_bytes = bucket_size_mb * 1024 * 1024 + buckets = _take_tensors(tensors, bucket_size_bytes) + else: + buckets = OrderedDict() + for tensor in tensors: + tp = tensor.type() + if tp not in buckets: + buckets[tp] = [] + buckets[tp].append(tensor) + buckets = buckets.values() + + for bucket in buckets: + flat_tensors = _flatten_dense_tensors(bucket) + dist.all_reduce(flat_tensors) + flat_tensors.div_(world_size) + for tensor, synced in zip( + bucket, _unflatten_dense_tensors(flat_tensors, bucket)): + tensor.copy_(synced) + + +def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): + """Allreduce gradients. + + Args: + params (list[torch.Parameters]): List of parameters of a model + coalesce (bool, optional): Whether allreduce parameters as a whole. + Defaults to True. + bucket_size_mb (int, optional): Size of bucket, the unit is MB. + Defaults to -1. 
+ """ + grads = [ + param.grad.data for param in params + if param.requires_grad and param.grad is not None + ] + world_size = dist.get_world_size() + if coalesce: + _allreduce_coalesced(grads, world_size, bucket_size_mb) + else: + for tensor in grads: + dist.all_reduce(tensor.div_(world_size)) + + +class DistOptimizerHook(OptimizerHook): + """Deprecated optimizer hook for distributed training.""" + def __init__(self, *args, **kwargs): + warnings.warn('"DistOptimizerHook" is deprecated, please switch to' + '"mmcv.runner.OptimizerHook".') + super().__init__(*args, **kwargs) + + +def reduce_mean(tensor): + """"Obtain the mean of tensor on different GPUs.""" + if not (dist.is_available() and dist.is_initialized()): + return tensor + tensor = tensor.clone() + dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) + return tensor + + +def obj2tensor(pyobj, device='cuda'): + """Serialize picklable python object to tensor.""" + storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj)) + return torch.ByteTensor(storage).to(device=device) + + +def tensor2obj(tensor): + """Deserialize tensor to picklable python object.""" + return pickle.loads(tensor.cpu().numpy().tobytes()) + + +@functools.lru_cache() +def _get_global_gloo_group(): + """Return a process group based on gloo backend, containing all the ranks + The result is cached.""" + if dist.get_backend() == 'nccl': + return dist.new_group(backend='gloo') + else: + return dist.group.WORLD + + +def all_reduce_dict(py_dict, op='sum', group=None, to_float=True): + """Apply all reduce function for python dict object. + + The code is modified from https://github.com/Megvii- + BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py. + + NOTE: make sure that py_dict in different ranks has the same keys and + the values should be in the same shape. + + Args: + py_dict (dict): Dict to be applied all reduce op. + op (str): Operator, could be 'sum' or 'mean'. Default: 'sum' + group (:obj:`torch.distributed.group`, optional): Distributed group, + Default: None. + to_float (bool): Whether to convert all values of dict to float. + Default: True. + + Returns: + OrderedDict: reduced python dict object. + """ + _, world_size = get_dist_info() + if world_size == 1: + return py_dict + if group is None: + # TODO: May try not to use gloo in the future + group = _get_global_gloo_group() + if dist.get_world_size(group) == 1: + return py_dict + + # all reduce logic across different devices. + py_key = list(py_dict.keys()) + py_key_tensor = obj2tensor(py_key) + dist.broadcast(py_key_tensor, src=0) + py_key = tensor2obj(py_key_tensor) + + tensor_shapes = [py_dict[k].shape for k in py_key] + tensor_numels = [py_dict[k].numel() for k in py_key] + + if to_float: + flatten_tensor = torch.cat( + [py_dict[k].flatten().float() for k in py_key]) + else: + flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key]) + + dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM) + if op == 'mean': + flatten_tensor /= world_size + + split_tensors = [ + x.reshape(shape) for x, shape in zip( + torch.split(flatten_tensor, tensor_numels), tensor_shapes) + ] + return OrderedDict({k: v for k, v in zip(py_key, split_tensors)}) diff --git a/segmentation/mmseg_custom/core/utils/misc.py b/segmentation/mmseg_custom/core/utils/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..9e161fba7193f4221fa7daf5b659b553683f6f65 --- /dev/null +++ b/segmentation/mmseg_custom/core/utils/misc.py @@ -0,0 +1,40 @@ +# Copyright (c) OpenMMLab. 
All rights reserved.
+from functools import partial
+
+
+def multi_apply(func, *args, **kwargs):
+    """Apply function to a list of arguments.
+
+    Note:
+        This function applies ``func`` to multiple inputs and maps the
+        multiple outputs of ``func`` into different lists. Each list
+        contains the same type of outputs corresponding to different inputs.
+
+    Args:
+        func (Function): A function that will be applied to a list of
+            arguments
+
+    Returns:
+        tuple(list): A tuple of lists, where each list contains one kind of
+            the returned results of ``func``.
+    """
+    pfunc = partial(func, **kwargs) if kwargs else func
+    map_results = map(pfunc, *args)
+    return tuple(map(list, zip(*map_results)))
+
+
+def add_prefix(inputs, prefix):
+    """Add a prefix to every key of a dict.
+
+    Args:
+        inputs (dict): The input dict with str keys.
+        prefix (str): The prefix to add.
+
+    Returns:
+        dict: The dict with keys updated with ``prefix``.
+    """
+
+    outputs = dict()
+    for name, value in inputs.items():
+        outputs[f'{prefix}.{name}'] = value
+
+    return outputs
diff --git a/segmentation/mmseg_custom/datasets/__init__.py b/segmentation/mmseg_custom/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d5ede92883e46937fde15e9c336e51327d33252
--- /dev/null
+++ b/segmentation/mmseg_custom/datasets/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from .dataset_wrappers import ConcatDataset
+from .mapillary import MapillaryDataset  # noqa: F401,F403
+from .nyu_depth_v2 import NYUDepthV2Dataset  # noqa: F401,F403
+from .pipelines import *  # noqa: F401,F403
+
+__all__ = [
+    'MapillaryDataset', 'NYUDepthV2Dataset', 'ConcatDataset'
+]
diff --git a/segmentation/mmseg_custom/datasets/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/datasets/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..caad5196f5ddfcf21d76bb4884926dce76b722b3
Binary files /dev/null and b/segmentation/mmseg_custom/datasets/__pycache__/__init__.cpython-39.pyc differ
diff --git a/segmentation/mmseg_custom/datasets/__pycache__/dataset_wrappers.cpython-39.pyc b/segmentation/mmseg_custom/datasets/__pycache__/dataset_wrappers.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6a30b6a363c513718790a507b02aa815e22a676c
Binary files /dev/null and b/segmentation/mmseg_custom/datasets/__pycache__/dataset_wrappers.cpython-39.pyc differ
diff --git a/segmentation/mmseg_custom/datasets/__pycache__/mapillary.cpython-39.pyc b/segmentation/mmseg_custom/datasets/__pycache__/mapillary.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..20b2e92f65370f0294c4fc146b35871bb039512b
Binary files /dev/null and b/segmentation/mmseg_custom/datasets/__pycache__/mapillary.cpython-39.pyc differ
diff --git a/segmentation/mmseg_custom/datasets/__pycache__/nyu_depth_v2.cpython-39.pyc b/segmentation/mmseg_custom/datasets/__pycache__/nyu_depth_v2.cpython-39.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26ad1ade10b802761578ea14901df05b86dd5ebd
Binary files /dev/null and b/segmentation/mmseg_custom/datasets/__pycache__/nyu_depth_v2.cpython-39.pyc differ
diff --git a/segmentation/mmseg_custom/datasets/dataset_wrappers.py b/segmentation/mmseg_custom/datasets/dataset_wrappers.py
new file mode 100644
index 0000000000000000000000000000000000000000..daef16d2c2c362aa44ed8472842bd6b3f883fe73
--- /dev/null
+++ b/segmentation/mmseg_custom/datasets/dataset_wrappers.py
@@ -0,0 +1,154 @@
+# Copyright (c) OpenMMLab.
All rights reserved. +import bisect +from itertools import chain + +import mmcv +import numpy as np +from mmcv.utils import build_from_cfg, print_log +from mmseg.datasets.builder import DATASETS +from torch.utils.data.dataset import ConcatDataset as _ConcatDataset + + +@DATASETS.register_module(force=True) +class ConcatDataset(_ConcatDataset): + """A wrapper of concatenated dataset. + + Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but + support evaluation and formatting results + + Args: + datasets (list[:obj:`Dataset`]): A list of datasets. + separate_eval (bool): Whether to evaluate the concatenated + dataset results separately, Defaults to True. + """ + + def __init__(self, datasets, separate_eval=True): + super(ConcatDataset, self).__init__(datasets) + self.CLASSES = datasets[0].CLASSES + self.PALETTE = datasets[0].PALETTE + self.separate_eval = separate_eval + assert separate_eval in [True, False], \ + f'separate_eval can only be True or False,' \ + f'but get {separate_eval}' + + def evaluate(self, results, logger=None, **kwargs): + """Evaluate the results. + + Args: + results (list[tuple[torch.Tensor]] | list[str]]): per image + pre_eval results or predict segmentation map for + computing evaluation metric. + logger (logging.Logger | str | None): Logger used for printing + related information during evaluation. Default: None. + + Returns: + dict[str: float]: evaluate results of the total dataset + or each separate + dataset if `self.separate_eval=True`. + """ + assert len(results) == self.cumulative_sizes[-1], \ + ('Dataset and results have different sizes: ' + f'{self.cumulative_sizes[-1]} v.s. {len(results)}') + + # Check whether all the datasets support evaluation + for dataset in self.datasets: + assert hasattr(dataset, 'evaluate'), \ + f'{type(dataset)} does not implement evaluate function' + + if self.separate_eval: + dataset_idx = -1 + total_eval_results = dict() + for size, dataset in zip(self.cumulative_sizes, self.datasets): + start_idx = 0 if dataset_idx == -1 else \ + self.cumulative_sizes[dataset_idx] + end_idx = self.cumulative_sizes[dataset_idx + 1] + + results_per_dataset = results[start_idx:end_idx] + print_log( + f'\nEvaluateing {dataset.img_dir} with ' + f'{len(results_per_dataset)} images now', + logger=logger) + + eval_results_per_dataset = dataset.evaluate( + results_per_dataset, logger=logger, **kwargs) + dataset_idx += 1 + for k, v in eval_results_per_dataset.items(): + total_eval_results.update({f'{dataset_idx}_{k}': v}) + + return total_eval_results + + if len(set([type(ds) for ds in self.datasets])) != 1: + raise NotImplementedError( + 'All the datasets should have same types when ' + 'self.separate_eval=False') + else: + if mmcv.is_list_of(results, np.ndarray) or mmcv.is_list_of( + results, str): + # merge the generators of gt_seg_maps + gt_seg_maps = chain( + *[dataset.get_gt_seg_maps() for dataset in self.datasets]) + else: + # if the results are `pre_eval` results, + # we do not need gt_seg_maps to evaluate + gt_seg_maps = None + eval_results = self.datasets[0].evaluate( + results, gt_seg_maps=gt_seg_maps, logger=logger, **kwargs) + return eval_results + + def get_dataset_idx_and_sample_idx(self, indice): + """Return dataset and sample index when given an indice of + ConcatDataset. 
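+
+        Example (a hedged sketch of the underlying arithmetic with toy sizes;
+        the real method additionally handles dataset_idx == 0 and negative
+        indices):
+            >>> import bisect
+            >>> cumulative_sizes = [10, 25]   # two datasets with 10 and 15 samples
+            >>> indice = 12
+            >>> dataset_idx = bisect.bisect_right(cumulative_sizes, indice)
+            >>> sample_idx = indice - cumulative_sizes[dataset_idx - 1]
+            >>> (dataset_idx, sample_idx)
+            (1, 2)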
+ + Args: + indice (int): indice of sample in ConcatDataset + + Returns: + int: the index of sub dataset the sample belong to + int: the index of sample in its corresponding subset + """ + if indice < 0: + if -indice > len(self): + raise ValueError( + 'absolute value of index should not exceed dataset length') + indice = len(self) + indice + dataset_idx = bisect.bisect_right(self.cumulative_sizes, indice) + if dataset_idx == 0: + sample_idx = indice + else: + sample_idx = indice - self.cumulative_sizes[dataset_idx - 1] + return dataset_idx, sample_idx + + def format_results(self, results, imgfile_prefix, indices=None, **kwargs): + """format result for every sample of ConcatDataset.""" + if indices is None: + indices = list(range(len(self))) + + assert isinstance(results, list), 'results must be a list.' + assert isinstance(indices, list), 'indices must be a list.' + + ret_res = [] + for i, indice in enumerate(indices): + dataset_idx, sample_idx = self.get_dataset_idx_and_sample_idx( + indice) + res = self.datasets[dataset_idx].format_results( + [results[i]], + imgfile_prefix + f'/{dataset_idx}', + indices=[sample_idx], + **kwargs) + ret_res.append(res) + return sum(ret_res, []) + + def pre_eval(self, preds, indices): + """do pre eval for every sample of ConcatDataset.""" + # In order to compat with batch inference + if not isinstance(indices, list): + indices = [indices] + if not isinstance(preds, list): + preds = [preds] + ret_res = [] + for i, indice in enumerate(indices): + dataset_idx, sample_idx = self.get_dataset_idx_and_sample_idx( + indice) + res = self.datasets[dataset_idx].pre_eval(preds[i], sample_idx) + ret_res.append(res) + return sum(ret_res, []) diff --git a/segmentation/mmseg_custom/datasets/mapillary.py b/segmentation/mmseg_custom/datasets/mapillary.py new file mode 100644 index 0000000000000000000000000000000000000000..7a5a124eda2074ef83bee8209a6b0f1e3a8d4466 --- /dev/null +++ b/segmentation/mmseg_custom/datasets/mapillary.py @@ -0,0 +1,48 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +from mmseg.datasets.builder import DATASETS +from mmseg.datasets.custom import CustomDataset + + +@DATASETS.register_module() +class MapillaryDataset(CustomDataset): + """Mapillary dataset. 
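+
+    65 semantic classes plus 'Unlabeled', with '.jpg' images and '.png'
+    label maps. A hedged config sketch (paths and the pipeline name are
+    placeholders):
+
+        dict(type='MapillaryDataset',
+             data_root='data/mapillary/',
+             img_dir='training/images',
+             ann_dir='training/labels',
+             pipeline=train_pipeline)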
+ """ + CLASSES = ('Bird', 'Ground Animal', 'Curb', 'Fence', 'Guard Rail', 'Barrier', + 'Wall', 'Bike Lane', 'Crosswalk - Plain', 'Curb Cut', 'Parking', 'Pedestrian Area', + 'Rail Track', 'Road', 'Service Lane', 'Sidewalk', 'Bridge', 'Building', 'Tunnel', + 'Person', 'Bicyclist', 'Motorcyclist', 'Other Rider', 'Lane Marking - Crosswalk', + 'Lane Marking - General', 'Mountain', 'Sand', 'Sky', 'Snow', 'Terrain', 'Vegetation', + 'Water', 'Banner', 'Bench', 'Bike Rack', 'Billboard', 'Catch Basin', 'CCTV Camera', + 'Fire Hydrant', 'Junction Box', 'Mailbox', 'Manhole', 'Phone Booth', 'Pothole', + 'Street Light', 'Pole', 'Traffic Sign Frame', 'Utility Pole', 'Traffic Light', + 'Traffic Sign (Back)', 'Traffic Sign (Front)', 'Trash Can', 'Bicycle', 'Boat', + 'Bus', 'Car', 'Caravan', 'Motorcycle', 'On Rails', 'Other Vehicle', 'Trailer', + 'Truck', 'Wheeled Slow', 'Car Mount', 'Ego Vehicle', 'Unlabeled') + + PALETTE = [[165, 42, 42], [0, 192, 0], [196, 196, 196], [190, 153, 153], + [180, 165, 180], [90, 120, 150], [102, 102, 156], [128, 64, 255], + [140, 140, 200], [170, 170, 170], [250, 170, 160], [96, 96, 96], + [230, 150, 140], [128, 64, 128], [110, 110, 110], [244, 35, 232], + [150, 100, 100], [70, 70, 70], [150, 120, 90], [220, 20, 60], + [255, 0, 0], [255, 0, 100], [255, 0, 200], [200, 128, 128], + [255, 255, 255], [64, 170, 64], [230, 160, 50], [70, 130, 180], + [190, 255, 255], [152, 251, 152], [107, 142, 35], [0, 170, 30], + [255, 255, 128], [250, 0, 30], [100, 140, 180], [220, 220, 220], + [220, 128, 128], [222, 40, 40], [100, 170, 30], [40, 40, 40], + [33, 33, 33], [100, 128, 160], [142, 0, 0], [70, 100, 150], + [210, 170, 100], [153, 153, 153], [128, 128, 128], [0, 0, 80], + [250, 170, 30], [192, 192, 192], [220, 220, 0], [140, 140, 20], + [119, 11, 32], [150, 0, 255], [0, 60, 100], [0, 0, 142], [0, 0, 90], + [0, 0, 230], [0, 80, 100], [128, 64, 64], [0, 0, 110], [0, 0, 70], + [0, 0, 192], [32, 32, 32], [120, 10, 10], [0, 0, 0]] + + def __init__(self, **kwargs): + super(MapillaryDataset, self).__init__( + img_suffix='.jpg', + seg_map_suffix='.png', + reduce_zero_label=False, + **kwargs) diff --git a/segmentation/mmseg_custom/datasets/nyu_depth_v2.py b/segmentation/mmseg_custom/datasets/nyu_depth_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..65615cb090592a743cd3a48eca129cecf32a603f --- /dev/null +++ b/segmentation/mmseg_custom/datasets/nyu_depth_v2.py @@ -0,0 +1,41 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- +from mmseg.datasets.builder import DATASETS +from mmseg.datasets.custom import CustomDataset + + +@DATASETS.register_module() +class NYUDepthV2Dataset(CustomDataset): + """NYU Depth V2 dataset. 
+ """ + + CLASSES = ('wall', 'floor', 'cabinet', 'bed', 'chair', + 'sofa', 'table', 'door', 'window', 'bookshelf', + 'picture', 'counter', 'blinds', 'desk', 'shelves', + 'curtain', 'dresser', 'pillow', 'mirror', 'floor mat', + 'clothes', 'ceiling', 'books', 'refridgerator', 'television', + 'paper', 'towel', 'shower curtain', 'box', 'whiteboard', + 'person', 'night stand', 'toilet', 'sink', 'lamp', + 'bathtub', 'bag', 'otherstructure', 'otherfurniture', 'otherprop') + + PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], + [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], + [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], + [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], + [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], + [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], + [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], + [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], + [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], + [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],] + + def __init__(self, split, **kwargs): + super(NYUDepthV2Dataset, self).__init__( + img_suffix='.png', + seg_map_suffix='.png', + split=split, + reduce_zero_label=True, + **kwargs) diff --git a/segmentation/mmseg_custom/datasets/pipelines/__init__.py b/segmentation/mmseg_custom/datasets/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b0f53e3d9f2d0b4c90d489d0934106ce95b12b1c --- /dev/null +++ b/segmentation/mmseg_custom/datasets/pipelines/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .formatting import DefaultFormatBundle, ToMask +from .transform import MapillaryHack, PadShortSide, SETR_Resize + +__all__ = [ + 'DefaultFormatBundle', 'ToMask', 'SETR_Resize', + 'PadShortSide', 'MapillaryHack' +] diff --git a/segmentation/mmseg_custom/datasets/pipelines/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/datasets/pipelines/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eb57cf95afdbe402da56ecae3ad24e30d475defd Binary files /dev/null and b/segmentation/mmseg_custom/datasets/pipelines/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/datasets/pipelines/__pycache__/formatting.cpython-39.pyc b/segmentation/mmseg_custom/datasets/pipelines/__pycache__/formatting.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa19c03342bdd3205ba878900e462fdf000f543b Binary files /dev/null and b/segmentation/mmseg_custom/datasets/pipelines/__pycache__/formatting.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/datasets/pipelines/__pycache__/transform.cpython-39.pyc b/segmentation/mmseg_custom/datasets/pipelines/__pycache__/transform.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..152b9331a4fcf1c27c8faaaedc7dc91415c65594 Binary files /dev/null and b/segmentation/mmseg_custom/datasets/pipelines/__pycache__/transform.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/datasets/pipelines/formatting.py b/segmentation/mmseg_custom/datasets/pipelines/formatting.py new file mode 100644 index 0000000000000000000000000000000000000000..d1a41ad6365a2470f337ad2b6a19713167e04fa1 --- /dev/null +++ b/segmentation/mmseg_custom/datasets/pipelines/formatting.py @@ -0,0 +1,82 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import numpy as np +from mmcv.parallel import DataContainer as DC +from mmseg.datasets.builder import PIPELINES +from mmseg.datasets.pipelines.formatting import to_tensor + + +@PIPELINES.register_module(force=True) +class DefaultFormatBundle(object): + """Default formatting bundle. + + It simplifies the pipeline of formatting common fields, including "img" + and "gt_semantic_seg". These fields are formatted as follows. + + - img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True) + - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, + (3)to DataContainer (stack=True) + """ + def __call__(self, results): + """Call function to transform and format common fields in results. + + Args: + results (dict): Result dict contains the data to convert. + + Returns: + dict: The result dict contains the data that is formatted with + default bundle. + """ + + if 'img' in results: + img = results['img'] + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + img = np.ascontiguousarray(img.transpose(2, 0, 1)) + results['img'] = DC(to_tensor(img), stack=True) + if 'gt_semantic_seg' in results: + # convert to long + results['gt_semantic_seg'] = DC(to_tensor( + results['gt_semantic_seg'][None, ...].astype(np.int64)), + stack=True) + if 'gt_masks' in results: + results['gt_masks'] = DC(to_tensor(results['gt_masks'])) + if 'gt_labels' in results: + results['gt_labels'] = DC(to_tensor(results['gt_labels'])) + + return results + + def __repr__(self): + return self.__class__.__name__ + + +@PIPELINES.register_module() +class ToMask(object): + """Transfer gt_semantic_seg to binary mask and generate gt_labels.""" + def __init__(self, ignore_index=255): + self.ignore_index = ignore_index + + def __call__(self, results): + gt_semantic_seg = results['gt_semantic_seg'] + gt_labels = np.unique(gt_semantic_seg) + # remove ignored region + gt_labels = gt_labels[gt_labels != self.ignore_index] + + gt_masks = [] + for class_id in gt_labels: + gt_masks.append(gt_semantic_seg == class_id) + + if len(gt_masks) == 0: + # Some image does not have annotation (all ignored) + gt_masks = np.empty((0, ) + results['pad_shape'][:-1], dtype=np.int64) + gt_labels = np.empty((0, ), dtype=np.int64) + else: + gt_masks = np.asarray(gt_masks, dtype=np.int64) + gt_labels = np.asarray(gt_labels, dtype=np.int64) + + results['gt_labels'] = gt_labels + results['gt_masks'] = gt_masks + return results + + def __repr__(self): + return self.__class__.__name__ + \ + f'(ignore_index={self.ignore_index})' diff --git a/segmentation/mmseg_custom/datasets/pipelines/transform.py b/segmentation/mmseg_custom/datasets/pipelines/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..444afc1e446dd576f46f0a7480057b3b581d20a4 --- /dev/null +++ b/segmentation/mmseg_custom/datasets/pipelines/transform.py @@ -0,0 +1,350 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import mmcv +import numpy as np +from mmseg.datasets.builder import PIPELINES + + +@PIPELINES.register_module() +class SETR_Resize(object): + """Resize images & seg. + + This transform resizes the input image to some scale. If the input dict + contains the key "scale", then the scale in the input dict is used, + otherwise the specified scale in the init method is used. + + ``img_scale`` can either be a tuple (single-scale) or a list of tuple + (multi-scale). There are 3 multiscale modes: + + - ``ratio_range is not None``: randomly sample a ratio from the ratio range + and multiply it with the image scale. 
+ + - ``ratio_range is None and multiscale_mode == "range"``: randomly sample a + scale from the a range. + + - ``ratio_range is None and multiscale_mode == "value"``: randomly sample a + scale from multiple scales. + + Args: + img_scale (tuple or list[tuple]): Images scales for resizing. + multiscale_mode (str): Either "range" or "value". + ratio_range (tuple[float]): (min_ratio, max_ratio) + keep_ratio (bool): Whether to keep the aspect ratio when resizing the + image. + """ + def __init__(self, + img_scale=None, + multiscale_mode='range', + ratio_range=None, + keep_ratio=True, + crop_size=None, + setr_multi_scale=False): + + if img_scale is None: + self.img_scale = None + else: + if isinstance(img_scale, list): + self.img_scale = img_scale + else: + self.img_scale = [img_scale] + # assert mmcv.is_list_of(self.img_scale, tuple) + + if ratio_range is not None: + # mode 1: given a scale and a range of image ratio + assert len(self.img_scale) == 1 + else: + # mode 2: given multiple scales or a range of scales + assert multiscale_mode in ['value', 'range'] + + self.multiscale_mode = multiscale_mode + self.ratio_range = ratio_range + self.keep_ratio = keep_ratio + self.crop_size = crop_size + self.setr_multi_scale = setr_multi_scale + + @staticmethod + def random_select(img_scales): + """Randomly select an img_scale from given candidates. + + Args: + img_scales (list[tuple]): Images scales for selection. + + Returns: + (tuple, int): Returns a tuple ``(img_scale, scale_dix)``, + where ``img_scale`` is the selected image scale and + ``scale_idx`` is the selected index in the given candidates. + """ + + assert mmcv.is_list_of(img_scales, tuple) + scale_idx = np.random.randint(len(img_scales)) + img_scale = img_scales[scale_idx] + return img_scale, scale_idx + + @staticmethod + def random_sample(img_scales): + """Randomly sample an img_scale when ``multiscale_mode=='range'``. + + Args: + img_scales (list[tuple]): Images scale range for sampling. + There must be two tuples in img_scales, which specify the lower + and uper bound of image scales. + + Returns: + (tuple, None): Returns a tuple ``(img_scale, None)``, where + ``img_scale`` is sampled scale and None is just a placeholder + to be consistent with :func:`random_select`. + """ + + assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2 + img_scale_long = [max(s) for s in img_scales] + img_scale_short = [min(s) for s in img_scales] + long_edge = np.random.randint( + min(img_scale_long), + max(img_scale_long) + 1) + short_edge = np.random.randint( + min(img_scale_short), + max(img_scale_short) + 1) + img_scale = (long_edge, short_edge) + return img_scale, None + + @staticmethod + def random_sample_ratio(img_scale, ratio_range): + """Randomly sample an img_scale when ``ratio_range`` is specified. + + A ratio will be randomly sampled from the range specified by + ``ratio_range``. Then it would be multiplied with ``img_scale`` to + generate sampled scale. + + Args: + img_scale (tuple): Images scale base to multiply with ratio. + ratio_range (tuple[float]): The minimum and maximum ratio to scale + the ``img_scale``. + + Returns: + (tuple, None): Returns a tuple ``(scale, None)``, where + ``scale`` is sampled ratio multiplied with ``img_scale`` and + None is just a placeholder to be consistent with + :func:`random_select`. 
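+
+        Example (a hedged sketch; the sampled ratio is random, so only the
+        bounds are checked):
+            >>> scale, _ = SETR_Resize.random_sample_ratio((512, 1024), (0.5, 2.0))
+            >>> 256 <= scale[0] <= 1024 and 512 <= scale[1] <= 2048
+            True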
+ """ + + assert isinstance(img_scale, tuple) and len(img_scale) == 2 + min_ratio, max_ratio = ratio_range + assert min_ratio <= max_ratio + ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio + scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio) + return scale, None + + def _random_scale(self, results): + """Randomly sample an img_scale according to ``ratio_range`` and + ``multiscale_mode``. + + If ``ratio_range`` is specified, a ratio will be sampled and be + multiplied with ``img_scale``. + If multiple scales are specified by ``img_scale``, a scale will be + sampled according to ``multiscale_mode``. + Otherwise, single scale will be used. + + Args: + results (dict): Result dict from :obj:`dataset`. + + Returns: + dict: Two new keys 'scale` and 'scale_idx` are added into + ``results``, which would be used by subsequent pipelines. + """ + + if self.ratio_range is not None: + scale, scale_idx = self.random_sample_ratio( + self.img_scale[0], self.ratio_range) + elif len(self.img_scale) == 1: + scale, scale_idx = self.img_scale[0], 0 + elif self.multiscale_mode == 'range': + scale, scale_idx = self.random_sample(self.img_scale) + elif self.multiscale_mode == 'value': + scale, scale_idx = self.random_select(self.img_scale) + else: + raise NotImplementedError + + results['scale'] = scale + results['scale_idx'] = scale_idx + + def _resize_img(self, results): + """Resize images with ``results['scale']``.""" + + if self.keep_ratio: + if self.setr_multi_scale: + if min(results['scale']) < self.crop_size[0]: + new_short = self.crop_size[0] + else: + new_short = min(results['scale']) + + h, w = results['img'].shape[:2] + if h > w: + new_h, new_w = new_short * h / w, new_short + else: + new_h, new_w = new_short, new_short * w / h + results['scale'] = (new_h, new_w) + + img, scale_factor = mmcv.imrescale(results['img'], + results['scale'], + return_scale=True) + # the w_scale and h_scale has minor difference + # a real fix should be done in the mmcv.imrescale in the future + new_h, new_w = img.shape[:2] + h, w = results['img'].shape[:2] + w_scale = new_w / w + h_scale = new_h / h + else: + img, w_scale, h_scale = mmcv.imresize(results['img'], + results['scale'], + return_scale=True) + scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], + dtype=np.float32) + results['img'] = img + results['img_shape'] = img.shape + results['pad_shape'] = img.shape # in case that there is no padding + results['scale_factor'] = scale_factor + results['keep_ratio'] = self.keep_ratio + + def _resize_seg(self, results): + """Resize semantic segmentation map with ``results['scale']``.""" + for key in results.get('seg_fields', []): + if self.keep_ratio: + gt_seg = mmcv.imrescale(results[key], + results['scale'], + interpolation='nearest') + else: + gt_seg = mmcv.imresize(results[key], + results['scale'], + interpolation='nearest') + results['gt_semantic_seg'] = gt_seg + + def __call__(self, results): + """Call function to resize images, bounding boxes, masks, semantic + segmentation map. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', + 'keep_ratio' keys are added into result dict. 
+ """ + + if 'scale' not in results: + self._random_scale(results) + self._resize_img(results) + self._resize_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += (f'(img_scale={self.img_scale}, ' + f'multiscale_mode={self.multiscale_mode}, ' + f'ratio_range={self.ratio_range}, ' + f'keep_ratio={self.keep_ratio})') + return repr_str + + +@PIPELINES.register_module() +class PadShortSide(object): + """Pad the image & mask. + + Pad to the minimum size that is equal or larger than a number. + Added keys are "pad_shape", "pad_fixed_size", + + Args: + size (int, optional): Fixed padding size. + pad_val (float, optional): Padding value. Default: 0. + seg_pad_val (float, optional): Padding value of segmentation map. + Default: 255. + """ + def __init__(self, size=None, pad_val=0, seg_pad_val=255): + self.size = size + self.pad_val = pad_val + self.seg_pad_val = seg_pad_val + # only one of size and size_divisor should be valid + assert size is not None + + def _pad_img(self, results): + """Pad images according to ``self.size``.""" + h, w = results['img'].shape[:2] + new_h = max(h, self.size) + new_w = max(w, self.size) + padded_img = mmcv.impad(results['img'], + shape=(new_h, new_w), + pad_val=self.pad_val) + + results['img'] = padded_img + results['pad_shape'] = padded_img.shape + # results['unpad_shape'] = (h, w) + + def _pad_seg(self, results): + """Pad masks according to ``results['pad_shape']``.""" + for key in results.get('seg_fields', []): + results[key] = mmcv.impad(results[key], + shape=results['pad_shape'][:2], + pad_val=self.seg_pad_val) + + def __call__(self, results): + """Call function to pad images, masks, semantic segmentation maps. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Updated result dict. + """ + h, w = results['img'].shape[:2] + if h >= self.size and w >= self.size: # 短边比窗口大,跳过 + pass + else: + self._pad_img(results) + self._pad_seg(results) + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(size={self.size}, pad_val={self.pad_val})' + return repr_str + + +@PIPELINES.register_module() +class MapillaryHack(object): + """map MV 65 class to 19 class like Cityscapes.""" + def __init__(self): + self.map = [[13, 24, 41], [2, 15], [17], [6], [3], + [45, 47], [48], [50], [30], [29], [27], [19], [20, 21, 22], + [55], [61], [54], [58], [57], [52]] + + self.others = [i for i in range(66)] + for i in self.map: + for j in i: + if j in self.others: + self.others.remove(j) + + def __call__(self, results): + """Call function to process the image with gamma correction. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: Processed results. 
+ """ + gt_map = results['gt_semantic_seg'] + # others -> 255 + new_gt_map = np.zeros_like(gt_map) + + for value in self.others: + new_gt_map[gt_map == value] = 255 + + for index, map in enumerate(self.map): + for value in map: + new_gt_map[gt_map == value] = index + + results['gt_semantic_seg'] = new_gt_map + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + return repr_str diff --git a/segmentation/mmseg_custom/models/__init__.py b/segmentation/mmseg_custom/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e80ff3f9777d289d6174058a92e203de9207dd1b --- /dev/null +++ b/segmentation/mmseg_custom/models/__init__.py @@ -0,0 +1,12 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from .backbones import * # noqa: F401,F403 +from .decode_heads import * # noqa: F401,F403 +from .losses import * # noqa: F401,F403 +from .plugins import * # noqa: F401,F403 +from .segmentors import * # noqa: F401,F403 +from .utils import * # noqa: F401,F403 diff --git a/segmentation/mmseg_custom/models/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/models/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd33ecf753f0eeaac06fc3f99133046b0df56666 Binary files /dev/null and b/segmentation/mmseg_custom/models/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/__pycache__/builder.cpython-39.pyc b/segmentation/mmseg_custom/models/__pycache__/builder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fd304692a51f38d36bc94f17cb6d61408fdf370 Binary files /dev/null and b/segmentation/mmseg_custom/models/__pycache__/builder.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/backbones/__init__.py b/segmentation/mmseg_custom/models/backbones/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a45c14fc71fa2d44146713256008b220602f8ceb --- /dev/null +++ b/segmentation/mmseg_custom/models/backbones/__init__.py @@ -0,0 +1,9 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from .intern_image import InternImage + +__all__ = ['InternImage'] diff --git a/segmentation/mmseg_custom/models/backbones/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/models/backbones/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c171c9f8517072f017dcaaa6e71b4a06c087a2de Binary files /dev/null and b/segmentation/mmseg_custom/models/backbones/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/backbones/__pycache__/intern_image.cpython-39.pyc b/segmentation/mmseg_custom/models/backbones/__pycache__/intern_image.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7342bb341c5356c882f88745b5f52b09567525ac Binary files /dev/null and b/segmentation/mmseg_custom/models/backbones/__pycache__/intern_image.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/backbones/intern_image.py b/segmentation/mmseg_custom/models/backbones/intern_image.py new file mode 100644 index 
0000000000000000000000000000000000000000..3eaa6212189415a2d62d7eeaa02c23d61ee83074 --- /dev/null +++ b/segmentation/mmseg_custom/models/backbones/intern_image.py @@ -0,0 +1,725 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from collections import OrderedDict + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from mmcv.cnn import constant_init, trunc_normal_init +from mmcv.runner import _load_checkpoint +from mmseg.models.builder import BACKBONES +from mmseg.utils import get_root_logger +from ops_dcnv3 import modules as dcnv3 +from timm.models.layers import DropPath, trunc_normal_ + + +class to_channels_first(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 3, 1, 2) + + +class to_channels_last(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 2, 3, 1) + + +def build_norm_layer(dim, + norm_layer, + in_format='channels_last', + out_format='channels_last', + eps=1e-6): + layers = [] + if norm_layer == 'BN': + if in_format == 'channels_last': + layers.append(to_channels_first()) + layers.append(nn.BatchNorm2d(dim)) + if out_format == 'channels_last': + layers.append(to_channels_last()) + elif norm_layer == 'LN': + if in_format == 'channels_first': + layers.append(to_channels_last()) + layers.append(nn.LayerNorm(dim, eps=eps)) + if out_format == 'channels_first': + layers.append(to_channels_first()) + else: + raise NotImplementedError( + f'build_norm_layer does not support {norm_layer}') + return nn.Sequential(*layers) + + +def build_act_layer(act_layer): + if act_layer == 'ReLU': + return nn.ReLU(inplace=True) + elif act_layer == 'SiLU': + return nn.SiLU(inplace=True) + elif act_layer == 'GELU': + return nn.GELU() + + raise NotImplementedError(f'build_act_layer does not support {act_layer}') + + +class CrossAttention(nn.Module): + r""" Cross Attention Module + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. Default: 8 + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: False. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + attn_drop (float, optional): Dropout ratio of attention weight. + Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. Default: 0.0 + attn_head_dim (int, optional): Dimension of attention head. + out_dim (int, optional): Dimension of output. 
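+
+    Example (a hedged shape-check sketch):
+        >>> import torch
+        >>> attn = CrossAttention(dim=64, num_heads=8)
+        >>> q = torch.randn(2, 1, 64)    # query tokens
+        >>> kv = torch.randn(2, 49, 64)  # key/value tokens
+        >>> attn(q, k=kv, v=kv).shape
+        torch.Size([2, 1, 64])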
+ """ + + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0., + attn_head_dim=None, + out_dim=None): + super().__init__() + if out_dim is None: + out_dim = dim + self.num_heads = num_heads + head_dim = dim // num_heads + if attn_head_dim is not None: + head_dim = attn_head_dim + all_head_dim = head_dim * self.num_heads + self.scale = qk_scale or head_dim ** -0.5 + assert all_head_dim == dim + + self.q = nn.Linear(dim, all_head_dim, bias=False) + self.k = nn.Linear(dim, all_head_dim, bias=False) + self.v = nn.Linear(dim, all_head_dim, bias=False) + + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.k_bias = nn.Parameter(torch.zeros(all_head_dim)) + self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) + else: + self.q_bias = None + self.k_bias = None + self.v_bias = None + + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(all_head_dim, out_dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, k=None, v=None): + B, N, C = x.shape + N_k = k.shape[1] + N_v = v.shape[1] + + q_bias, k_bias, v_bias = None, None, None + if self.q_bias is not None: + q_bias = self.q_bias + k_bias = self.k_bias + v_bias = self.v_bias + + q = F.linear(input=x, weight=self.q.weight, bias=q_bias) + q = q.reshape(B, N, 1, self.num_heads, + -1).permute(2, 0, 3, 1, + 4).squeeze(0) # (B, N_head, N_q, dim) + + k = F.linear(input=k, weight=self.k.weight, bias=k_bias) + k = k.reshape(B, N_k, 1, self.num_heads, -1).permute(2, 0, 3, 1, + 4).squeeze(0) + + v = F.linear(input=v, weight=self.v.weight, bias=v_bias) + v = v.reshape(B, N_v, 1, self.num_heads, -1).permute(2, 0, 3, 1, + 4).squeeze(0) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) # (B, N_head, N_q, N_k) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + + return x + + +class AttentiveBlock(nn.Module): + r"""Attentive Block + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. Default: 8 + qkv_bias (bool, optional): If True, add a learnable bias to q, k, v. + Default: False. + qk_scale (float | None, optional): Override default qk scale of + head_dim ** -0.5 if set. Default: None. + drop (float, optional): Dropout rate. Default: 0.0. + attn_drop (float, optional): Attention dropout rate. Default: 0.0. + drop_path (float | tuple[float], optional): Stochastic depth rate. + Default: 0.0. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm. + attn_head_dim (int, optional): Dimension of attention head. Default: None. + out_dim (int, optional): Dimension of output. Default: None. + """ + + def __init__(self, + dim, + num_heads, + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + norm_layer='LN', + attn_head_dim=None, + out_dim=None): + super().__init__() + + self.norm1_q = build_norm_layer(dim, norm_layer, eps=1e-6) + self.norm1_k = build_norm_layer(dim, norm_layer, eps=1e-6) + self.norm1_v = build_norm_layer(dim, norm_layer, eps=1e-6) + self.cross_dcn = CrossAttention(dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop, + attn_head_dim=attn_head_dim, + out_dim=out_dim) + + self.drop_path = DropPath( + drop_path) if drop_path > 0. 
else nn.Identity() + + def forward(self, + x_q, + x_kv, + pos_q, + pos_k, + bool_masked_pos, + rel_pos_bias=None): + x_q = self.norm1_q(x_q + pos_q) + x_k = self.norm1_k(x_kv + pos_k) + x_v = self.norm1_v(x_kv) + + x = self.cross_dcn(x_q, k=x_k, v=x_v) + + return x + + +class AttentionPoolingBlock(AttentiveBlock): + + def forward(self, x): + x_q = x.mean(1, keepdim=True) + x_kv = x + pos_q, pos_k = 0, 0 + x = super().forward(x_q, x_kv, pos_q, pos_k, + bool_masked_pos=None, + rel_pos_bias=None) + x = x.squeeze(1) + return x + + +class StemLayer(nn.Module): + r""" Stem layer of InternImage + Args: + in_chans (int): number of input channels + out_chans (int): number of output channels + act_layer (str): activation layer + norm_layer (str): normalization layer + """ + + def __init__(self, + in_chans=3, + out_chans=96, + act_layer='GELU', + norm_layer='BN'): + super().__init__() + self.conv1 = nn.Conv2d(in_chans, + out_chans // 2, + kernel_size=3, + stride=2, + padding=1) + self.norm1 = build_norm_layer(out_chans // 2, norm_layer, + 'channels_first', 'channels_first') + self.act = build_act_layer(act_layer) + self.conv2 = nn.Conv2d(out_chans // 2, + out_chans, + kernel_size=3, + stride=2, + padding=1) + self.norm2 = build_norm_layer(out_chans, norm_layer, 'channels_first', + 'channels_last') + + def forward(self, x): + x = self.conv1(x) + x = self.norm1(x) + x = self.act(x) + x = self.conv2(x) + x = self.norm2(x) + return x + + +class DownsampleLayer(nn.Module): + r""" Downsample layer of InternImage + Args: + channels (int): number of input channels + norm_layer (str): normalization layer + """ + + def __init__(self, channels, norm_layer='LN'): + super().__init__() + self.conv = nn.Conv2d(channels, + 2 * channels, + kernel_size=3, + stride=2, + padding=1, + bias=False) + self.norm = build_norm_layer(2 * channels, norm_layer, + 'channels_first', 'channels_last') + + def forward(self, x): + x = self.conv(x.permute(0, 3, 1, 2)) + x = self.norm(x) + return x + + +class MLPLayer(nn.Module): + r""" MLP layer of InternImage + Args: + in_features (int): number of input features + hidden_features (int): number of hidden features + out_features (int): number of output features + act_layer (str): activation layer + drop (float): dropout rate + """ + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer='GELU', + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = build_act_layer(act_layer) + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class InternImageLayer(nn.Module): + r""" Basic layer of InternImage + Args: + core_op (nn.Module): core operation of InternImage + channels (int): number of input channels + groups (list): Groups of each block. 
+ mlp_ratio (float): ratio of mlp hidden features to input channels + drop (float): dropout rate + drop_path (float): drop path rate + act_layer (str): activation layer + norm_layer (str): normalization layer + post_norm (bool): whether to use post normalization + layer_scale (float): layer scale + offset_scale (float): offset scale + with_cp (bool): whether to use checkpoint + """ + + def __init__(self, + core_op, + channels, + groups, + mlp_ratio=4., + drop=0., + drop_path=0., + act_layer='GELU', + norm_layer='LN', + post_norm=False, + layer_scale=None, + offset_scale=1.0, + with_cp=False, + dw_kernel_size=None, # for InternImage-H/G + res_post_norm=False, # for InternImage-H/G + center_feature_scale=False, + use_dcn_v4_op=False): # for InternImage-H/G + super().__init__() + self.channels = channels + self.groups = groups + self.mlp_ratio = mlp_ratio + self.with_cp = with_cp + + self.norm1 = build_norm_layer(channels, 'LN') + self.post_norm = post_norm + self.dcn = core_op( + channels=channels, + kernel_size=3, + stride=1, + pad=1, + dilation=1, + group=groups, + offset_scale=offset_scale, + act_layer=act_layer, + norm_layer=norm_layer, + dw_kernel_size=dw_kernel_size, # for InternImage-H/G + center_feature_scale=center_feature_scale, + use_dcn_v4_op=use_dcn_v4_op) # for InternImage-H/G + self.drop_path = DropPath(drop_path) if drop_path > 0. \ + else nn.Identity() + self.norm2 = build_norm_layer(channels, 'LN') + self.mlp = MLPLayer(in_features=channels, + hidden_features=int(channels * mlp_ratio), + act_layer=act_layer, + drop=drop) + self.layer_scale = layer_scale is not None + if self.layer_scale: + self.gamma1 = nn.Parameter(layer_scale * torch.ones(channels), + requires_grad=True) + self.gamma2 = nn.Parameter(layer_scale * torch.ones(channels), + requires_grad=True) + self.res_post_norm = res_post_norm + if res_post_norm: + self.res_post_norm1 = build_norm_layer(channels, 'LN') + self.res_post_norm2 = build_norm_layer(channels, 'LN') + + def forward(self, x): + + def _inner_forward(x): + if not self.layer_scale: + if self.post_norm: + x = x + self.drop_path(self.norm1(self.dcn(x))) + x = x + self.drop_path(self.norm2(self.mlp(x))) + elif self.res_post_norm: # for InternImage-H/G + x = x + self.drop_path(self.res_post_norm1(self.dcn(self.norm1(x)))) + x = x + self.drop_path(self.res_post_norm2(self.mlp(self.norm2(x)))) + else: + x = x + self.drop_path(self.dcn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + if self.post_norm: + x = x + self.drop_path(self.gamma1 * self.norm1(self.dcn(x))) + x = x + self.drop_path(self.gamma2 * self.norm2(self.mlp(x))) + else: + x = x + self.drop_path(self.gamma1 * self.dcn(self.norm1(x))) + x = x + self.drop_path(self.gamma2 * self.mlp(self.norm2(x))) + return x + + if self.with_cp and x.requires_grad: + x = checkpoint.checkpoint(_inner_forward, x) + else: + x = _inner_forward(x) + return x + + +class InternImageBlock(nn.Module): + r""" Block of InternImage + Args: + core_op (nn.Module): core operation of InternImage + channels (int): number of input channels + depths (list): Depth of each block. + groups (list): Groups of each block. 
+ mlp_ratio (float): ratio of mlp hidden features to input channels + drop (float): dropout rate + drop_path (float): drop path rate + act_layer (str): activation layer + norm_layer (str): normalization layer + post_norm (bool): whether to use post normalization + layer_scale (float): layer scale + offset_scale (float): offset scale + with_cp (bool): whether to use checkpoint + """ + + def __init__(self, + core_op, + channels, + depth, + groups, + downsample=True, + mlp_ratio=4., + drop=0., + drop_path=0., + act_layer='GELU', + norm_layer='LN', + post_norm=False, + offset_scale=1.0, + layer_scale=None, + with_cp=False, + dw_kernel_size=None, # for InternImage-H/G + post_norm_block_ids=None, # for InternImage-H/G + res_post_norm=False, # for InternImage-H/G + center_feature_scale=False, # for InternImage-H/G + use_dcn_v4_op=False): + super().__init__() + self.channels = channels + self.depth = depth + self.post_norm = post_norm + self.center_feature_scale = center_feature_scale + + self.blocks = nn.ModuleList([ + InternImageLayer( + core_op=core_op, + channels=channels, + groups=groups, + mlp_ratio=mlp_ratio, + drop=drop, + drop_path=drop_path[i] if isinstance( + drop_path, list) else drop_path, + act_layer=act_layer, + norm_layer=norm_layer, + post_norm=post_norm, + layer_scale=layer_scale, + offset_scale=offset_scale, + with_cp=with_cp, + dw_kernel_size=dw_kernel_size, # for InternImage-H/G + res_post_norm=res_post_norm, # for InternImage-H/G + center_feature_scale=center_feature_scale, # for InternImage-H/G + use_dcn_v4_op=use_dcn_v4_op + ) for i in range(depth) + ]) + if not self.post_norm or center_feature_scale: + self.norm = build_norm_layer(channels, 'LN') + self.post_norm_block_ids = post_norm_block_ids + if post_norm_block_ids is not None: # for InternImage-H/G + self.post_norms = nn.ModuleList( + [build_norm_layer(channels, 'LN', eps=1e-6) for _ in post_norm_block_ids] + ) + self.downsample = DownsampleLayer( + channels=channels, norm_layer=norm_layer) if downsample else None + + def forward(self, x, return_wo_downsample=False): + for i, blk in enumerate(self.blocks): + x = blk(x) + if (self.post_norm_block_ids is not None) and (i in self.post_norm_block_ids): + index = self.post_norm_block_ids.index(i) + x = self.post_norms[index](x) # for InternImage-H/G + if not self.post_norm or self.center_feature_scale: + x = self.norm(x) + if return_wo_downsample: + x_ = x + if self.downsample is not None: + x = self.downsample(x) + + if return_wo_downsample: + return x, x_ + return x + + +@BACKBONES.register_module() +class InternImage(nn.Module): + r""" InternImage + A PyTorch impl of : `InternImage: Exploring Large-Scale Vision Foundation Models with Deformable Convolutions` - + https://arxiv.org/pdf/2103.14030 + Args: + core_op (str): Core operator. Default: 'DCNv3' + channels (int): Number of the first stage. Default: 64 + depths (list): Depth of each block. Default: [3, 4, 18, 5] + groups (list): Groups of each block. Default: [3, 6, 12, 24] + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + drop_rate (float): Probability of an element to be zeroed. Default: 0. + drop_path_rate (float): Stochastic depth rate. Default: 0. + act_layer (str): Activation layer. Default: 'GELU' + norm_layer (str): Normalization layer. Default: 'LN' + layer_scale (bool): Whether to use layer scale. Default: False + cls_scale (bool): Whether to use class scale. Default: False + with_cp (bool): Use checkpoint or not. 
Using checkpoint will save some + dw_kernel_size (int): Size of the dwconv. Default: None + level2_post_norm (bool): Whether to use level2 post norm. Default: False + level2_post_norm_block_ids (list): Indexes of post norm blocks. Default: None + res_post_norm (bool): Whether to use res post norm. Default: False + center_feature_scale (bool): Whether to use center feature scale. Default: False + """ + + def __init__(self, + core_op='DCNv3', + channels=64, + depths=[3, 4, 18, 5], + groups=[3, 6, 12, 24], + mlp_ratio=4., + drop_rate=0., + drop_path_rate=0.2, + drop_path_type='linear', + act_layer='GELU', + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + with_cp=False, + dw_kernel_size=None, # for InternImage-H/G + level2_post_norm=False, # for InternImage-H/G + level2_post_norm_block_ids=None, # for InternImage-H/G + res_post_norm=False, # for InternImage-H/G + center_feature_scale=False, # for InternImage-H/G + use_dcn_v4_op=False, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + init_cfg=None, + **kwargs): + super().__init__() + self.core_op = core_op + self.num_levels = len(depths) + self.depths = depths + self.channels = channels + self.num_features = int(channels * 2**(self.num_levels - 1)) + self.post_norm = post_norm + self.mlp_ratio = mlp_ratio + self.init_cfg = init_cfg + self.out_indices = out_indices + self.level2_post_norm_block_ids = level2_post_norm_block_ids + self.frozen_stages = frozen_stages + + logger = get_root_logger() + logger.info(f'using core type: {core_op}') + logger.info(f'using activation layer: {act_layer}') + logger.info(f'using main norm layer: {norm_layer}') + logger.info(f'using dpr: {drop_path_type}, {drop_path_rate}') + logger.info(f'level2_post_norm: {level2_post_norm}') + logger.info(f'level2_post_norm_block_ids: {level2_post_norm_block_ids}') + logger.info(f'res_post_norm: {res_post_norm}') + logger.info(f'use_dcn_v4_op: {use_dcn_v4_op}') + + in_chans = 3 + self.patch_embed = StemLayer(in_chans=in_chans, + out_chans=channels, + act_layer=act_layer, + norm_layer=norm_layer) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [ + x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) + ] + if drop_path_type == 'uniform': + for i in range(len(dpr)): + dpr[i] = drop_path_rate + + self.levels = nn.ModuleList() + for i in range(self.num_levels): + post_norm_block_ids = level2_post_norm_block_ids if level2_post_norm and ( + i == 2) else None # for InternImage-H/G + level = InternImageBlock( + core_op=getattr(dcnv3, core_op), + channels=int(channels * 2**i), + depth=depths[i], + groups=groups[i], + mlp_ratio=self.mlp_ratio, + drop=drop_rate, + drop_path=dpr[sum(depths[:i]):sum(depths[:i + 1])], + act_layer=act_layer, + norm_layer=norm_layer, + post_norm=post_norm, + downsample=(i < self.num_levels - 1), + layer_scale=layer_scale, + offset_scale=offset_scale, + with_cp=with_cp, + dw_kernel_size=dw_kernel_size, # for InternImage-H/G + post_norm_block_ids=post_norm_block_ids, # for InternImage-H/G + res_post_norm=res_post_norm, # for InternImage-H/G + center_feature_scale=center_feature_scale, # for InternImage-H/G + use_dcn_v4_op=use_dcn_v4_op, + ) + self.levels.append(level) + + self.num_layers = len(depths) + self.apply(self._init_weights) + self.apply(self._init_deform_weights) + self._freeze_stages() + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer frozen.""" + super(InternImage, self).train(mode) + self._freeze_stages() + + def _freeze_stages(self): + if 
self.frozen_stages >= 0: + for level in self.levels[:self.frozen_stages]: + level.eval() + for param in level.parameters(): + param.requires_grad = False + + def init_weights(self): + logger = get_root_logger() + if self.init_cfg is None: + logger.warn(f'No pre-trained weights for ' + f'{self.__class__.__name__}, ' + f'training start from scratch') + for m in self.modules(): + if isinstance(m, nn.Linear): + trunc_normal_init(m, std=.02, bias=0.) + elif isinstance(m, nn.LayerNorm): + constant_init(m, 1.0) + else: + assert 'checkpoint' in self.init_cfg, f'Only support ' \ + f'specify `Pretrained` in ' \ + f'`init_cfg` in ' \ + f'{self.__class__.__name__} ' + ckpt = _load_checkpoint(self.init_cfg.checkpoint, + logger=logger, + map_location='cpu') + if 'state_dict' in ckpt: + _state_dict = ckpt['state_dict'] + elif 'model' in ckpt: + _state_dict = ckpt['model'] + else: + _state_dict = ckpt + + state_dict = OrderedDict() + for k, v in _state_dict.items(): + if k.startswith('backbone.'): + state_dict[k[9:]] = v + else: + state_dict[k] = v + + # strip prefix of state_dict + if list(state_dict.keys())[0].startswith('module.'): + state_dict = {k[7:]: v for k, v in state_dict.items()} + + # load state_dict + meg = self.load_state_dict(state_dict, False) + logger.info(meg) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + def _init_deform_weights(self, m): + if isinstance(m, getattr(dcnv3, self.core_op)): + m._reset_parameters() + + def forward(self, x): + x = self.patch_embed(x) + x = self.pos_drop(x) + + seq_out = [] + for level_idx, level in enumerate(self.levels): + x, x_ = level(x, return_wo_downsample=True) + if level_idx in self.out_indices: + seq_out.append(x_.permute(0, 3, 1, 2).contiguous()) + return seq_out diff --git a/segmentation/mmseg_custom/models/builder.py b/segmentation/mmseg_custom/models/builder.py new file mode 100644 index 0000000000000000000000000000000000000000..94b9ff773f47ffe14e9ff7dc309eea12eae9e4b1 --- /dev/null +++ b/segmentation/mmseg_custom/models/builder.py @@ -0,0 +1,23 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings # noqa: F401,F403 + +from mmcv.utils import Registry + +TRANSFORMER = Registry('Transformer') +MASK_ASSIGNERS = Registry('mask_assigner') +MATCH_COST = Registry('match_cost') + + +def build_match_cost(cfg): + """Build Match Cost.""" + return MATCH_COST.build(cfg) + + +def build_assigner(cfg): + """Build Assigner.""" + return MASK_ASSIGNERS.build(cfg) + + +def build_transformer(cfg): + """Build Transformer.""" + return TRANSFORMER.build(cfg) diff --git a/segmentation/mmseg_custom/models/decode_heads/__init__.py b/segmentation/mmseg_custom/models/decode_heads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..52a7761c070b4ff4a92604bb92cfb5fae52cedf5 --- /dev/null +++ b/segmentation/mmseg_custom/models/decode_heads/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
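Editor's note on the backbone that ends just above: its `forward()` returns one NCHW tensor per entry in `out_indices`, and each level doubles the channel width (`int(channels * 2**i)`), which is also why `self.num_features` equals `channels * 2**(num_levels - 1)`. Before the decode heads introduced below, here is a minimal sketch of the resulting feature-pyramid shapes under the defaults shown above (`channels=64`, four levels). The stride-4 stem and per-level stride-2 downsampling are assumptions about `StemLayer`/`DownsampleLayer`, not something this hunk spells out.

```python
# Hypothetical shape walk-through for the default InternImage backbone config.
channels, num_levels = 64, 4
widths = [channels * 2 ** i for i in range(num_levels)]   # [64, 128, 256, 512]
strides = [4 * 2 ** i for i in range(num_levels)]          # [4, 8, 16, 32] (assumed stem/downsample strides)

H = W = 512  # illustrative input resolution
for i, (c, s) in enumerate(zip(widths, strides)):
    print(f'level {i}: ({c}, {H // s}, {W // s})')  # NCHW per out_indices entry
```

These per-level widths are what a decode head's `in_channels` list would typically be set to (e.g. `[64, 128, 256, 512]` for the defaults above), which is how the backbone connects to the heads registered in the `decode_heads` package that follows.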
+from .mask2former_head import Mask2FormerHead +from .maskformer_head import MaskFormerHead + +__all__ = [ + 'MaskFormerHead', + 'Mask2FormerHead', +] diff --git a/segmentation/mmseg_custom/models/decode_heads/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/models/decode_heads/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1460c5115c8be2a6eb8a8396939432e783a27bc2 Binary files /dev/null and b/segmentation/mmseg_custom/models/decode_heads/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/decode_heads/__pycache__/mask2former_head.cpython-39.pyc b/segmentation/mmseg_custom/models/decode_heads/__pycache__/mask2former_head.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a63334130ef1556b9213f9d8f110253f0154bc68 Binary files /dev/null and b/segmentation/mmseg_custom/models/decode_heads/__pycache__/mask2former_head.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/decode_heads/__pycache__/maskformer_head.cpython-39.pyc b/segmentation/mmseg_custom/models/decode_heads/__pycache__/maskformer_head.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73de71d4fc2e03c19af25d2269d83db5104d10b0 Binary files /dev/null and b/segmentation/mmseg_custom/models/decode_heads/__pycache__/maskformer_head.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/decode_heads/mask2former_head.py b/segmentation/mmseg_custom/models/decode_heads/mask2former_head.py new file mode 100644 index 0000000000000000000000000000000000000000..0dcf98d6c908282d04fdb8c926e2697aa97a763c --- /dev/null +++ b/segmentation/mmseg_custom/models/decode_heads/mask2former_head.py @@ -0,0 +1,582 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init +from mmcv.cnn.bricks.transformer import (build_positional_encoding, + build_transformer_layer_sequence) +from mmcv.ops import point_sample +from mmcv.runner import ModuleList, force_fp32 +from mmseg.models.builder import HEADS, build_loss +from mmseg.models.decode_heads.decode_head import BaseDecodeHead + +from ...core import build_sampler, multi_apply, reduce_mean +from ..builder import build_assigner +from ..utils import get_uncertain_point_coords_with_randomness + + +@HEADS.register_module() +class Mask2FormerHead(BaseDecodeHead): + """Implements the Mask2Former head. + + See `Masked-attention Mask Transformer for Universal Image + Segmentation `_ for details. + + Args: + in_channels (list[int]): Number of channels in the input feature map. + feat_channels (int): Number of channels for features. + out_channels (int): Number of channels for output. + num_classes (int): Number of classes. + num_things_classes (int): Number of things. + num_stuff_classes (int): Number of stuff. + num_queries (int): Number of query in Transformer decoder. + pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel + decoder. Defaults to None. + enforce_decoder_input_project (bool, optional): Whether to add + a layer to change the embed_dim of tranformer encoder in + pixel decoder to the embed_dim of transformer decoder. + Defaults to False. + transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for + transformer decoder. Defaults to None. + positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for + transformer decoder position encoding. 
Defaults to None. + loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification + loss. Defaults to None. + loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss. + Defaults to None. + loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss. + Defaults to None. + train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of + Mask2Former head. + test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of + Mask2Former head. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. + """ + def __init__(self, + in_channels, + feat_channels, + out_channels, + num_classes=80, + num_things_classes=None, + num_stuff_classes=None, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=None, + enforce_decoder_input_project=False, + transformer_decoder=None, + positional_encoding=None, + loss_cls=None, + loss_mask=None, + loss_dice=None, + train_cfg=None, + test_cfg=None, + init_cfg=None, + **kwargs): + super(Mask2FormerHead, self).__init__( + in_channels=in_channels, + channels=feat_channels, + num_classes=num_classes, + init_cfg=init_cfg, + input_transform='multiple_select', + **kwargs) + self.num_classes = num_classes + self.num_queries = num_queries + self.num_transformer_feat_level = num_transformer_feat_level + self.num_heads = transformer_decoder.transformerlayers. \ + attn_cfgs.num_heads + self.num_transformer_decoder_layers = transformer_decoder.num_layers + assert pixel_decoder.encoder.transformerlayers. \ + attn_cfgs.num_levels == num_transformer_feat_level + pixel_decoder_ = copy.deepcopy(pixel_decoder) + pixel_decoder_.update( + in_channels=in_channels, + feat_channels=feat_channels, + out_channels=out_channels) + self.pixel_decoder = build_plugin_layer(pixel_decoder_)[1] + self.transformer_decoder = build_transformer_layer_sequence( + transformer_decoder) + self.decoder_embed_dims = self.transformer_decoder.embed_dims + + self.decoder_input_projs = ModuleList() + # from low resolution to high resolution + for _ in range(num_transformer_feat_level): + if (self.decoder_embed_dims != feat_channels + or enforce_decoder_input_project): + self.decoder_input_projs.append( + Conv2d( + feat_channels, self.decoder_embed_dims, kernel_size=1)) + else: + self.decoder_input_projs.append(nn.Identity()) + self.decoder_positional_encoding = build_positional_encoding( + positional_encoding) + self.query_embed = nn.Embedding(self.num_queries, feat_channels) + self.query_feat = nn.Embedding(self.num_queries, feat_channels) + # from low resolution to high resolution + self.level_embed = nn.Embedding(self.num_transformer_feat_level, + feat_channels) + + self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1) + self.mask_embed = nn.Sequential( + nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), + nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True), + nn.Linear(feat_channels, out_channels)) + self.conv_seg = None # fix a bug here (conv_seg is not used) + + self.test_cfg = test_cfg + self.train_cfg = train_cfg + if train_cfg: + self.assigner = build_assigner(self.train_cfg.assigner) + self.sampler = build_sampler(self.train_cfg.sampler, context=self) + self.num_points = self.train_cfg.get('num_points', 12544) + self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0) + self.importance_sample_ratio = self.train_cfg.get( + 'importance_sample_ratio', 0.75) + + self.class_weight = loss_cls.class_weight + self.loss_cls = build_loss(loss_cls) + self.loss_mask = build_loss(loss_mask) + 
self.loss_dice = build_loss(loss_dice) + + def init_weights(self): + for m in self.decoder_input_projs: + if isinstance(m, Conv2d): + caffe2_xavier_init(m, bias=0) + + self.pixel_decoder.init_weights() + + for p in self.transformer_decoder.parameters(): + if p.dim() > 1: + nn.init.xavier_normal_(p) + + def get_targets(self, cls_scores_list, mask_preds_list, gt_labels_list, + gt_masks_list, img_metas): + """Compute classification and mask targets for all images for a decoder + layer. + + Args: + cls_scores_list (list[Tensor]): Mask score logits from a single + decoder layer for all images. Each with shape [num_queries, + cls_out_channels]. + mask_preds_list (list[Tensor]): Mask logits from a single decoder + layer for all images. Each with shape [num_queries, h, w]. + gt_labels_list (list[Tensor]): Ground truth class indices for all + images. Each with shape (n, ), n is the sum of number of stuff + type and number of instance in a image. + gt_masks_list (list[Tensor]): Ground truth mask for each image, + each with shape (n, h, w). + img_metas (list[dict]): List of image meta information. + + Returns: + tuple[list[Tensor]]: a tuple containing the following targets. + + - labels_list (list[Tensor]): Labels of all images. + Each with shape [num_queries, ]. + - label_weights_list (list[Tensor]): Label weights of all + images.Each with shape [num_queries, ]. + - mask_targets_list (list[Tensor]): Mask targets of all images. + Each with shape [num_queries, h, w]. + - mask_weights_list (list[Tensor]): Mask weights of all images. + Each with shape [num_queries, ]. + - num_total_pos (int): Number of positive samples in all + images. + - num_total_neg (int): Number of negative samples in all + images. + """ + (labels_list, label_weights_list, mask_targets_list, mask_weights_list, + pos_inds_list, + neg_inds_list) = multi_apply(self._get_target_single, cls_scores_list, + mask_preds_list, gt_labels_list, + gt_masks_list, img_metas) + + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, mask_targets_list, + mask_weights_list, num_total_pos, num_total_neg) + + def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks, + img_metas): + """Compute classification and mask targets for one image. + + Args: + cls_score (Tensor): Mask score logits from a single decoder layer + for one image. Shape (num_queries, cls_out_channels). + mask_pred (Tensor): Mask logits for a single decoder layer for one + image. Shape (num_queries, h, w). + gt_labels (Tensor): Ground truth class indices for one image with + shape (num_gts, ). + gt_masks (Tensor): Ground truth mask for each image, each with + shape (num_gts, h, w). + img_metas (dict): Image informtation. + + Returns: + tuple[Tensor]: A tuple containing the following for one image. + + - labels (Tensor): Labels of each image. \ + shape (num_queries, ). + - label_weights (Tensor): Label weights of each image. \ + shape (num_queries, ). + - mask_targets (Tensor): Mask targets of each image. \ + shape (num_queries, h, w). + - mask_weights (Tensor): Mask weights of each image. \ + shape (num_queries, ). + - pos_inds (Tensor): Sampled positive indices for each \ + image. + - neg_inds (Tensor): Sampled negative indices for each \ + image. 
+ """ + # sample points + num_queries = cls_score.shape[0] + num_gts = gt_labels.shape[0] + + point_coords = torch.rand((1, self.num_points, 2), + device=cls_score.device) + # shape (num_queries, num_points) + mask_points_pred = point_sample( + mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1, + 1)).squeeze(1) + # shape (num_gts, num_points) + gt_points_masks = point_sample( + gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1, + 1)).squeeze(1) + + # assign and sample + assign_result = self.assigner.assign(cls_score, mask_points_pred, + gt_labels, gt_points_masks, + img_metas) + sampling_result = self.sampler.sample(assign_result, mask_pred, + gt_masks) + pos_inds = sampling_result.pos_inds + neg_inds = sampling_result.neg_inds + + # label target + labels = gt_labels.new_full((self.num_queries, ), + self.num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds] + label_weights = gt_labels.new_ones((self.num_queries, )) + + # mask target + mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds] + mask_weights = mask_pred.new_zeros((self.num_queries, )) + mask_weights[pos_inds] = 1.0 + + return (labels, label_weights, mask_targets, mask_weights, pos_inds, + neg_inds) + + def loss_single(self, cls_scores, mask_preds, gt_labels_list, + gt_masks_list, img_metas): + """Loss function for outputs from a single decoder layer. + + Args: + cls_scores (Tensor): Mask score logits from a single decoder layer + for all images. Shape (batch_size, num_queries, + cls_out_channels). Note `cls_out_channels` should includes + background. + mask_preds (Tensor): Mask logits for a pixel decoder for all + images. Shape (batch_size, num_queries, h, w). + gt_labels_list (list[Tensor]): Ground truth class indices for each + image, each with shape (num_gts, ). + gt_masks_list (list[Tensor]): Ground truth mask for each image, + each with shape (num_gts, h, w). + img_metas (list[dict]): List of image meta information. + + Returns: + tuple[Tensor]: Loss components for outputs from a single \ + decoder layer. 
+ """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + mask_preds_list = [mask_preds[i] for i in range(num_imgs)] + (labels_list, label_weights_list, mask_targets_list, mask_weights_list, + num_total_pos, + num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list, + gt_labels_list, gt_masks_list, + img_metas) + # shape (batch_size, num_queries) + labels = torch.stack(labels_list, dim=0) + # shape (batch_size, num_queries) + label_weights = torch.stack(label_weights_list, dim=0) + # shape (num_total_gts, h, w) + mask_targets = torch.cat(mask_targets_list, dim=0) + # shape (batch_size, num_queries) + mask_weights = torch.stack(mask_weights_list, dim=0) + + # classfication loss + # shape (batch_size * num_queries, ) + cls_scores = cls_scores.flatten(0, 1) + labels = labels.flatten(0, 1) + label_weights = label_weights.flatten(0, 1) + + class_weight = cls_scores.new_tensor(self.class_weight) + loss_cls = self.loss_cls( + cls_scores, + labels, + label_weights, + avg_factor=class_weight[labels].sum()) + + num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos])) + num_total_masks = max(num_total_masks, 1) + + # extract positive ones + # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w) + mask_preds = mask_preds[mask_weights > 0] + + if mask_targets.shape[0] == 0: + # zero match + loss_dice = mask_preds.sum() + loss_mask = mask_preds.sum() + return loss_cls, loss_mask, loss_dice + + with torch.no_grad(): + points_coords = get_uncertain_point_coords_with_randomness( + mask_preds.unsqueeze(1), None, self.num_points, + self.oversample_ratio, self.importance_sample_ratio) + # shape (num_total_gts, h, w) -> (num_total_gts, num_points) + mask_point_targets = point_sample( + mask_targets.unsqueeze(1).float(), points_coords).squeeze(1) + # shape (num_queries, h, w) -> (num_queries, num_points) + mask_point_preds = point_sample( + mask_preds.unsqueeze(1), points_coords).squeeze(1) + + # dice loss + loss_dice = self.loss_dice( + mask_point_preds, mask_point_targets, avg_factor=num_total_masks) + + # mask loss + # shape (num_queries, num_points) -> (num_queries * num_points, ) + mask_point_preds = mask_point_preds.reshape(-1,1) + # shape (num_total_gts, num_points) -> (num_total_gts * num_points, ) + mask_point_targets = mask_point_targets.reshape(-1) + loss_mask = self.loss_mask( + mask_point_preds, + mask_point_targets, + avg_factor=num_total_masks * self.num_points) + + return loss_cls, loss_mask, loss_dice + + @force_fp32(apply_to=('all_cls_scores', 'all_mask_preds')) + def loss(self, all_cls_scores, all_mask_preds, gt_labels_list, + gt_masks_list, img_metas): + """Loss function. + + Args: + all_cls_scores (Tensor): Classification scores for all decoder + layers with shape [num_decoder, batch_size, num_queries, + cls_out_channels]. + all_mask_preds (Tensor): Mask scores for all decoder layers with + shape [num_decoder, batch_size, num_queries, h, w]. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (n, ). n is the sum of number of stuff type + and number of instance in a image. + gt_masks_list (list[Tensor]): Ground truth mask for each image with + shape (n, h, w). + img_metas (list[dict]): List of image meta information. + + Returns: + dict[str, Tensor]: A dictionary of loss components. 
+ """ + num_dec_layers = len(all_cls_scores) + all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] + all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)] + img_metas_list = [img_metas for _ in range(num_dec_layers)] + losses_cls, losses_mask, losses_dice = multi_apply( + self.loss_single, all_cls_scores, all_mask_preds, + all_gt_labels_list, all_gt_masks_list, img_metas_list) + + loss_dict = dict() + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_mask'] = losses_mask[-1] + loss_dict['loss_dice'] = losses_dice[-1] + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_mask_i, loss_dice_i in zip( + losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i + loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i + num_dec_layer += 1 + return loss_dict + + def forward_head(self, decoder_out, mask_feature, attn_mask_target_size): + """Forward for head part which is called after every decoder layer. + + Args: + decoder_out (Tensor): in shape (num_queries, batch_size, c). + mask_feature (Tensor): in shape (batch_size, c, h, w). + attn_mask_target_size (tuple[int, int]): target attention + mask size. + + Returns: + tuple: A tuple contain three elements. + + - cls_pred (Tensor): Classification scores in shape \ + (batch_size, num_queries, cls_out_channels). \ + Note `cls_out_channels` should includes background. + - mask_pred (Tensor): Mask scores in shape \ + (batch_size, num_queries,h, w). + - attn_mask (Tensor): Attention mask in shape \ + (batch_size * num_heads, num_queries, h, w). + """ + decoder_out = self.transformer_decoder.post_norm(decoder_out) + decoder_out = decoder_out.transpose(0, 1) + # shape (num_queries, batch_size, c) + cls_pred = self.cls_embed(decoder_out) + # shape (num_queries, batch_size, c) + mask_embed = self.mask_embed(decoder_out) + # shape (num_queries, batch_size, h, w) + mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature) + attn_mask = F.interpolate( + mask_pred, + attn_mask_target_size, + mode='bilinear', + align_corners=False) + # shape (num_queries, batch_size, h, w) -> + # (batch_size * num_head, num_queries, h, w) + attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat( + (1, self.num_heads, 1, 1)).flatten(0, 1) + attn_mask = attn_mask.sigmoid() < 0.5 + attn_mask = attn_mask.detach() + + return cls_pred, mask_pred, attn_mask + + def forward(self, feats, img_metas): + """Forward function. + + Args: + feats (list[Tensor]): Multi scale Features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + + Returns: + tuple: A tuple contains two elements. + + - cls_pred_list (list[Tensor)]: Classification logits \ + for each decoder layer. Each is a 3D-tensor with shape \ + (batch_size, num_queries, cls_out_channels). \ + Note `cls_out_channels` should includes background. + - mask_pred_list (list[Tensor]): Mask logits for each \ + decoder layer. Each with shape (batch_size, num_queries, \ + h, w). 
+ """ + try: + batch_size = len(img_metas) + except: + batch_size = 1 + mask_features, multi_scale_memorys = self.pixel_decoder(feats) + # multi_scale_memorys (from low resolution to high resolution) + decoder_inputs = [] + decoder_positional_encodings = [] + for i in range(self.num_transformer_feat_level): + decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i]) + # shape (batch_size, c, h, w) -> (h*w, batch_size, c) + decoder_input = decoder_input.flatten(2).permute(2, 0, 1) + level_embed = self.level_embed.weight[i].view(1, 1, -1) + decoder_input = decoder_input + level_embed + # shape (batch_size, c, h, w) -> (h*w, batch_size, c) + mask = decoder_input.new_zeros( + (batch_size, ) + multi_scale_memorys[i].shape[-2:], + dtype=torch.bool) + decoder_positional_encoding = self.decoder_positional_encoding( + mask) + decoder_positional_encoding = decoder_positional_encoding.flatten( + 2).permute(2, 0, 1) + decoder_inputs.append(decoder_input) + decoder_positional_encodings.append(decoder_positional_encoding) + # shape (num_queries, c) -> (num_queries, batch_size, c) + query_feat = self.query_feat.weight.unsqueeze(1).repeat( + (1, batch_size, 1)) + query_embed = self.query_embed.weight.unsqueeze(1).repeat( + (1, batch_size, 1)) + + cls_pred_list = [] + mask_pred_list = [] + cls_pred, mask_pred, attn_mask = self.forward_head( + query_feat, mask_features, multi_scale_memorys[0].shape[-2:]) + cls_pred_list.append(cls_pred) + mask_pred_list.append(mask_pred) + + for i in range(self.num_transformer_decoder_layers): + level_idx = i % self.num_transformer_feat_level + # if a mask is all True(all background), then set it all False. + attn_mask[torch.where( + attn_mask.sum(-1) == attn_mask.shape[-1])] = False + + # cross_attn + self_attn + layer = self.transformer_decoder.layers[i] + attn_masks = [attn_mask, None] + query_feat = layer( + query=query_feat, + key=decoder_inputs[level_idx], + value=decoder_inputs[level_idx], + query_pos=query_embed, + key_pos=decoder_positional_encodings[level_idx], + attn_masks=attn_masks, + query_key_padding_mask=None, + # here we do not apply masking on padded region + key_padding_mask=None) + cls_pred, mask_pred, attn_mask = self.forward_head( + query_feat, mask_features, multi_scale_memorys[ + (i + 1) % self.num_transformer_feat_level].shape[-2:]) + + cls_pred_list.append(cls_pred) + mask_pred_list.append(mask_pred) + + return cls_pred_list, mask_pred_list + + def forward_train(self, x, img_metas, gt_semantic_seg, gt_labels, + gt_masks): + """Forward function for training mode. + + Args: + x (list[Tensor]): Multi-level features from the upstream network, + each is a 4D-tensor. + img_metas (list[Dict]): List of image information. + gt_semantic_seg (list[tensor]):Each element is the ground truth + of semantic segmentation with the shape (N, H, W). + train_cfg (dict): The training config, which not been used in + maskformer. + gt_labels (list[Tensor]): Each element is ground truth labels of + each box, shape (num_gts,). + gt_masks (list[BitmapMasks]): Each element is masks of instances + of a image, shape (num_gts, h, w). + + Returns: + losses (dict[str, Tensor]): a dictionary of loss components + """ + + # forward + all_cls_scores, all_mask_preds = self(x, img_metas) + + # loss + losses = self.loss(all_cls_scores, all_mask_preds, gt_labels, gt_masks, + img_metas) + + return losses + + def forward_test(self, inputs, img_metas, test_cfg): + """Test segment without test-time aumengtation. + + Only the output of last decoder layers was used. 
+ + Args: + inputs (list[Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + test_cfg (dict): Testing config. + + Returns: + seg_mask (Tensor): Predicted semantic segmentation logits. + """ + all_cls_scores, all_mask_preds = self(inputs, img_metas) + cls_score, mask_pred = all_cls_scores[-1], all_mask_preds[-1] + # ori_h, ori_w, _ = img_metas[0]['ori_shape'] + + # semantic inference + cls_score = F.softmax(cls_score, dim=-1)[..., :-1] + mask_pred = mask_pred.sigmoid() + seg_mask = torch.einsum('bqc,bqhw->bchw', cls_score, mask_pred) + return seg_mask diff --git a/segmentation/mmseg_custom/models/decode_heads/maskformer_head.py b/segmentation/mmseg_custom/models/decode_heads/maskformer_head.py new file mode 100644 index 0000000000000000000000000000000000000000..c35cd1320b4b9d61017c7b726e60c1dda6c60c4c --- /dev/null +++ b/segmentation/mmseg_custom/models/decode_heads/maskformer_head.py @@ -0,0 +1,519 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import Conv2d, build_plugin_layer, kaiming_init +from mmcv.cnn.bricks.transformer import (build_positional_encoding, + build_transformer_layer_sequence) +from mmcv.runner import force_fp32 +from mmseg.models.builder import HEADS, build_loss +from mmseg.models.decode_heads.decode_head import BaseDecodeHead + +from ...core import multi_apply, reduce_mean +from ..builder import build_assigner, build_transformer + + +@HEADS.register_module() +class MaskFormerHead(BaseDecodeHead): + """Implements the MaskFormer head. + + See `paper: Per-Pixel Classification is Not All You Need + for Semantic Segmentation` + for details. + + Args: + in_channels (list[int]): Number of channels in the input feature map. + feat_channels (int): Number channels for feature. + out_channels (int): Number channels for output. + num_things_classes (int): Number of things. + num_stuff_classes (int): Number of stuff. + num_queries (int): Number of query in Transformer. + pixel_decoder (obj:`mmcv.ConfigDict`|dict): Config for pixel decoder. + Defaults to None. + enforce_decoder_input_project (bool, optional): Whether to add a layer + to change the embed_dim of tranformer encoder in pixel decoder to + the embed_dim of transformer decoder. Defaults to False. + transformer_decoder (obj:`mmcv.ConfigDict`|dict): Config for + transformer decoder. Defaults to None. + positional_encoding (obj:`mmcv.ConfigDict`|dict): Config for + transformer decoder position encoding. Defaults to None. + loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the classification + loss. Defaults to `CrossEntropyLoss`. + loss_mask (obj:`mmcv.ConfigDict`|dict): Config of the mask loss. + Defaults to `FocalLoss`. + loss_dice (obj:`mmcv.ConfigDict`|dict): Config of the dice loss. + Defaults to `DiceLoss`. + train_cfg (obj:`mmcv.ConfigDict`|dict): Training config of Maskformer + head. + test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of Maskformer + head. + init_cfg (dict or list[dict], optional): Initialization config dict. + Defaults to None. 
+ """ + def __init__(self, + out_channels, + num_queries=100, + pixel_decoder=None, + enforce_decoder_input_project=False, + transformer_decoder=None, + positional_encoding=None, + loss_cls=dict( + type='CrossEntropyLoss', + bg_cls_weight=0.1, + use_sigmoid=False, + loss_weight=1.0, + class_weight=1.0), + loss_mask=dict( + type='FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=20.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + naive_dice=True, + loss_weight=1.0), + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=1.), + dice_cost=dict(type='DiceCost', weight=1.0, pred_act=True, + eps=1.0), + mask_cost=dict(type='MaskFocalLossCost', weight=20.0)), + **kwargs): + super(MaskFormerHead, self).__init__(input_transform='multiple_select', + **kwargs) + self.num_queries = num_queries + + pixel_decoder.update( + in_channels=self.in_channels, + feat_channels=self.channels, + out_channels=out_channels) + self.pixel_decoder = build_plugin_layer(pixel_decoder)[1] + self.transformer_decoder = build_transformer_layer_sequence( + transformer_decoder) + self.decoder_embed_dims = self.transformer_decoder.embed_dims + pixel_decoder_type = pixel_decoder.get('type') + if pixel_decoder_type == 'PixelDecoder' and ( + self.decoder_embed_dims != self.in_channels[-1] + or enforce_decoder_input_project): + self.decoder_input_proj = Conv2d( + self.in_channels[-1], self.decoder_embed_dims, kernel_size=1) + else: + self.decoder_input_proj = nn.Identity() + self.decoder_pe = build_positional_encoding(positional_encoding) + self.query_embed = nn.Embedding(self.num_queries, out_channels) + + self.cls_embed = nn.Linear(self.channels, self.num_classes + 1) + self.mask_embed = nn.Sequential( + nn.Linear(self.channels, self.channels), nn.ReLU(inplace=True), + nn.Linear(self.channels, self.channels), nn.ReLU(inplace=True), + nn.Linear(self.channels, out_channels)) + + self.assigner = build_assigner(assigner) + + self.bg_cls_weight = 0 + class_weight = loss_cls.get('class_weight', None) + if class_weight is not None and (self.__class__ is MaskFormerHead): + assert isinstance(class_weight, float), 'Expected ' \ + 'class_weight to have type float. Found ' \ + f'{type(class_weight)}.' + # NOTE following the official MaskFormerHead repo, bg_cls_weight + # means relative classification weight of the VOID class. + bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight) + assert isinstance(bg_cls_weight, float), 'Expected ' \ + 'bg_cls_weight to have type float. Found ' \ + f'{type(bg_cls_weight)}.' + class_weight = (self.num_classes + 1) * [class_weight] + # set VOID class as the last indice + class_weight[self.num_classes] = bg_cls_weight + loss_cls.update({'class_weight': class_weight}) + if 'bg_cls_weight' in loss_cls: + loss_cls.pop('bg_cls_weight') + self.bg_cls_weight = bg_cls_weight + + assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \ + 'The classification weight for loss and matcher should be' \ + 'exactly the same.' + assert loss_dice['loss_weight'] == assigner['dice_cost']['weight'], \ + f'The dice weight for loss and matcher' \ + f'should be exactly the same.' + assert loss_mask['loss_weight'] == assigner['mask_cost']['weight'], \ + 'The focal weight for loss and matcher should be' \ + 'exactly the same.' 
+ self.loss_cls = build_loss(loss_cls) + self.loss_mask = build_loss(loss_mask) + self.loss_dice = build_loss(loss_dice) + + self.init_weights() + + def init_weights(self): + kaiming_init(self.decoder_input_proj, a=1) + + def get_targets(self, cls_scores_list, mask_preds_list, gt_labels_list, + gt_masks_list, img_metas): + """Compute classification and mask targets for all images for a decoder + layer. + + Args: + cls_scores_list (list[Tensor]): Mask score logits from a single + decoder layer for all images. Each with shape [num_queries, + cls_out_channels]. + mask_preds_list (list[Tensor]): Mask logits from a single decoder + layer for all images. Each with shape [num_queries, h, w]. + gt_labels_list (list[Tensor]): Ground truth class indices for all + images. Each with shape (n, ), n is the sum of number of stuff + type and number of instance in a image. + gt_masks_list (list[Tensor]): Ground truth mask for each image, + each with shape (n, h, w). + img_metas (list[dict]): List of image meta information. + + Returns: + tuple[list[Tensor]]: a tuple containing the following targets. + + - labels_list (list[Tensor]): Labels of all images. + Each with shape [num_queries, ]. + - label_weights_list (list[Tensor]): Label weights of all + images.Each with shape [num_queries, ]. + - mask_targets_list (list[Tensor]): Mask targets of all images. + Each with shape [num_queries, h, w]. + - mask_weights_list (list[Tensor]): Mask weights of all images. + Each with shape [num_queries, ]. + - num_total_pos (int): Number of positive samples in all + images. + - num_total_neg (int): Number of negative samples in all + images. + """ + (labels_list, label_weights_list, mask_targets_list, mask_weights_list, + pos_inds_list, + neg_inds_list) = multi_apply(self._get_target_single, cls_scores_list, + mask_preds_list, gt_labels_list, + gt_masks_list, img_metas) + + num_total_pos = sum((inds.numel() for inds in pos_inds_list)) + num_total_neg = sum((inds.numel() for inds in neg_inds_list)) + return (labels_list, label_weights_list, mask_targets_list, + mask_weights_list, num_total_pos, num_total_neg) + + def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks, + img_metas): + """Compute classification and mask targets for one image. + + Args: + cls_score (Tensor): Mask score logits from a single decoder layer + for one image. Shape [num_queries, cls_out_channels]. + mask_pred (Tensor): Mask logits for a single decoder layer for one + image. Shape [num_queries, h, w]. + gt_labels (Tensor): Ground truth class indices for one image with + shape (n, ). n is the sum of number of stuff type and number + of instance in a image. + gt_masks (Tensor): Ground truth mask for each image, each with + shape (n, h, w). + img_metas (dict): Image informtation. + + Returns: + tuple[Tensor]: a tuple containing the following for one image. + + - labels (Tensor): Labels of each image. + shape [num_queries, ]. + - label_weights (Tensor): Label weights of each image. + shape [num_queries, ]. + - mask_targets (Tensor): Mask targets of each image. + shape [num_queries, h, w]. + - mask_weights (Tensor): Mask weights of each image. + shape [num_queries, ]. + - pos_inds (Tensor): Sampled positive indices for each image. + - neg_inds (Tensor): Sampled negative indices for each image. 
+ """ + target_shape = mask_pred.shape[-2:] + gt_masks_downsampled = F.interpolate( + gt_masks.unsqueeze(1).float(), target_shape, + mode='nearest').squeeze(1).long() + # assign and sample + assign_result = self.assigner.assign(cls_score, mask_pred, gt_labels, + gt_masks_downsampled, img_metas) + # pos_ind: range from 1 to (self.num_classes) + # which represents the positive index + pos_inds = torch.nonzero(assign_result.gt_inds > 0, + as_tuple=False).squeeze(-1).unique() + neg_inds = torch.nonzero(assign_result.gt_inds == 0, + as_tuple=False).squeeze(-1).unique() + pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 + + # label target + labels = gt_labels.new_full((self.num_queries, ), + self.num_classes, + dtype=torch.long) + labels[pos_inds] = gt_labels[pos_assigned_gt_inds] + label_weights = gt_labels.new_ones(self.num_queries) + + # mask target + mask_targets = gt_masks[pos_assigned_gt_inds, :] + mask_weights = mask_pred.new_zeros((self.num_queries, )) + mask_weights[pos_inds] = 1.0 + + return (labels, label_weights, mask_targets, mask_weights, pos_inds, + neg_inds) + + @force_fp32(apply_to=('all_cls_scores', 'all_mask_preds')) + def loss(self, all_cls_scores, all_mask_preds, gt_labels_list, + gt_masks_list, img_metas): + """Loss function. + + Args: + all_cls_scores (Tensor): Classification scores for all decoder + layers with shape [num_decoder, batch_size, num_queries, + cls_out_channels]. + all_mask_preds (Tensor): Mask scores for all decoder layers with + shape [num_decoder, batch_size, num_queries, h, w]. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image with shape (n, ). n is the sum of number of stuff type + and number of instance in a image. + gt_masks_list (list[Tensor]): Ground truth mask for each image with + shape (n, h, w). + img_metas (list[dict]): List of image meta information. + + Returns: + dict[str, Tensor]: A dictionary of loss components. + """ + num_dec_layers = len(all_cls_scores) + all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)] + all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)] + img_metas_list = [img_metas for _ in range(num_dec_layers)] + losses_cls, losses_mask, losses_dice = multi_apply( + self.loss_single, all_cls_scores, all_mask_preds, + all_gt_labels_list, all_gt_masks_list, img_metas_list) + + loss_dict = dict() + # loss from the last decoder layer + loss_dict['loss_cls'] = losses_cls[-1] + loss_dict['loss_mask'] = losses_mask[-1] + loss_dict['loss_dice'] = losses_dice[-1] + # loss from other decoder layers + num_dec_layer = 0 + for loss_cls_i, loss_mask_i, loss_dice_i in zip( + losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]): + loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i + loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i + loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i + num_dec_layer += 1 + return loss_dict + + def loss_single(self, cls_scores, mask_preds, gt_labels_list, + gt_masks_list, img_metas): + """Loss function for outputs from a single decoder layer. + + Args: + cls_scores (Tensor): Mask score logits from a single decoder layer + for all images. Shape [batch_size, num_queries, + cls_out_channels]. + mask_preds (Tensor): Mask logits for a pixel decoder for all + images. Shape [batch_size, num_queries, h, w]. + gt_labels_list (list[Tensor]): Ground truth class indices for each + image, each with shape (n, ). n is the sum of number of stuff + types and number of instances in a image. 
+ gt_masks_list (list[Tensor]): Ground truth mask for each image, + each with shape (n, h, w). + img_metas (list[dict]): List of image meta information. + + Returns: + tuple[Tensor]:Loss components for outputs from a single decoder + layer. + """ + num_imgs = cls_scores.size(0) + cls_scores_list = [cls_scores[i] for i in range(num_imgs)] + mask_preds_list = [mask_preds[i] for i in range(num_imgs)] + + (labels_list, label_weights_list, mask_targets_list, mask_weights_list, + num_total_pos, + num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list, + gt_labels_list, gt_masks_list, + img_metas) + # shape [batch_size, num_queries] + labels = torch.stack(labels_list, dim=0) + # shape [batch_size, num_queries] + label_weights = torch.stack(label_weights_list, dim=0) + # shape [num_gts, h, w] + mask_targets = torch.cat(mask_targets_list, dim=0) + # shape [batch_size, num_queries] + mask_weights = torch.stack(mask_weights_list, dim=0) + + # classfication loss + # shape [batch_size * num_queries, ] + cls_scores = cls_scores.flatten(0, 1) + # shape [batch_size * num_queries, ] + labels = labels.flatten(0, 1) + # shape [batch_size* num_queries, ] + label_weights = label_weights.flatten(0, 1) + + class_weight = cls_scores.new_ones(self.num_classes + 1) + class_weight[-1] = self.bg_cls_weight + + loss_cls = self.loss_cls( + cls_scores, + labels, + label_weights, + avg_factor=class_weight[labels].sum()) + + num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos])) + num_total_masks = max(num_total_masks, 1) + + # extract positive ones + mask_preds = mask_preds[mask_weights > 0] + target_shape = mask_targets.shape[-2:] + + if mask_targets.shape[0] == 0: + # zero match + loss_dice = mask_preds.sum() + loss_mask = mask_preds.sum() + return loss_cls, loss_mask, loss_dice + + # upsample to shape of target + # shape [num_gts, h, w] + mask_preds = F.interpolate( + mask_preds.unsqueeze(1), + target_shape, + mode='bilinear', + align_corners=False).squeeze(1) + + # dice loss + loss_dice = self.loss_dice( + mask_preds, mask_targets, avg_factor=num_total_masks) + + # mask loss + # FocalLoss support input of shape [n, num_class] + h, w = mask_preds.shape[-2:] + # shape [num_gts, h, w] -> [num_gts * h * w, 1] + mask_preds = mask_preds.reshape(-1, 1) + # shape [num_gts, h, w] -> [num_gts * h * w] + mask_targets = mask_targets.reshape(-1) + # target is (1 - mask_targets) !!! + print('mask_pred:', mask_preds.shape) + print('mask_targets:', mask_targets.shape) + loss_mask = self.loss_mask( + mask_preds, 1 - mask_targets, avg_factor=num_total_masks * h * w) + + return loss_cls, loss_mask, loss_dice + + def forward(self, feats, img_metas): + """Forward function. + + Args: + feats (list[Tensor]): Features from the upstream network, each + is a 4D-tensor. + img_metas (list[dict]): List of image information. + + Returns: + all_cls_scores (Tensor): Classification scores for each + scale level. Each is a 4D-tensor with shape + [num_decoder, batch_size, num_queries, cls_out_channels]. + Note `cls_out_channels` should includes background. + all_mask_preds (Tensor): Mask scores for each decoder + layer. Each with shape [num_decoder, batch_size, + num_queries, h, w]. 
+ """ + batch_size = len(img_metas) + input_img_h, input_img_w = img_metas[0]['pad_shape'][:-1] + # input_img_h, input_img_w = img_metas[0]['batch_input_shape'] + padding_mask = feats[-1].new_ones( + (batch_size, input_img_h, input_img_w), dtype=torch.float32) + for i in range(batch_size): + img_h, img_w, _ = img_metas[i]['img_shape'] + padding_mask[i, :img_h, :img_w] = 0 + padding_mask = F.interpolate( + padding_mask.unsqueeze(1), + size=feats[-1].shape[-2:], + mode='nearest').to(torch.bool).squeeze(1) + # when backbone is swin, memory is output of last stage of swin. + # when backbone is r50, memory is output of tranformer encoder. + mask_features, memory = self.pixel_decoder(feats, img_metas) + pos_embed = self.decoder_pe(padding_mask) + memory = self.decoder_input_proj(memory) + # shape [batch_size, c, h, w] -> [h*w, batch_size, c] + memory = memory.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + # shape [batch_size, h * w] + padding_mask = padding_mask.flatten(1) + # shape = [num_queries, embed_dims] + query_embed = self.query_embed.weight + # shape = [num_queries, batch_size, embed_dims] + query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1) + target = torch.zeros_like(query_embed) + # shape [num_decoder, num_queries, batch_size, embed_dims] + out_dec = self.transformer_decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=padding_mask) + # shape [num_decoder, batch_size, num_queries, embed_dims] + out_dec = out_dec.transpose(1, 2) + + # cls_scores + all_cls_scores = self.cls_embed(out_dec) + + # mask_preds + mask_embed = self.mask_embed(out_dec) + all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed, + mask_features) + + return all_cls_scores, all_mask_preds + + def forward_train(self, + x, + img_metas, + gt_semantic_seg, + gt_labels, + gt_masks): + """Forward function for training mode. + + Args: + x (list[Tensor]): Multi-level features from the upstream network, + each is a 4D-tensor. + img_metas (list[Dict]): List of image information. + gt_semantic_seg (list[tensor]):Each element is the ground truth + of semantic segmentation with the shape (N, H, W). + train_cfg (dict): The training config, which not been used in + maskformer. + gt_labels (list[Tensor]): Each element is ground truth labels of + each box, shape (num_gts,). + gt_masks (list[BitmapMasks]): Each element is masks of instances + of a image, shape (num_gts, h, w). + + Returns: + losses (dict[str, Tensor]): a dictionary of loss components + """ + + # forward + all_cls_scores, all_mask_preds = self(x, img_metas) + + # loss + losses = self.loss(all_cls_scores, all_mask_preds, gt_labels, gt_masks, + img_metas) + + return losses + + def forward_test(self, inputs, img_metas, test_cfg): + """Test segment without test-time aumengtation. + + Only the output of last decoder layers was used. + + Args: + inputs (list[Tensor]): Multi-level features from the + upstream network, each is a 4D-tensor. + img_metas (list[dict]): List of image information. + test_cfg (dict): Testing config. + + Returns: + seg_mask (Tensor): Predicted semantic segmentation logits. 
+ """ + all_cls_scores, all_mask_preds = self(inputs, img_metas) + cls_score, mask_pred = all_cls_scores[-1], all_mask_preds[-1] + ori_h, ori_w, _ = img_metas[0]['ori_shape'] + + # semantic inference + cls_score = F.softmax(cls_score, dim=-1)[..., :-1] + mask_pred = mask_pred.sigmoid() + seg_mask = torch.einsum('bqc,bqhw->bchw', cls_score, mask_pred) + return seg_mask diff --git a/segmentation/mmseg_custom/models/losses/__init__.py b/segmentation/mmseg_custom/models/losses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..50ed88a4480b99f857b0edb7dd523b5a3d5815b8 --- /dev/null +++ b/segmentation/mmseg_custom/models/losses/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy, + cross_entropy, mask_cross_entropy) +from .dice_loss import DiceLoss +from .focal_loss import FocalLoss +from .match_costs import (ClassificationCost, CrossEntropyLossCost, DiceCost, + MaskFocalLossCost) + +__all__ = [ + 'cross_entropy', 'binary_cross_entropy', 'mask_cross_entropy', + 'CrossEntropyLoss', 'DiceLoss', 'FocalLoss', 'ClassificationCost', + 'MaskFocalLossCost', 'DiceCost', 'CrossEntropyLossCost' +] diff --git a/segmentation/mmseg_custom/models/losses/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/models/losses/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..298f8c99a1be354dbede4e2d32b1f1696e3b5026 Binary files /dev/null and b/segmentation/mmseg_custom/models/losses/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/losses/__pycache__/cross_entropy_loss.cpython-39.pyc b/segmentation/mmseg_custom/models/losses/__pycache__/cross_entropy_loss.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45e1e1ece0ec073b334eaf678917210a3bed01c6 Binary files /dev/null and b/segmentation/mmseg_custom/models/losses/__pycache__/cross_entropy_loss.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/losses/__pycache__/dice_loss.cpython-39.pyc b/segmentation/mmseg_custom/models/losses/__pycache__/dice_loss.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53eac433d39eb4b778353eda91b2faa6a685bd26 Binary files /dev/null and b/segmentation/mmseg_custom/models/losses/__pycache__/dice_loss.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/losses/__pycache__/focal_loss.cpython-39.pyc b/segmentation/mmseg_custom/models/losses/__pycache__/focal_loss.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43ae6d8fd0d8176d4f227a84dd1f7ab0f42e6477 Binary files /dev/null and b/segmentation/mmseg_custom/models/losses/__pycache__/focal_loss.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/losses/__pycache__/match_costs.cpython-39.pyc b/segmentation/mmseg_custom/models/losses/__pycache__/match_costs.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7263146f4751c6f721eac8db6f210f28f03b1894 Binary files /dev/null and b/segmentation/mmseg_custom/models/losses/__pycache__/match_costs.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/losses/cross_entropy_loss.py b/segmentation/mmseg_custom/models/losses/cross_entropy_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..5766ea6e5c922adba40de672f4575aacc77719ce --- /dev/null +++ b/segmentation/mmseg_custom/models/losses/cross_entropy_loss.py @@ 
-0,0 +1,291 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmseg.models.builder import LOSSES +from mmseg.models.losses.utils import get_class_weight, weight_reduce_loss + + +def cross_entropy(pred, + label, + weight=None, + class_weight=None, + reduction='mean', + avg_factor=None, + ignore_index=-100, + avg_non_ignore=False): + """cross_entropy. The wrapper function for :func:`F.cross_entropy` + + Args: + pred (torch.Tensor): The prediction with shape (N, 1). + label (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + Default: None. + class_weight (list[float], optional): The weight for each class. + Default: None. + reduction (str, optional): The method used to reduce the loss. + Options are 'none', 'mean' and 'sum'. Default: 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Default: None. + ignore_index (int): Specifies a target value that is ignored and + does not contribute to the input gradients. When + ``avg_non_ignore `` is ``True``, and the ``reduction`` is + ``''mean''``, the loss is averaged over non-ignored targets. + Defaults: -100. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + `New in version 0.23.0.` + """ + + # class_weight is a manual rescaling weight given to each class. + # If given, has to be a Tensor of size C element-wise losses + loss = F.cross_entropy( + pred, + label, + weight=class_weight, + reduction='none', + ignore_index=ignore_index) + + # apply weights and do the reduction + # average loss over non-ignored elements + # pytorch's official cross_entropy average loss over non-ignored elements + # refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa + if (avg_factor is None) and avg_non_ignore and reduction == 'mean': + avg_factor = label.numel() - (label == ignore_index).sum().item() + if weight is not None: + weight = weight.float() + loss = weight_reduce_loss( + loss, weight=weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def _expand_onehot_labels(labels, label_weights, target_shape, ignore_index): + """Expand onehot labels to match the size of prediction.""" + bin_labels = labels.new_zeros(target_shape) + valid_mask = (labels >= 0) & (labels != ignore_index) + inds = torch.nonzero(valid_mask, as_tuple=True) + + if inds[0].numel() > 0: + if labels.dim() == 3: + bin_labels[inds[0], labels[valid_mask], inds[1], inds[2]] = 1 + else: + bin_labels[inds[0], labels[valid_mask]] = 1 + + valid_mask = valid_mask.unsqueeze(1).expand(target_shape).float() + + if label_weights is None: + bin_label_weights = valid_mask + else: + bin_label_weights = label_weights.unsqueeze(1).expand(target_shape) + bin_label_weights = bin_label_weights * valid_mask + + return bin_labels, bin_label_weights, valid_mask + + +def binary_cross_entropy(pred, + label, + weight=None, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=-100, + avg_non_ignore=False, + **kwargs): + """Calculate the binary CrossEntropy loss. + + Args: + pred (torch.Tensor): The prediction with shape (N, 1). + label (torch.Tensor): The learning label of the prediction. + Note: In bce loss, label < 0 is invalid. + weight (torch.Tensor, optional): Sample-wise loss weight. 
+ reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (list[float], optional): The weight for each class. + ignore_index (int): The label index to be ignored. Default: -100. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + `New in version 0.23.0.` + + Returns: + torch.Tensor: The calculated loss + """ + if pred.size(1) == 1: + # For binary class segmentation, the shape of pred is + # [N, 1, H, W] and that of label is [N, H, W]. + assert label.max() <= 1, \ + 'For pred with shape [N, 1, H, W], its label must have at ' \ + 'most 2 classes' + pred = pred.squeeze() + if pred.dim() != label.dim(): + assert (pred.dim() == 2 and label.dim() == 1) or ( + pred.dim() == 4 and label.dim() == 3), \ + 'Only pred shape [N, C], label shape [N] or pred shape [N, C, ' \ + 'H, W], label shape [N, H, W] are supported' + # `weight` returned from `_expand_onehot_labels` + # has been treated for valid (non-ignore) pixels + label, weight, valid_mask = _expand_onehot_labels( + label, weight, pred.shape, ignore_index) + else: + # should mask out the ignored elements + valid_mask = ((label >= 0) & (label != ignore_index)).float() + if weight is not None: + weight = weight * valid_mask + else: + weight = valid_mask + # average loss over non-ignored and valid elements + if reduction == 'mean' and avg_factor is None and avg_non_ignore: + avg_factor = valid_mask.sum().item() + + loss = F.binary_cross_entropy_with_logits( + pred, label.float(), pos_weight=class_weight, reduction='none') + # do the reduction for the weighted loss + loss = weight_reduce_loss( + loss, weight, reduction=reduction, avg_factor=avg_factor) + + return loss + + +def mask_cross_entropy(pred, + target, + label, + reduction='mean', + avg_factor=None, + class_weight=None, + ignore_index=None, + **kwargs): + """Calculate the CrossEntropy loss for masks. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + target (torch.Tensor): The learning label of the prediction. + label (torch.Tensor): ``label`` indicates the class label of the mask' + corresponding object. This will be used to select the mask in the + of the class which the object belongs to when the mask prediction + if not class-agnostic. + reduction (str, optional): The method used to reduce the loss. + Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + class_weight (list[float], optional): The weight for each class. + ignore_index (None): Placeholder, to be consistent with other loss. + Default: None. + + Returns: + torch.Tensor: The calculated loss + """ + assert ignore_index is None, 'BCE loss does not support ignore_index' + # TODO: handle these two reserved arguments + assert reduction == 'mean' and avg_factor is None + num_rois = pred.size()[0] + inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device) + pred_slice = pred[inds, label].squeeze(1) + return F.binary_cross_entropy_with_logits( + pred_slice, target, weight=class_weight, reduction='mean')[None] + + +@LOSSES.register_module(force=True) +class CrossEntropyLoss(nn.Module): + """CrossEntropyLoss. + + Args: + use_sigmoid (bool, optional): Whether the prediction uses sigmoid + of softmax. Defaults to False. 
+ use_mask (bool, optional): Whether to use mask cross entropy loss. + Defaults to False. + reduction (str, optional): . Defaults to 'mean'. + Options are "none", "mean" and "sum". + class_weight (list[float] | str, optional): Weight of each class. If in + str format, read them from a file. Defaults to None. + loss_weight (float, optional): Weight of the loss. Defaults to 1.0. + loss_name (str, optional): Name of the loss item. If you want this loss + item to be included into the backward graph, `loss_` must be the + prefix of the name. Defaults to 'loss_ce'. + avg_non_ignore (bool): The flag decides to whether the loss is + only averaged over non-ignored targets. Default: False. + `New in version 0.23.0.` + """ + def __init__(self, + use_sigmoid=False, + use_mask=False, + reduction='mean', + class_weight=None, + loss_weight=1.0, + loss_name='loss_ce', + avg_non_ignore=False): + super(CrossEntropyLoss, self).__init__() + assert (use_sigmoid is False) or (use_mask is False) + self.use_sigmoid = use_sigmoid + self.use_mask = use_mask + self.reduction = reduction + self.loss_weight = loss_weight + self.class_weight = get_class_weight(class_weight) + self.avg_non_ignore = avg_non_ignore + if not self.avg_non_ignore and self.reduction == 'mean': + warnings.warn( + 'Default ``avg_non_ignore`` is False, if you would like to ' + 'ignore the certain label and average loss over non-ignore ' + 'labels, which is the same with PyTorch official ' + 'cross_entropy, set ``avg_non_ignore=True``.') + + if self.use_sigmoid: + self.cls_criterion = binary_cross_entropy + elif self.use_mask: + self.cls_criterion = mask_cross_entropy + else: + self.cls_criterion = cross_entropy + self._loss_name = loss_name + + def extra_repr(self): + """Extra repr.""" + s = f'avg_non_ignore={self.avg_non_ignore}' + return s + + def forward(self, + cls_score, + label, + weight=None, + avg_factor=None, + reduction_override=None, + ignore_index=-100, + **kwargs): + """Forward function.""" + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = (reduction_override + if reduction_override else self.reduction) + if self.class_weight is not None: + class_weight = cls_score.new_tensor(self.class_weight) + else: + class_weight = None + # Note: for BCE loss, label < 0 is invalid. + loss_cls = self.loss_weight * self.cls_criterion( + cls_score, + label, + weight, + class_weight=class_weight, + reduction=reduction, + avg_factor=avg_factor, + avg_non_ignore=self.avg_non_ignore, + ignore_index=ignore_index, + **kwargs) + return loss_cls + + @property + def loss_name(self): + """Loss Name. + + This function must be implemented and will return the name of this + loss function. This name will be used to combine different loss items + by simple sum operation. In addition, if you want this loss item to be + included into the backward graph, `loss_` must be the prefix of the + name. + + Returns: + str: The name of this loss item. + """ + return self._loss_name diff --git a/segmentation/mmseg_custom/models/losses/dice_loss.py b/segmentation/mmseg_custom/models/losses/dice_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..29bf032429aedd5c661d3445ab60bdb72f2f7d82 --- /dev/null +++ b/segmentation/mmseg_custom/models/losses/dice_loss.py @@ -0,0 +1,179 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
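Editor's note on the `cross_entropy_loss.py` module that concludes above, before the dice loss below: the custom `CrossEntropyLoss` largely mirrors the mmseg version, and the behavioural switch its docstring highlights is `avg_non_ignore`. The following self-contained sketch (not part of the patch; toy shapes and `ignore_index=255` are illustrative assumptions) shows what that flag changes in the `cross_entropy` wrapper when `reduction='mean'`: the summed per-pixel loss is divided by the number of non-ignored targets rather than by all targets.

```python
import torch
import torch.nn.functional as F

ignore_index = 255
pred = torch.randn(1, 3, 2, 2)                        # (N, C, H, W) logits
label = torch.tensor([[[0, 1], [2, ignore_index]]])   # one ignored pixel

# Per-pixel losses; ignored positions contribute 0, matching the wrapper's
# use of F.cross_entropy(..., reduction='none', ignore_index=ignore_index).
per_pixel = F.cross_entropy(pred, label, reduction='none',
                            ignore_index=ignore_index)

loss_all = per_pixel.sum() / label.numel()                      # avg_non_ignore=False
loss_valid = per_pixel.sum() / (label != ignore_index).sum()    # avg_non_ignore=True
print(loss_all.item(), loss_valid.item())
```

The binary path above applies the same idea: when `avg_non_ignore` is set and no `avg_factor` is given, `binary_cross_entropy` averages over `valid_mask.sum()` instead of over every element.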
+import torch +import torch.nn as nn +from mmseg.models.builder import LOSSES +from mmseg.models.losses.utils import weight_reduce_loss + + +def dice_loss(pred, + target, + weight=None, + eps=1e-3, + reduction='mean', + avg_factor=None): + """Calculate dice loss, which is proposed in + `V-Net: Fully Convolutional Neural Networks for Volumetric + Medical Image Segmentation `_. + + Args: + pred (torch.Tensor): The prediction, has a shape (n, *) + target (torch.Tensor): The learning label of the prediction, + shape (n, *), same shape of pred. + weight (torch.Tensor, optional): The weight of loss for each + prediction, has a shape (n,). Defaults to None. + eps (float): Avoid dividing by zero. Default: 1e-3. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. + Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + + input = pred.flatten(1) + target = target.flatten(1).float() + + a = torch.sum(input * target, 1) + b = torch.sum(input * input, 1) + eps + c = torch.sum(target * target, 1) + eps + d = (2 * a) / (b + c) + loss = 1 - d + if weight is not None: + assert weight.ndim == loss.ndim + assert len(weight) == len(pred) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +def naive_dice_loss(pred, + target, + weight=None, + eps=1e-3, + reduction='mean', + avg_factor=None): + """Calculate naive dice loss, the coefficient in the denominator is the + first power instead of the second power. + + Args: + pred (torch.Tensor): The prediction, has a shape (n, *) + target (torch.Tensor): The learning label of the prediction, + shape (n, *), same shape of pred. + weight (torch.Tensor, optional): The weight of loss for each + prediction, has a shape (n,). Defaults to None. + eps (float): Avoid dividing by zero. Default: 1e-3. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. + Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + input = pred.flatten(1) + target = target.flatten(1).float() + + a = torch.sum(input * target, 1) + b = torch.sum(input, 1) + c = torch.sum(target, 1) + d = (2 * a + eps) / (b + c + eps) + loss = 1 - d + if weight is not None: + assert weight.ndim == loss.ndim + assert len(weight) == len(pred) + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module(force=True) +class DiceLoss(nn.Module): + def __init__(self, + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=False, + loss_weight=1.0, + eps=1e-3): + """Dice Loss, there are two forms of dice loss is supported: + + - the one proposed in `V-Net: Fully Convolutional Neural + Networks for Volumetric Medical Image Segmentation + `_. + - the dice loss in which the power of the number in the + denominator is the first power instead of the second + power. + + Args: + use_sigmoid (bool, optional): Whether to the prediction is + used for sigmoid or softmax. Defaults to True. + activate (bool): Whether to activate the predictions inside, + this will disable the inside sigmoid operation. + Defaults to True. + reduction (str, optional): The method used + to reduce the loss. Options are "none", + "mean" and "sum". Defaults to 'mean'. 
+ naive_dice (bool, optional): If false, use the dice + loss defined in the V-Net paper, otherwise, use the + naive dice loss in which the power of the number in the + denominator is the first power instead of the second + power.Defaults to False. + loss_weight (float, optional): Weight of loss. Defaults to 1.0. + eps (float): Avoid dividing by zero. Defaults to 1e-3. + """ + + super(DiceLoss, self).__init__() + self.use_sigmoid = use_sigmoid + self.reduction = reduction + self.naive_dice = naive_dice + self.loss_weight = loss_weight + self.eps = eps + self.activate = activate + + def forward(self, + pred, + target, + weight=None, + reduction_override=None, + avg_factor=None): + """Forward function. + + Args: + pred (torch.Tensor): The prediction, has a shape (n, *). + target (torch.Tensor): The label of the prediction, + shape (n, *), same shape of pred. + weight (torch.Tensor, optional): The weight of loss for each + prediction, has a shape (n,). Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Options are "none", "mean" and "sum". + + Returns: + torch.Tensor: The calculated loss + """ + + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = (reduction_override + if reduction_override else self.reduction) + + if self.activate: + if self.use_sigmoid: + pred = pred.sigmoid() + else: + raise NotImplementedError + + if self.naive_dice: + loss = self.loss_weight * naive_dice_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor) + else: + loss = self.loss_weight * dice_loss( + pred, + target, + weight, + eps=self.eps, + reduction=reduction, + avg_factor=avg_factor) + + return loss diff --git a/segmentation/mmseg_custom/models/losses/focal_loss.py b/segmentation/mmseg_custom/models/losses/focal_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..3d48a2bb6e5933fcaac845105988dce8b40c63e7 --- /dev/null +++ b/segmentation/mmseg_custom/models/losses/focal_loss.py @@ -0,0 +1,180 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss +from mmseg.models.builder import LOSSES +from mmseg.models.losses.utils import weight_reduce_loss + + +# This method is only for debugging +def py_sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + """PyTorch version of `Focal Loss `_. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the + number of classes + target (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 0.25. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. 
+ """ + pred_sigmoid = pred.sigmoid() + target = target.type_as(pred) + pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) + focal_weight = (alpha * target + (1 - alpha) * + (1 - target)) * pt.pow(gamma) + loss = F.binary_cross_entropy_with_logits( + pred, target, reduction='none') * focal_weight + if weight is not None: + if weight.shape != loss.shape: + if weight.size(0) == loss.size(0): + # For most cases, weight is of shape (num_priors, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + else: + # Sometimes, weight per anchor per class is also needed. e.g. + # in FSAF. But it may be flattened of shape + # (num_priors x num_class, ), while loss is still of shape + # (num_priors, num_class). + assert weight.numel() == loss.numel() + weight = weight.view(loss.size(0), -1) + assert weight.ndim == loss.ndim + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +def sigmoid_focal_loss(pred, + target, + weight=None, + gamma=2.0, + alpha=0.25, + reduction='mean', + avg_factor=None): + r"""A warpper of cuda version `Focal Loss + `_. + + Args: + pred (torch.Tensor): The prediction with shape (N, C), C is the number + of classes. + target (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): Sample-wise loss weight. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 0.25. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + """ + # Function.apply does not accept keyword arguments, so the decorator + # "weighted_loss" is not applicable + loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma, + alpha, None, 'none') + if weight is not None: + if weight.shape != loss.shape: + if weight.size(0) == loss.size(0): + # For most cases, weight is of shape (num_priors, ), + # which means it does not have the second axis num_class + weight = weight.view(-1, 1) + else: + # Sometimes, weight per anchor per class is also needed. e.g. + # in FSAF. But it may be flattened of shape + # (num_priors x num_class, ), while loss is still of shape + # (num_priors, num_class). + assert weight.numel() == loss.numel() + weight = weight.view(loss.size(0), -1) + assert weight.ndim == loss.ndim + loss = weight_reduce_loss(loss, weight, reduction, avg_factor) + return loss + + +@LOSSES.register_module(force=True) +class FocalLoss(nn.Module): + def __init__(self, + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + reduction='mean', + loss_weight=1.0): + """`Focal Loss `_ + + Args: + use_sigmoid (bool, optional): Whether to the prediction is + used for sigmoid or softmax. Defaults to True. + gamma (float, optional): The gamma for calculating the modulating + factor. Defaults to 2.0. + alpha (float, optional): A balanced form for Focal Loss. + Defaults to 0.25. + reduction (str, optional): The method used to reduce the loss into + a scalar. Defaults to 'mean'. Options are "none", "mean" and + "sum". + loss_weight (float, optional): Weight of loss. Defaults to 1.0. + """ + super(FocalLoss, self).__init__() + assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' 
+ self.use_sigmoid = use_sigmoid + self.gamma = gamma + self.alpha = alpha + self.reduction = reduction + self.loss_weight = loss_weight + + def forward(self, + pred, + target, + weight=None, + avg_factor=None, + reduction_override=None): + """Forward function. + + Args: + pred (torch.Tensor): The prediction. + target (torch.Tensor): The learning label of the prediction. + weight (torch.Tensor, optional): The weight of loss for each + prediction. Defaults to None. + avg_factor (int, optional): Average factor that is used to average + the loss. Defaults to None. + reduction_override (str, optional): The reduction method used to + override the original reduction method of the loss. + Options are "none", "mean" and "sum". + + Returns: + torch.Tensor: The calculated loss + """ + assert reduction_override in (None, 'none', 'mean', 'sum') + reduction = ( + reduction_override if reduction_override else self.reduction) + if self.use_sigmoid: + if torch.cuda.is_available() and pred.is_cuda: + calculate_loss_func = sigmoid_focal_loss + else: + num_classes = pred.size(1) + target = F.one_hot(target, num_classes=num_classes + 1) + target = target[:, :num_classes] + calculate_loss_func = py_sigmoid_focal_loss + + loss_cls = self.loss_weight * calculate_loss_func( + pred, + target, + weight, + gamma=self.gamma, + alpha=self.alpha, + reduction=reduction, + avg_factor=avg_factor) + + else: + raise NotImplementedError + return loss_cls diff --git a/segmentation/mmseg_custom/models/losses/match_costs.py b/segmentation/mmseg_custom/models/losses/match_costs.py new file mode 100644 index 0000000000000000000000000000000000000000..9c04862d4b9347bb04f060e8681ac8b06ca0ecda --- /dev/null +++ b/segmentation/mmseg_custom/models/losses/match_costs.py @@ -0,0 +1,233 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import MATCH_COST + + +@MATCH_COST.register_module() +class FocalLossCost: + """FocalLossCost. + + Args: + weight (int | float, optional): loss_weight + alpha (int | float, optional): focal_loss alpha + gamma (int | float, optional): focal_loss gamma + eps (float, optional): default 1e-12 + + Examples: + >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost + >>> import torch + >>> self = FocalLossCost() + >>> cls_pred = torch.rand(4, 3) + >>> gt_labels = torch.tensor([0, 1, 2]) + >>> factor = torch.tensor([10, 8, 10, 8]) + >>> self(cls_pred, gt_labels) + tensor([[-0.3236, -0.3364, -0.2699], + [-0.3439, -0.3209, -0.4807], + [-0.4099, -0.3795, -0.2929], + [-0.1950, -0.1207, -0.2626]]) + """ + def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12): + self.weight = weight + self.alpha = alpha + self.gamma = gamma + self.eps = eps + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + + Returns: + torch.Tensor: cls_cost value with weight + """ + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + self.eps).log() * ( + 1 - self.alpha) * cls_pred.pow(self.gamma) + pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( + 1 - cls_pred).pow(self.gamma) + cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] + return cls_cost * self.weight + + +@MATCH_COST.register_module() +class MaskFocalLossCost(FocalLossCost): + """Cost of mask assignments based on focal losses. + + Args: + weight (int | float, optional): loss_weight. 
+ alpha (int | float, optional): focal_loss alpha. + gamma (int | float, optional): focal_loss gamma. + eps (float, optional): default 1e-12. + """ + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classfication logits + in shape (N1, H, W), dtype=torch.float32. + gt_labels (Tensor): Ground truth in shape (N2, H, W), + dtype=torch.long. + + Returns: + Tensor: classification cost matrix in shape (N1, N2). + """ + cls_pred = cls_pred.reshape((cls_pred.shape[0], -1)) + gt_labels = gt_labels.reshape((gt_labels.shape[0], -1)).float() + hw = cls_pred.shape[1] + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + self.eps).log() * ( + 1 - self.alpha) * cls_pred.pow(self.gamma) + pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( + 1 - cls_pred).pow(self.gamma) + + cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ + torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) + return cls_cost / hw * self.weight + + +@MATCH_COST.register_module() +class ClassificationCost: + """ClsSoftmaxCost.Borrow from + mmdet.core.bbox.match_costs.match_cost.ClassificationCost. + + Args: + weight (int | float, optional): loss_weight + + Examples: + >>> import torch + >>> self = ClassificationCost() + >>> cls_pred = torch.rand(4, 3) + >>> gt_labels = torch.tensor([0, 1, 2]) + >>> factor = torch.tensor([10, 8, 10, 8]) + >>> self(cls_pred, gt_labels) + tensor([[-0.3430, -0.3525, -0.3045], + [-0.3077, -0.2931, -0.3992], + [-0.3664, -0.3455, -0.2881], + [-0.3343, -0.2701, -0.3956]]) + """ + def __init__(self, weight=1.): + self.weight = weight + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + + Returns: + torch.Tensor: cls_cost value with weight + """ + # Following the official DETR repo, contrary to the loss that + # NLL is used, we approximate it in 1 - cls_score[gt_label]. + # The 1 is a constant that doesn't change the matching, + # so it can be omitted. + cls_score = cls_pred.softmax(-1) + cls_cost = -cls_score[:, gt_labels] + return cls_cost * self.weight + + +@MATCH_COST.register_module() +class DiceCost: + """Cost of mask assignments based on dice losses. + + Args: + weight (int | float, optional): loss_weight. Defaults to 1. + pred_act (bool, optional): Whether to apply sigmoid to mask_pred. + Defaults to False. + eps (float, optional): default 1e-12. + """ + def __init__(self, weight=1., pred_act=False, eps=1e-3): + self.weight = weight + self.pred_act = pred_act + self.eps = eps + + def binary_mask_dice_loss(self, mask_preds, gt_masks): + """ + Args: + mask_preds (Tensor): Mask prediction in shape (N1, H, W). + gt_masks (Tensor): Ground truth in shape (N2, H, W) + store 0 or 1, 0 for negative class and 1 for + positive class. + + Returns: + Tensor: Dice cost matrix in shape (N1, N2). + """ + mask_preds = mask_preds.reshape((mask_preds.shape[0], -1)) + gt_masks = gt_masks.reshape((gt_masks.shape[0], -1)).float() + numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks) + denominator = mask_preds.sum(-1)[:, None] + gt_masks.sum(-1)[None, :] + loss = 1 - (numerator + self.eps) / (denominator + self.eps) + return loss + + def __call__(self, mask_preds, gt_masks): + """ + Args: + mask_preds (Tensor): Mask prediction logits in shape (N1, H, W). + gt_masks (Tensor): Ground truth in shape (N2, H, W). + + Returns: + Tensor: Dice cost matrix in shape (N1, N2). 
+ """ + if self.pred_act: + mask_preds = mask_preds.sigmoid() + dice_cost = self.binary_mask_dice_loss(mask_preds, gt_masks) + return dice_cost * self.weight + + +@MATCH_COST.register_module() +class CrossEntropyLossCost: + """CrossEntropyLossCost. + + Args: + weight (int | float, optional): loss weight. Defaults to 1. + use_sigmoid (bool, optional): Whether the prediction uses sigmoid + of softmax. Defaults to True. + """ + def __init__(self, weight=1., use_sigmoid=True): + assert use_sigmoid, 'use_sigmoid = False is not supported yet.' + self.weight = weight + self.use_sigmoid = use_sigmoid + + def _binary_cross_entropy(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): The prediction with shape (num_query, 1, *) or + (num_query, *). + gt_labels (Tensor): The learning label of prediction with + shape (num_gt, *). + Returns: + Tensor: Cross entropy cost matrix in shape (num_query, num_gt). + """ + cls_pred = cls_pred.flatten(1).float() + gt_labels = gt_labels.flatten(1).float() + n = cls_pred.shape[1] + pos = F.binary_cross_entropy_with_logits( + cls_pred, torch.ones_like(cls_pred), reduction='none') + neg = F.binary_cross_entropy_with_logits( + cls_pred, torch.zeros_like(cls_pred), reduction='none') + cls_cost = torch.einsum('nc,mc->nm', pos, gt_labels) + \ + torch.einsum('nc,mc->nm', neg, 1 - gt_labels) + cls_cost = cls_cost / n + + return cls_cost + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits. + gt_labels (Tensor): Labels. + Returns: + Tensor: Cross entropy cost matrix with weight in + shape (num_query, num_gt). + """ + if self.use_sigmoid: + cls_cost = self._binary_cross_entropy(cls_pred, gt_labels) + else: + raise NotImplementedError + + return cls_cost * self.weight diff --git a/segmentation/mmseg_custom/models/losses/match_loss.py b/segmentation/mmseg_custom/models/losses/match_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..3c53839ac766a9582219b74e29ae2c4592d41185 --- /dev/null +++ b/segmentation/mmseg_custom/models/losses/match_loss.py @@ -0,0 +1,179 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..builder import MATCH_COST + + +@MATCH_COST.register_module() +class FocalLossCost: + """FocalLossCost. + + Args: + weight (int | float, optional): loss_weight + alpha (int | float, optional): focal_loss alpha + gamma (int | float, optional): focal_loss gamma + eps (float, optional): default 1e-12 + + Examples: + >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost + >>> import torch + >>> self = FocalLossCost() + >>> cls_pred = torch.rand(4, 3) + >>> gt_labels = torch.tensor([0, 1, 2]) + >>> factor = torch.tensor([10, 8, 10, 8]) + >>> self(cls_pred, gt_labels) + tensor([[-0.3236, -0.3364, -0.2699], + [-0.3439, -0.3209, -0.4807], + [-0.4099, -0.3795, -0.2929], + [-0.1950, -0.1207, -0.2626]]) + """ + def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12): + self.weight = weight + self.alpha = alpha + self.gamma = gamma + self.eps = eps + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). 
+ + Returns: + torch.Tensor: cls_cost value with weight + """ + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + self.eps).log() * ( + 1 - self.alpha) * cls_pred.pow(self.gamma) + pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( + 1 - cls_pred).pow(self.gamma) + cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] + return cls_cost * self.weight + + +@MATCH_COST.register_module() +class MaskFocalLossCost(FocalLossCost): + """Cost of mask assignments based on focal losses. + + Args: + weight (int | float, optional): loss_weight. + alpha (int | float, optional): focal_loss alpha. + gamma (int | float, optional): focal_loss gamma. + eps (float, optional): default 1e-12. + """ + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classfication logits + in shape (N1, H, W), dtype=torch.float32. + gt_labels (Tensor): Ground truth in shape (N2, H, W), + dtype=torch.long. + + Returns: + Tensor: classification cost matrix in shape (N1, N2). + """ + cls_pred = cls_pred.reshape((cls_pred.shape[0], -1)) + gt_labels = gt_labels.reshape((gt_labels.shape[0], -1)).float() + hw = cls_pred.shape[1] + cls_pred = cls_pred.sigmoid() + neg_cost = -(1 - cls_pred + self.eps).log() * ( + 1 - self.alpha) * cls_pred.pow(self.gamma) + pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( + 1 - cls_pred).pow(self.gamma) + + cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ + torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) + return cls_cost / hw * self.weight + + +@MATCH_COST.register_module() +class ClassificationCost: + """ClsSoftmaxCost.Borrow from + mmdet.core.bbox.match_costs.match_cost.ClassificationCost. + + Args: + weight (int | float, optional): loss_weight + + Examples: + >>> import torch + >>> self = ClassificationCost() + >>> cls_pred = torch.rand(4, 3) + >>> gt_labels = torch.tensor([0, 1, 2]) + >>> factor = torch.tensor([10, 8, 10, 8]) + >>> self(cls_pred, gt_labels) + tensor([[-0.3430, -0.3525, -0.3045], + [-0.3077, -0.2931, -0.3992], + [-0.3664, -0.3455, -0.2881], + [-0.3343, -0.2701, -0.3956]]) + """ + def __init__(self, weight=1.): + self.weight = weight + + def __call__(self, cls_pred, gt_labels): + """ + Args: + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). + + Returns: + torch.Tensor: cls_cost value with weight + """ + # Following the official DETR repo, contrary to the loss that + # NLL is used, we approximate it in 1 - cls_score[gt_label]. + # The 1 is a constant that doesn't change the matching, + # so it can be omitted. + cls_score = cls_pred.softmax(-1) + cls_cost = -cls_score[:, gt_labels] + return cls_cost * self.weight + + +@MATCH_COST.register_module() +class DiceCost: + """Cost of mask assignments based on dice losses. + + Args: + weight (int | float, optional): loss_weight. Defaults to 1. + pred_act (bool, optional): Whether to apply sigmoid to mask_pred. + Defaults to False. + eps (float, optional): default 1e-12. + """ + def __init__(self, weight=1., pred_act=False, eps=1e-3): + self.weight = weight + self.pred_act = pred_act + self.eps = eps + + def binary_mask_dice_loss(self, mask_preds, gt_masks): + """ + Args: + mask_preds (Tensor): Mask prediction in shape (N1, H, W). + gt_masks (Tensor): Ground truth in shape (N2, H, W) + store 0 or 1, 0 for negative class and 1 for + positive class. + + Returns: + Tensor: Dice cost matrix in shape (N1, N2). 
+ """ + mask_preds = mask_preds.reshape((mask_preds.shape[0], -1)) + gt_masks = gt_masks.reshape((gt_masks.shape[0], -1)).float() + numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks) + denominator = mask_preds.sum(-1)[:, None] + gt_masks.sum(-1)[None, :] + loss = 1 - (numerator + self.eps) / (denominator + self.eps) + return loss + + def __call__(self, mask_preds, gt_masks): + """ + Args: + mask_preds (Tensor): Mask prediction logits in shape (N1, H, W). + gt_masks (Tensor): Ground truth in shape (N2, H, W). + + Returns: + Tensor: Dice cost matrix in shape (N1, N2). + """ + if self.pred_act: + mask_preds = mask_preds.sigmoid() + dice_cost = self.binary_mask_dice_loss(mask_preds, gt_masks) + return dice_cost * self.weight diff --git a/segmentation/mmseg_custom/models/plugins/__init__.py b/segmentation/mmseg_custom/models/plugins/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a1bb9ad252d1b733fc257c8fa1530025a17eb209 --- /dev/null +++ b/segmentation/mmseg_custom/models/plugins/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Shanghai AI Lab. All rights reserved. +from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder +from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder + +__all__ = [ + 'PixelDecoder', 'TransformerEncoderPixelDecoder', + 'MSDeformAttnPixelDecoder' +] diff --git a/segmentation/mmseg_custom/models/plugins/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/models/plugins/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e89edcc42409a4f1a65e62e579dd8ffd09788ba3 Binary files /dev/null and b/segmentation/mmseg_custom/models/plugins/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/plugins/__pycache__/msdeformattn_pixel_decoder.cpython-39.pyc b/segmentation/mmseg_custom/models/plugins/__pycache__/msdeformattn_pixel_decoder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ec1aa90bf566146e08a92259964bdb8e4bb4169 Binary files /dev/null and b/segmentation/mmseg_custom/models/plugins/__pycache__/msdeformattn_pixel_decoder.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/plugins/__pycache__/pixel_decoder.cpython-39.pyc b/segmentation/mmseg_custom/models/plugins/__pycache__/pixel_decoder.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..565ecd80744056b6ba2689073a66952b4cf7dbcb Binary files /dev/null and b/segmentation/mmseg_custom/models/plugins/__pycache__/pixel_decoder.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/plugins/msdeformattn_pixel_decoder.py b/segmentation/mmseg_custom/models/plugins/msdeformattn_pixel_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..9ae3fc0fd32f2e0fef9cbbb990210271cc0685f8 --- /dev/null +++ b/segmentation/mmseg_custom/models/plugins/msdeformattn_pixel_decoder.py @@ -0,0 +1,268 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import (PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init, + normal_init, xavier_init) +from mmcv.cnn.bricks.transformer import (build_positional_encoding, + build_transformer_layer_sequence) +from mmcv.runner import BaseModule, ModuleList + +from ...core.anchor import MlvlPointGenerator +from ..utils.transformer import MultiScaleDeformableAttention + + +@PLUGIN_LAYERS.register_module() +class MSDeformAttnPixelDecoder(BaseModule): + """Pixel decoder with multi-scale deformable attention. + + Args: + in_channels (list[int] | tuple[int]): Number of channels in the + input feature maps. + strides (list[int] | tuple[int]): Output strides of feature from + backbone. + feat_channels (int): Number of channels for feature. + out_channels (int): Number of channels for output. + num_outs (int): Number of output scales. + norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization. + Defaults to dict(type='GN', num_groups=32). + act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation. + Defaults to dict(type='ReLU'). + encoder (:obj:`mmcv.ConfigDict` | dict): Config for transformer + encoder. Defaults to `DetrTransformerEncoder`. + positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for + transformer encoder position encoding. Defaults to + dict(type='SinePositionalEncoding', num_feats=128, + normalize=True). + init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict. + """ + def __init__(self, + in_channels=[256, 512, 1024, 2048], + strides=[4, 8, 16, 32], + feat_channels=256, + out_channels=256, + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + feedforward_channels=1024, + ffn_dropout=0.0, + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=128, + normalize=True), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.strides = strides + self.num_input_levels = len(in_channels) + self.num_encoder_levels = \ + encoder.transformerlayers.attn_cfgs.num_levels + assert self.num_encoder_levels >= 1, \ + 'num_levels in attn_cfgs must be at least one' + input_conv_list = [] + # from top to down (low to high resolution) + for i in range(self.num_input_levels - 1, + self.num_input_levels - self.num_encoder_levels - 1, + -1): + input_conv = ConvModule( + in_channels[i], + feat_channels, + kernel_size=1, + norm_cfg=norm_cfg, + act_cfg=None, + bias=True) + input_conv_list.append(input_conv) + self.input_convs = ModuleList(input_conv_list) + + self.encoder = build_transformer_layer_sequence(encoder) + self.postional_encoding = build_positional_encoding( + positional_encoding) + # high resolution to low resolution + self.level_encoding = nn.Embedding(self.num_encoder_levels, + feat_channels) + + # fpn-like structure + self.lateral_convs = ModuleList() + self.output_convs = ModuleList() + self.use_bias = norm_cfg is None + # from top to down (low to high resolution) + # fpn for the rest features that didn't pass in encoder + for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1, + -1): + lateral_conv = 
ConvModule( + in_channels[i], + feat_channels, + kernel_size=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=None) + output_conv = ConvModule( + feat_channels, + feat_channels, + kernel_size=3, + stride=1, + padding=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.lateral_convs.append(lateral_conv) + self.output_convs.append(output_conv) + + self.mask_feature = Conv2d( + feat_channels, out_channels, kernel_size=1, stride=1, padding=0) + + self.num_outs = num_outs + self.point_generator = MlvlPointGenerator(strides) + + def init_weights(self): + """Initialize weights.""" + for i in range(0, self.num_encoder_levels): + xavier_init( + self.input_convs[i].conv, + gain=1, + bias=0, + distribution='uniform') + + for i in range(0, self.num_input_levels - self.num_encoder_levels): + caffe2_xavier_init(self.lateral_convs[i].conv, bias=0) + caffe2_xavier_init(self.output_convs[i].conv, bias=0) + + caffe2_xavier_init(self.mask_feature, bias=0) + + normal_init(self.level_encoding, mean=0, std=1) + for p in self.encoder.parameters(): + if p.dim() > 1: + nn.init.xavier_normal_(p) + + # init_weights defined in MultiScaleDeformableAttention + for layer in self.encoder.layers: + for attn in layer.attentions: + if isinstance(attn, MultiScaleDeformableAttention): + attn.init_weights() + + def forward(self, feats): + """ + Args: + feats (list[Tensor]): Feature maps of each level. Each has + shape of (batch_size, c, h, w). + + Returns: + tuple: A tuple containing the following: + + - mask_feature (Tensor): shape (batch_size, c, h, w). + - multi_scale_features (list[Tensor]): Multi scale \ + features, each in shape (batch_size, c, h, w). + """ + # generate padding mask for each level, for each image + batch_size = feats[0].shape[0] + encoder_input_list = [] + padding_mask_list = [] + level_positional_encoding_list = [] + spatial_shapes = [] + reference_points_list = [] + for i in range(self.num_encoder_levels): + level_idx = self.num_input_levels - i - 1 + feat = feats[level_idx] + feat_projected = self.input_convs[i](feat) + h, w = feat.shape[-2:] + + # no padding + padding_mask_resized = feat.new_zeros( + (batch_size, ) + feat.shape[-2:], dtype=torch.bool) + pos_embed = self.postional_encoding(padding_mask_resized) + level_embed = self.level_encoding.weight[i] + level_pos_embed = level_embed.view(1, -1, 1, 1) + pos_embed + # (h_i * w_i, 2) + reference_points = self.point_generator.single_level_grid_priors( + feat.shape[-2:], level_idx, device=feat.device) + # normalize + factor = feat.new_tensor([[w, h]]) * self.strides[level_idx] + reference_points = reference_points / factor + + # shape (batch_size, c, h_i, w_i) -> (h_i * w_i, batch_size, c) + feat_projected = feat_projected.flatten(2).permute(2, 0, 1) + level_pos_embed = level_pos_embed.flatten(2).permute(2, 0, 1) + padding_mask_resized = padding_mask_resized.flatten(1) + + encoder_input_list.append(feat_projected) + padding_mask_list.append(padding_mask_resized) + level_positional_encoding_list.append(level_pos_embed) + spatial_shapes.append(feat.shape[-2:]) + reference_points_list.append(reference_points) + # shape (batch_size, total_num_query), + # total_num_query=sum([., h_i * w_i,.]) + padding_masks = torch.cat(padding_mask_list, dim=1) + # shape (total_num_query, batch_size, c) + encoder_inputs = torch.cat(encoder_input_list, dim=0) + level_positional_encodings = torch.cat( + level_positional_encoding_list, dim=0) + device = encoder_inputs.device + # shape (num_encoder_levels, 2), from low + # resolution to high 
resolution + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=device) + # shape (0, h_0*w_0, h_0*w_0+h_1*w_1, ...) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1, )), spatial_shapes.prod(1).cumsum(0)[:-1])) + reference_points = torch.cat(reference_points_list, dim=0) + reference_points = reference_points[None, :, None].repeat( + batch_size, 1, self.num_encoder_levels, 1) + valid_radios = reference_points.new_ones( + (batch_size, self.num_encoder_levels, 2)) + # shape (num_total_query, batch_size, c) + memory = self.encoder( + query=encoder_inputs, + key=None, + value=None, + query_pos=level_positional_encodings, + key_pos=None, + attn_masks=None, + key_padding_mask=None, + query_key_padding_mask=padding_masks, + spatial_shapes=spatial_shapes, + reference_points=reference_points, + level_start_index=level_start_index, + valid_radios=valid_radios) + # (num_total_query, batch_size, c) -> (batch_size, c, num_total_query) + memory = memory.permute(1, 2, 0) + + # from low resolution to high resolution + num_query_per_level = [e[0] * e[1] for e in spatial_shapes] + outs = torch.split(memory, num_query_per_level, dim=-1) + outs = [ + x.reshape(batch_size, -1, spatial_shapes[i][0], + spatial_shapes[i][1]) for i, x in enumerate(outs) + ] + + for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1, + -1): + x = feats[i] + cur_feat = self.lateral_convs[i](x) + y = cur_feat + F.interpolate( + outs[-1], + size=cur_feat.shape[-2:], + mode='bilinear', + align_corners=False) + y = self.output_convs[i](y) + outs.append(y) + multi_scale_features = outs[:self.num_outs] + + mask_feature = self.mask_feature(outs[-1]) + return mask_feature, multi_scale_features diff --git a/segmentation/mmseg_custom/models/plugins/pixel_decoder.py b/segmentation/mmseg_custom/models/plugins/pixel_decoder.py new file mode 100644 index 0000000000000000000000000000000000000000..62e488f6848743c041e836b62ac909382b5bd34d --- /dev/null +++ b/segmentation/mmseg_custom/models/plugins/pixel_decoder.py @@ -0,0 +1,237 @@ +import torch +import torch.nn.functional as F +from mmcv.cnn import PLUGIN_LAYERS, Conv2d, ConvModule, kaiming_init +from mmcv.cnn.bricks.transformer import (build_positional_encoding, + build_transformer_layer_sequence) +from mmcv.runner import BaseModule, ModuleList + + +@PLUGIN_LAYERS.register_module() +class PixelDecoder(BaseModule): + """Pixel decoder with a structure like fpn. + + Args: + in_channels (list[int] | tuple[int]): Number of channels in the + input feature maps. + feat_channels (int): Number channels for feature. + out_channels (int): Number channels for output. + norm_cfg (obj:`mmcv.ConfigDict`|dict): Config for normalization. + Defaults to dict(type='GN', num_groups=32). + act_cfg (obj:`mmcv.ConfigDict`|dict): Config for activation. + Defaults to dict(type='ReLU'). + encoder (obj:`mmcv.ConfigDict`|dict): Config for transorformer + encoder.Defaults to None. + positional_encoding (obj:`mmcv.ConfigDict`|dict): Config for + transformer encoder position encoding. Defaults to + dict(type='SinePositionalEncoding', num_feats=128, + normalize=True). + init_cfg (obj:`mmcv.ConfigDict`|dict): Initialization config dict. 
+ Default: None + """ + def __init__(self, + in_channels, + feat_channels, + out_channels, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.num_inputs = len(in_channels) + self.lateral_convs = ModuleList() + self.output_convs = ModuleList() + self.use_bias = norm_cfg is None + for i in range(0, self.num_inputs - 1): + l_conv = ConvModule( + in_channels[i], + feat_channels, + kernel_size=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=None) + o_conv = ConvModule( + feat_channels, + feat_channels, + kernel_size=3, + stride=1, + padding=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.lateral_convs.append(l_conv) + self.output_convs.append(o_conv) + + self.last_feat_conv = ConvModule( + in_channels[-1], + feat_channels, + kernel_size=3, + padding=1, + stride=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + self.mask_feature = Conv2d( + feat_channels, out_channels, kernel_size=3, stride=1, padding=1) + + def init_weights(self): + """Initialize weights.""" + for i in range(0, self.num_inputs - 2): + kaiming_init(self.lateral_convs[i].conv, a=1) + kaiming_init(self.output_convs[i].conv, a=1) + + kaiming_init(self.mask_feature, a=1) + kaiming_init(self.last_feat_conv, a=1) + + def forward(self, feats, img_metas): + """ + Args: + feats (list[Tensor]): Feature maps of each level. Each has + shape of [bs, c, h, w]. + img_metas (list[dict]): List of image information. Pass in + for creating more accurate padding mask. #! not used here. + + Returns: + tuple: a tuple containing the following: + + - mask_feature (Tensor): Shape [bs, c, h, w]. + - memory (Tensor): Output of last stage of backbone. + Shape [bs, c, h, w]. + """ + y = self.last_feat_conv(feats[-1]) + for i in range(self.num_inputs - 2, -1, -1): + x = feats[i] + cur_fpn = self.lateral_convs[i](x) + y = cur_fpn + \ + F.interpolate(y, size=cur_fpn.shape[-2:], mode='nearest') + y = self.output_convs[i](y) + + mask_feature = self.mask_feature(y) + memory = feats[-1] + return mask_feature, memory + + +@PLUGIN_LAYERS.register_module() +class TransformerEncoderPixelDecoder(PixelDecoder): + """Pixel decoder with transormer encoder inside. + + Args: + in_channels (list[int] | tuple[int]): Number of channels in the + input feature maps. + feat_channels (int): Number channels for feature. + out_channels (int): Number channels for output. + norm_cfg (obj:`mmcv.ConfigDict`|dict): Config for normalization. + Defaults to dict(type='GN', num_groups=32). + act_cfg (obj:`mmcv.ConfigDict`|dict): Config for activation. + Defaults to dict(type='ReLU'). + encoder (obj:`mmcv.ConfigDict`|dict): Config for transorformer + encoder.Defaults to None. + positional_encoding (obj:`mmcv.ConfigDict`|dict): Config for + transformer encoder position encoding. Defaults to + dict(type='SinePositionalEncoding', num_feats=128, + normalize=True). + init_cfg (obj:`mmcv.ConfigDict`|dict): Initialization config dict. 
+ Default: None + """ + def __init__(self, + in_channels, + feat_channels, + out_channels, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=None, + positional_encoding=dict( + type='SinePositionalEncoding', + num_feats=128, + normalize=True), + init_cfg=None): + super(TransformerEncoderPixelDecoder, self).__init__( + in_channels, + feat_channels, + out_channels, + norm_cfg, + act_cfg, + init_cfg=init_cfg) + self.last_feat_conv = None + + self.encoder = build_transformer_layer_sequence(encoder) + self.encoder_embed_dims = self.encoder.embed_dims + assert self.encoder_embed_dims == feat_channels, 'embed_dims({}) of ' \ + 'tranformer encoder must equal to feat_channels({})'.format( + feat_channels, self.encoder_embed_dims) + self.positional_encoding = build_positional_encoding( + positional_encoding) + self.encoder_in_proj = Conv2d( + in_channels[-1], feat_channels, kernel_size=1) + self.encoder_out_proj = ConvModule( + feat_channels, + feat_channels, + kernel_size=3, + stride=1, + padding=1, + bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) + + def init_weights(self): + """Initialize weights.""" + for i in range(0, self.num_inputs - 2): + kaiming_init(self.lateral_convs[i].conv, a=1) + kaiming_init(self.output_convs[i].conv, a=1) + + kaiming_init(self.mask_feature, a=1) + kaiming_init(self.encoder_in_proj, a=1) + kaiming_init(self.encoder_out_proj.conv, a=1) + + def forward(self, feats, img_metas): + """ + Args: + feats (list[Tensor]): Feature maps of each level. Each has + shape of [bs, c, h, w]. + img_metas (list[dict]): List of image information. Pass in + for creating more accurate padding mask. + + Returns: + tuple: a tuple containing the following: + + - mask_feature (Tensor): shape [bs, c, h, w]. + - memory (Tensor): shape [bs, c, h, w]. 
+ """ + feat_last = feats[-1] + bs, c, h, w = feat_last.shape + input_img_h, input_img_w = img_metas[0]['pad_shape'][:-1] + # input_img_h, input_img_w = img_metas[0]['batch_input_shape'] + padding_mask = feat_last.new_ones((bs, input_img_h, input_img_w), + dtype=torch.float32) + for i in range(bs): + img_h, img_w, _ = img_metas[i]['img_shape'] + padding_mask[i, :img_h, :img_w] = 0 + padding_mask = F.interpolate( + padding_mask.unsqueeze(1), + size=feat_last.shape[-2:], + mode='nearest').to(torch.bool).squeeze(1) + + pos_embed = self.positional_encoding(padding_mask) + feat_last = self.encoder_in_proj(feat_last) + # [bs, c, h, w] -> [nq, bs, dim] + feat_last = feat_last.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + padding_mask = padding_mask.flatten(1) # [bs, h, w] -> [bs, h*w] + memory = self.encoder( + query=feat_last, + key=None, + value=None, + query_pos=pos_embed, + query_key_padding_mask=padding_mask) + # [nq, bs, em] -> [bs, c, h, w] + memory = memory.permute(1, 2, 0).view(bs, self.encoder_embed_dims, h, + w) + y = self.encoder_out_proj(memory) + for i in range(self.num_inputs - 2, -1, -1): + x = feats[i] + cur_fpn = self.lateral_convs[i](x) + y = cur_fpn + \ + F.interpolate(y, size=cur_fpn.shape[-2:], mode='nearest') + y = self.output_convs[i](y) + + mask_feature = self.mask_feature(y) + return mask_feature, memory diff --git a/segmentation/mmseg_custom/models/segmentors/__init__.py b/segmentation/mmseg_custom/models/segmentors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f380c9ef7b16d9fcb924e4f33eb02b735b9252d0 --- /dev/null +++ b/segmentation/mmseg_custom/models/segmentors/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .encoder_decoder_mask2former import EncoderDecoderMask2Former +from .encoder_decoder_mask2former_aug import EncoderDecoderMask2FormerAug + +__all__ = ['EncoderDecoderMask2Former', 'EncoderDecoderMask2FormerAug'] diff --git a/segmentation/mmseg_custom/models/segmentors/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/models/segmentors/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6c7d7715667b00540e36be46ab751b285c71f4d Binary files /dev/null and b/segmentation/mmseg_custom/models/segmentors/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/segmentors/__pycache__/encoder_decoder_mask2former.cpython-39.pyc b/segmentation/mmseg_custom/models/segmentors/__pycache__/encoder_decoder_mask2former.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8bcfd3ecab45c4b2628818391048c599645c602a Binary files /dev/null and b/segmentation/mmseg_custom/models/segmentors/__pycache__/encoder_decoder_mask2former.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/segmentors/__pycache__/encoder_decoder_mask2former_aug.cpython-39.pyc b/segmentation/mmseg_custom/models/segmentors/__pycache__/encoder_decoder_mask2former_aug.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bb8dbef36cf777469fc645c81dd4039d7aea332 Binary files /dev/null and b/segmentation/mmseg_custom/models/segmentors/__pycache__/encoder_decoder_mask2former_aug.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/segmentors/encoder_decoder_mask2former.py b/segmentation/mmseg_custom/models/segmentors/encoder_decoder_mask2former.py new file mode 100644 index 
0000000000000000000000000000000000000000..190635844101ffa7514de4110e8e605226b58d3c --- /dev/null +++ b/segmentation/mmseg_custom/models/segmentors/encoder_decoder_mask2former.py @@ -0,0 +1,285 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmseg.core import add_prefix +from mmseg.models import builder +from mmseg.models.builder import SEGMENTORS +from mmseg.models.segmentors.base import BaseSegmentor +from mmseg.ops import resize + + +@SEGMENTORS.register_module() +class EncoderDecoderMask2Former(BaseSegmentor): + """Encoder Decoder segmentors. + + EncoderDecoder typically consists of backbone, decode_head, auxiliary_head. + Note that auxiliary_head is only used for deep supervision during training, + which could be dumped during inference. + """ + def __init__(self, + backbone, + decode_head, + neck=None, + auxiliary_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(EncoderDecoderMask2Former, self).__init__(init_cfg) + if pretrained is not None: + assert backbone.get('pretrained') is None, \ + 'both backbone and segmentor set pretrained weight' + backbone.pretrained = pretrained + self.backbone = builder.build_backbone(backbone) + if neck is not None: + self.neck = builder.build_neck(neck) + decode_head.update(train_cfg=train_cfg) + decode_head.update(test_cfg=test_cfg) + self._init_decode_head(decode_head) + self._init_auxiliary_head(auxiliary_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + assert self.with_decode_head + + def _init_decode_head(self, decode_head): + """Initialize ``decode_head``""" + self.decode_head = builder.build_head(decode_head) + self.align_corners = self.decode_head.align_corners + self.num_classes = self.decode_head.num_classes + + def _init_auxiliary_head(self, auxiliary_head): + """Initialize ``auxiliary_head``""" + if auxiliary_head is not None: + if isinstance(auxiliary_head, list): + self.auxiliary_head = nn.ModuleList() + for head_cfg in auxiliary_head: + self.auxiliary_head.append(builder.build_head(head_cfg)) + else: + self.auxiliary_head = builder.build_head(auxiliary_head) + + def extract_feat(self, img): + """Extract features from images.""" + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def encode_decode(self, img, img_metas): + """Encode images with backbone and decode into a semantic segmentation + map of the same size as input.""" + x = self.extract_feat(img) + out = self._decode_head_forward_test(x, img_metas) + out = resize( + input=out, + size=img.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + return out + + def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg, + **kwargs): + """Run forward function and calculate loss for decode head in + training.""" + losses = dict() + loss_decode = self.decode_head.forward_train(x, img_metas, + gt_semantic_seg, **kwargs) + + losses.update(add_prefix(loss_decode, 'decode')) + return losses + + def _decode_head_forward_test(self, x, img_metas): + """Run forward function and calculate loss for decode head in + inference.""" + seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg) + return seg_logits + + def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg): + """Run forward function and calculate loss for auxiliary head in + training.""" + losses = dict() + if isinstance(self.auxiliary_head, nn.ModuleList): + for idx, aux_head in enumerate(self.auxiliary_head): + loss_aux 
= aux_head.forward_train(x, img_metas, + gt_semantic_seg, + self.train_cfg) + losses.update(add_prefix(loss_aux, f'aux_{idx}')) + else: + loss_aux = self.auxiliary_head.forward_train( + x, img_metas, gt_semantic_seg, self.train_cfg) + losses.update(add_prefix(loss_aux, 'aux')) + + return losses + + def forward_dummy(self, img): + """Dummy forward function.""" + seg_logit = self.encode_decode(img, None) + + return seg_logit + + def forward_train(self, img, img_metas, gt_semantic_seg, **kwargs): + """Forward function for training. + + Args: + img (Tensor): Input images. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + + x = self.extract_feat(img) + + losses = dict() + + loss_decode = self._decode_head_forward_train(x, img_metas, + gt_semantic_seg, + **kwargs) + losses.update(loss_decode) + + if self.with_auxiliary_head: + loss_aux = self._auxiliary_head_forward_train( + x, img_metas, gt_semantic_seg) + losses.update(loss_aux) + + return losses + + # TODO refactor + def slide_inference(self, img, img_meta, rescale): + """Inference by sliding-window with overlap. + + If h_crop > h_img or w_crop > w_img, the small patch will be used to + decode without padding. + """ + + h_stride, w_stride = self.test_cfg.stride + h_crop, w_crop = self.test_cfg.crop_size + batch_size, _, h_img, w_img = img.size() + num_classes = self.num_classes + h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1 + w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1 + preds = img.new_zeros((batch_size, num_classes, h_img, w_img)) + count_mat = img.new_zeros((batch_size, 1, h_img, w_img)) + for h_idx in range(h_grids): + for w_idx in range(w_grids): + y1 = h_idx * h_stride + x1 = w_idx * w_stride + y2 = min(y1 + h_crop, h_img) + x2 = min(x1 + w_crop, w_img) + y1 = max(y2 - h_crop, 0) + x1 = max(x2 - w_crop, 0) + crop_img = img[:, :, y1:y2, x1:x2] + crop_seg_logit = self.encode_decode(crop_img, img_meta) + preds += F.pad(crop_seg_logit, + (int(x1), int(preds.shape[3] - x2), int(y1), + int(preds.shape[2] - y2))) + + count_mat[:, :, y1:y2, x1:x2] += 1 + assert (count_mat == 0).sum() == 0 + if torch.onnx.is_in_onnx_export(): + # cast count_mat to constant while exporting to ONNX + count_mat = torch.from_numpy( + count_mat.cpu().detach().numpy()).to(device=img.device) + preds = preds / count_mat + if rescale: + preds = resize( + preds, + size=img_meta[0]['ori_shape'][:2], + mode='bilinear', + align_corners=self.align_corners, + warning=False) + return preds + + def whole_inference(self, img, img_meta, rescale): + """Inference with full image.""" + + seg_logit = self.encode_decode(img, img_meta) + if rescale: + # support dynamic shape for onnx + if torch.onnx.is_in_onnx_export(): + size = img.shape[2:] + else: + size = img_meta[0]['ori_shape'][:2] + seg_logit = resize( + seg_logit, + size=size, + mode='bilinear', + align_corners=self.align_corners, + warning=False) + + return seg_logit + + def inference(self, img, img_meta, rescale): + """Inference with slide/whole style. + + Args: + img (Tensor): The input image of shape (N, 3, H, W). 
+ img_meta (dict): Image info dict where each dict has: 'img_shape', + 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + rescale (bool): Whether rescale back to original shape. + + Returns: + Tensor: The output segmentation map. + """ + + assert self.test_cfg.mode in ['slide', 'whole'] + ori_shape = img_meta[0]['ori_shape'] + assert all(_['ori_shape'] == ori_shape for _ in img_meta) + if self.test_cfg.mode == 'slide': + seg_logit = self.slide_inference(img, img_meta, rescale) + else: + seg_logit = self.whole_inference(img, img_meta, rescale) + output = F.softmax(seg_logit, dim=1) + flip = img_meta[0]['flip'] + if flip: + flip_direction = img_meta[0]['flip_direction'] + assert flip_direction in ['horizontal', 'vertical'] + if flip_direction == 'horizontal': + output = output.flip(dims=(3,)) + elif flip_direction == 'vertical': + output = output.flip(dims=(2,)) + + return output + + def simple_test(self, img, img_meta, rescale=True): + """Simple test with single image.""" + seg_logit = self.inference(img, img_meta, rescale) + seg_pred = seg_logit.argmax(dim=1) + if torch.onnx.is_in_onnx_export(): + # our inference backend only support 4D output + seg_pred = seg_pred.unsqueeze(0) + return seg_pred + seg_pred = seg_pred.cpu().numpy() + # unravel batch dim + seg_pred = list(seg_pred) + return seg_pred + + def aug_test(self, imgs, img_metas, rescale=True): + """Test with augmentations. + + Only rescale=True is supported. + """ + # aug_test rescale all imgs back to ori_shape for now + assert rescale + # to save memory, we get augmented seg logit inplace + seg_logit = self.inference(imgs[0], img_metas[0], rescale) + for i in range(1, len(imgs)): + cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale) + seg_logit += cur_seg_logit + seg_logit /= len(imgs) + seg_pred = seg_logit.argmax(dim=1) + seg_pred = seg_pred.cpu().numpy() + # unravel batch dim + seg_pred = list(seg_pred) + return seg_pred diff --git a/segmentation/mmseg_custom/models/segmentors/encoder_decoder_mask2former_aug.py b/segmentation/mmseg_custom/models/segmentors/encoder_decoder_mask2former_aug.py new file mode 100644 index 0000000000000000000000000000000000000000..7dc54cf7ee7e22704559ef8acc3096551da5a9c2 --- /dev/null +++ b/segmentation/mmseg_custom/models/segmentors/encoder_decoder_mask2former_aug.py @@ -0,0 +1,289 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmseg.core import add_prefix +from mmseg.models import builder +from mmseg.models.builder import SEGMENTORS +from mmseg.models.segmentors.base import BaseSegmentor +from mmseg.ops import resize + + +@SEGMENTORS.register_module() +class EncoderDecoderMask2FormerAug(BaseSegmentor): + """Encoder Decoder segmentors. + + EncoderDecoder typically consists of backbone, decode_head, auxiliary_head. + Note that auxiliary_head is only used for deep supervision during training, + which could be dumped during inference. 
+ """ + def __init__(self, + backbone, + decode_head, + neck=None, + auxiliary_head=None, + train_cfg=None, + test_cfg=None, + pretrained=None, + init_cfg=None): + super(EncoderDecoderMask2FormerAug, self).__init__(init_cfg) + if pretrained is not None: + assert backbone.get('pretrained') is None, \ + 'both backbone and segmentor set pretrained weight' + backbone.pretrained = pretrained + self.backbone = builder.build_backbone(backbone) + if neck is not None: + self.neck = builder.build_neck(neck) + decode_head.update(train_cfg=train_cfg) + decode_head.update(test_cfg=test_cfg) + self._init_decode_head(decode_head) + self._init_auxiliary_head(auxiliary_head) + + self.train_cfg = train_cfg + self.test_cfg = test_cfg + + assert self.with_decode_head + + def _init_decode_head(self, decode_head): + """Initialize ``decode_head``""" + self.decode_head = builder.build_head(decode_head) + self.align_corners = self.decode_head.align_corners + self.num_classes = self.decode_head.num_classes + + def _init_auxiliary_head(self, auxiliary_head): + """Initialize ``auxiliary_head``""" + if auxiliary_head is not None: + if isinstance(auxiliary_head, list): + self.auxiliary_head = nn.ModuleList() + for head_cfg in auxiliary_head: + self.auxiliary_head.append(builder.build_head(head_cfg)) + else: + self.auxiliary_head = builder.build_head(auxiliary_head) + + def extract_feat(self, img): + """Extract features from images.""" + x = self.backbone(img) + if self.with_neck: + x = self.neck(x) + return x + + def encode_decode(self, img, img_metas): + """Encode images with backbone and decode into a semantic segmentation + map of the same size as input.""" + x = self.extract_feat(img) + out = self._decode_head_forward_test(x, img_metas) + out = resize( + input=out, + size=img.shape[2:], + mode='bilinear', + align_corners=self.align_corners) + return out + + def _decode_head_forward_train(self, x, img_metas, gt_semantic_seg, + **kwargs): + """Run forward function and calculate loss for decode head in + training.""" + losses = dict() + loss_decode = self.decode_head.forward_train(x, img_metas, + gt_semantic_seg, **kwargs) + + losses.update(add_prefix(loss_decode, 'decode')) + return losses + + def _decode_head_forward_test(self, x, img_metas): + """Run forward function and calculate loss for decode head in + inference.""" + seg_logits = self.decode_head.forward_test(x, img_metas, self.test_cfg) + return seg_logits + + def _auxiliary_head_forward_train(self, x, img_metas, gt_semantic_seg): + """Run forward function and calculate loss for auxiliary head in + training.""" + losses = dict() + if isinstance(self.auxiliary_head, nn.ModuleList): + for idx, aux_head in enumerate(self.auxiliary_head): + loss_aux = aux_head.forward_train(x, img_metas, + gt_semantic_seg, + self.train_cfg) + losses.update(add_prefix(loss_aux, f'aux_{idx}')) + else: + loss_aux = self.auxiliary_head.forward_train( + x, img_metas, gt_semantic_seg, self.train_cfg) + losses.update(add_prefix(loss_aux, 'aux')) + + return losses + + def forward_dummy(self, img): + """Dummy forward function.""" + seg_logit = self.encode_decode(img, None) + + return seg_logit + + def forward_train(self, img, img_metas, gt_semantic_seg, **kwargs): + """Forward function for training. + + Args: + img (Tensor): Input images. + img_metas (list[dict]): List of image info dict where each dict + has: 'img_shape', 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. 
+ For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + gt_semantic_seg (Tensor): Semantic segmentation masks + used if the architecture supports semantic segmentation task. + + Returns: + dict[str, Tensor]: a dictionary of loss components + """ + + x = self.extract_feat(img) + + losses = dict() + + loss_decode = self._decode_head_forward_train(x, img_metas, + gt_semantic_seg, + **kwargs) + losses.update(loss_decode) + + if self.with_auxiliary_head: + loss_aux = self._auxiliary_head_forward_train( + x, img_metas, gt_semantic_seg) + losses.update(loss_aux) + + return losses + + # TODO refactor + def slide_inference(self, img, img_meta, rescale, unpad=True): + """Inference by sliding-window with overlap. + + If h_crop > h_img or w_crop > w_img, the small patch will be used to + decode without padding. + """ + + h_stride, w_stride = self.test_cfg.stride + h_crop, w_crop = self.test_cfg.crop_size + batch_size, _, h_img, w_img = img.size() + num_classes = self.num_classes + h_grids = max(h_img - h_crop + h_stride - 1, 0) // h_stride + 1 + w_grids = max(w_img - w_crop + w_stride - 1, 0) // w_stride + 1 + preds = img.new_zeros((batch_size, num_classes, h_img, w_img)) + count_mat = img.new_zeros((batch_size, 1, h_img, w_img)) + for h_idx in range(h_grids): + for w_idx in range(w_grids): + y1 = h_idx * h_stride + x1 = w_idx * w_stride + y2 = min(y1 + h_crop, h_img) + x2 = min(x1 + w_crop, w_img) + y1 = max(y2 - h_crop, 0) + x1 = max(x2 - w_crop, 0) + crop_img = img[:, :, y1:y2, x1:x2] + crop_seg_logit = self.encode_decode(crop_img, img_meta) + preds += F.pad(crop_seg_logit, + (int(x1), int(preds.shape[3] - x2), int(y1), + int(preds.shape[2] - y2))) + + count_mat[:, :, y1:y2, x1:x2] += 1 + assert (count_mat == 0).sum() == 0 + if torch.onnx.is_in_onnx_export(): + # cast count_mat to constant while exporting to ONNX + count_mat = torch.from_numpy( + count_mat.cpu().detach().numpy()).to(device=img.device) + preds = preds / count_mat + + if unpad: + unpad_h, unpad_w = img_meta[0]['img_shape'][:2] + # logging.info(preds.shape, img_meta[0]) + preds = preds[:, :, :unpad_h, :unpad_w] + if rescale: + preds = resize(preds, + size=img_meta[0]['ori_shape'][:2], + mode='bilinear', + align_corners=self.align_corners, + warning=False) + return preds + + def whole_inference(self, img, img_meta, rescale): + """Inference with full image.""" + + seg_logit = self.encode_decode(img, img_meta) + if rescale: + # support dynamic shape for onnx + if torch.onnx.is_in_onnx_export(): + size = img.shape[2:] + else: + size = img_meta[0]['ori_shape'][:2] + seg_logit = resize( + seg_logit, + size=size, + mode='bilinear', + align_corners=self.align_corners, + warning=False) + + return seg_logit + + def inference(self, img, img_meta, rescale): + """Inference with slide/whole style. + + Args: + img (Tensor): The input image of shape (N, 3, H, W). + img_meta (dict): Image info dict where each dict has: 'img_shape', + 'scale_factor', 'flip', and may also contain + 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. + For details on the values of these keys see + `mmseg/datasets/pipelines/formatting.py:Collect`. + rescale (bool): Whether rescale back to original shape. + + Returns: + Tensor: The output segmentation map. 
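+
+        Example (illustrative sketch; all shapes and meta values are
+        hypothetical)::
+
+            >>> # self.test_cfg selects the inference style, e.g.
+            >>> # test_cfg = dict(mode='whole')             # full-image decode
+            >>> # test_cfg = dict(mode='slide',
+            >>> #                 crop_size=(512, 512),
+            >>> #                 stride=(341, 341))         # sliding window
+            >>> # img_meta holds one dict per image in the batch, e.g.
+            >>> # img_meta = [dict(ori_shape=(1024, 2048, 3),
+            >>> #                  img_shape=(512, 1024, 3),
+            >>> #                  flip=False)]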
+ """ + + assert self.test_cfg.mode in ['slide', 'whole'] + ori_shape = img_meta[0]['ori_shape'] + assert all(_['ori_shape'] == ori_shape for _ in img_meta) + if self.test_cfg.mode == 'slide': + seg_logit = self.slide_inference(img, img_meta, rescale) + else: + seg_logit = self.whole_inference(img, img_meta, rescale) + output = F.softmax(seg_logit, dim=1) + flip = img_meta[0]['flip'] + if flip: + flip_direction = img_meta[0]['flip_direction'] + assert flip_direction in ['horizontal', 'vertical'] + if flip_direction == 'horizontal': + output = output.flip(dims=(3, )) + elif flip_direction == 'vertical': + output = output.flip(dims=(2, )) + + return output + + def simple_test(self, img, img_meta, rescale=True): + """Simple test with single image.""" + seg_logit = self.inference(img, img_meta, rescale) + seg_pred = seg_logit.argmax(dim=1) + if torch.onnx.is_in_onnx_export(): + # our inference backend only support 4D output + seg_pred = seg_pred.unsqueeze(0) + return seg_pred + seg_pred = seg_pred.cpu().numpy() + # unravel batch dim + seg_pred = list(seg_pred) + return seg_pred + + def aug_test(self, imgs, img_metas, rescale=True): + """Test with augmentations. + + Only rescale=True is supported. + """ + # aug_test rescale all imgs back to ori_shape for now + assert rescale + # to save memory, we get augmented seg logit inplace + seg_logit = self.inference(imgs[0], img_metas[0], rescale) + for i in range(1, len(imgs)): + cur_seg_logit = self.inference(imgs[i], img_metas[i], rescale) + seg_logit += cur_seg_logit + seg_logit /= len(imgs) + seg_pred = seg_logit.argmax(dim=1) + seg_pred = seg_pred.cpu().numpy() + # unravel batch dim + seg_pred = list(seg_pred) + return seg_pred diff --git a/segmentation/mmseg_custom/models/utils/__init__.py b/segmentation/mmseg_custom/models/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ebc710ff0e9bc9cc9d2e6b2a32382ea08bf55643 --- /dev/null +++ b/segmentation/mmseg_custom/models/utils/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Shanghai AI Lab. All rights reserved. 
+from .assigner import MaskHungarianAssigner +from .point_sample import get_uncertain_point_coords_with_randomness +from .positional_encoding import (LearnedPositionalEncoding, + SinePositionalEncoding) +from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer, + DynamicConv, Transformer) + +__all__ = [ + 'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'DynamicConv', + 'Transformer', 'LearnedPositionalEncoding', 'SinePositionalEncoding', + 'MaskHungarianAssigner', 'get_uncertain_point_coords_with_randomness' +] diff --git a/segmentation/mmseg_custom/models/utils/__pycache__/__init__.cpython-39.pyc b/segmentation/mmseg_custom/models/utils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c8045abf6e2a251033ae9aa6575d38844aa090f Binary files /dev/null and b/segmentation/mmseg_custom/models/utils/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/utils/__pycache__/assigner.cpython-39.pyc b/segmentation/mmseg_custom/models/utils/__pycache__/assigner.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92f475c187ef7adb4056a7afb2070da1154b6b68 Binary files /dev/null and b/segmentation/mmseg_custom/models/utils/__pycache__/assigner.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/utils/__pycache__/point_sample.cpython-39.pyc b/segmentation/mmseg_custom/models/utils/__pycache__/point_sample.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c00452e1308a20165f018f36b6edfea91e086416 Binary files /dev/null and b/segmentation/mmseg_custom/models/utils/__pycache__/point_sample.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/utils/__pycache__/positional_encoding.cpython-39.pyc b/segmentation/mmseg_custom/models/utils/__pycache__/positional_encoding.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..651f12209debc5b71fd8c79e470b2a977839e67e Binary files /dev/null and b/segmentation/mmseg_custom/models/utils/__pycache__/positional_encoding.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/utils/__pycache__/transformer.cpython-39.pyc b/segmentation/mmseg_custom/models/utils/__pycache__/transformer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85a61ced4c86b262b202a4f6cf96fbf23f7ad3e7 Binary files /dev/null and b/segmentation/mmseg_custom/models/utils/__pycache__/transformer.cpython-39.pyc differ diff --git a/segmentation/mmseg_custom/models/utils/assigner.py b/segmentation/mmseg_custom/models/utils/assigner.py new file mode 100644 index 0000000000000000000000000000000000000000..1d6028940dc415ed13dd8d81119ab1394b95f2ed --- /dev/null +++ b/segmentation/mmseg_custom/models/utils/assigner.py @@ -0,0 +1,165 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from abc import ABCMeta, abstractmethod + +import torch +import torch.nn.functional as F + +from ..builder import MASK_ASSIGNERS, build_match_cost + +try: + from scipy.optimize import linear_sum_assignment +except ImportError: + linear_sum_assignment = None + + +class AssignResult(metaclass=ABCMeta): + """Collection of assign results.""" + def __init__(self, num_gts, gt_inds, labels): + self.num_gts = num_gts + self.gt_inds = gt_inds + self.labels = labels + + @property + def info(self): + info = { + 'num_gts': self.num_gts, + 'gt_inds': self.gt_inds, + 'labels': self.labels, + } + return info + + +class BaseAssigner(metaclass=ABCMeta): + """Base assigner that assigns boxes to ground truth boxes.""" + @abstractmethod + def assign(self, masks, gt_masks, gt_masks_ignore=None, gt_labels=None): + """Assign boxes to either a ground truth boxes or a negative boxes.""" + pass + + +@MASK_ASSIGNERS.register_module() +class MaskHungarianAssigner(BaseAssigner): + """Computes one-to-one matching between predictions and ground truth for + mask. + + This class computes an assignment between the targets and the predictions + based on the costs. The costs are weighted sum of three components: + classification cost, regression L1 cost and regression iou cost. The + targets don't include the no_object, so generally there are more + predictions than targets. After the one-to-one matching, the un-matched + are treated as backgrounds. Thus each query prediction will be assigned + with `0` or a positive integer indicating the ground truth index: + + - 0: negative sample, no assigned gt + - positive integer: positive sample, index (1-based) of assigned gt + + Args: + cls_cost (obj:`mmcv.ConfigDict`|dict): Classification cost config. + mask_cost (obj:`mmcv.ConfigDict`|dict): Mask cost config. + dice_cost (obj:`mmcv.ConfigDict`|dict): Dice cost config. + """ + def __init__(self, + cls_cost=dict(type='ClassificationCost', weight=1.0), + dice_cost=dict(type='DiceCost', weight=1.0), + mask_cost=dict(type='MaskFocalCost', weight=1.0)): + self.cls_cost = build_match_cost(cls_cost) + self.dice_cost = build_match_cost(dice_cost) + self.mask_cost = build_match_cost(mask_cost) + + def assign(self, + cls_pred, + mask_pred, + gt_labels, + gt_masks, + img_meta, + gt_masks_ignore=None, + eps=1e-7): + """Computes one-to-one matching based on the weighted costs. + + This method assign each query prediction to a ground truth or + background. The `assigned_gt_inds` with -1 means don't care, + 0 means negative sample, and positive number is the index (1-based) + of assigned gt. + The assignment is done in the following steps, the order matters. + + 1. assign every prediction to -1 + 2. compute the weighted costs + 3. do Hungarian matching on CPU based on the costs + 4. assign all to 0 (background) first, then for each matched pair + between predictions and gts, treat this prediction as foreground + and assign the corresponding gt index (plus 1) to it. + + Args: + mask_pred (Tensor): Predicted mask, shape [num_query, h, w] + cls_pred (Tensor): Predicted classification logits, shape + [num_query, num_class]. + gt_masks (Tensor): Ground truth mask, shape [num_gt, h, w]. + gt_labels (Tensor): Label of `gt_masks`, shape (num_gt,). + img_meta (dict): Meta information for current image. + gt_masks_ignore (Tensor, optional): Ground truth masks that are + labelled as `ignored`. Default None. + eps (int | float, optional): A value added to the denominator for + numerical stability. Default 1e-7. 
+ + Returns: + :obj:`AssignResult`: The assigned result. + """ + assert gt_masks_ignore is None, \ + 'Only case when gt_masks_ignore is None is supported.' + num_gts, num_queries = gt_labels.shape[0], cls_pred.shape[0] + + # 1. assign -1 by default + assigned_gt_inds = cls_pred.new_full((num_queries, ), + -1, + dtype=torch.long) + assigned_labels = cls_pred.new_full((num_queries, ), + -1, + dtype=torch.long) + if num_gts == 0 or num_queries == 0: + # No ground truth or boxes, return empty assignment + if num_gts == 0: + # No ground truth, assign all to background + assigned_gt_inds[:] = 0 + return AssignResult( + num_gts, assigned_gt_inds, labels=assigned_labels) + + # 2. compute the weighted costs + # classification and maskcost. + if self.cls_cost.weight != 0 and cls_pred is not None: + cls_cost = self.cls_cost(cls_pred, gt_labels) + else: + cls_cost = 0 + + if self.mask_cost.weight != 0: + # mask_pred shape = [nq, h, w] + # gt_mask shape = [ng, h, w] + # mask_cost shape = [nq, ng] + mask_cost = self.mask_cost(mask_pred, gt_masks) + else: + mask_cost = 0 + + if self.dice_cost.weight != 0: + dice_cost = self.dice_cost(mask_pred, gt_masks) + else: + dice_cost = 0 + cost = cls_cost + mask_cost + dice_cost + + # 3. do Hungarian matching on CPU using linear_sum_assignment + cost = cost.detach().cpu() + if linear_sum_assignment is None: + raise ImportError('Please run "pip install scipy" ' + 'to install scipy first.') + + matched_row_inds, matched_col_inds = linear_sum_assignment(cost) + matched_row_inds = torch.from_numpy(matched_row_inds).to( + cls_pred.device) + matched_col_inds = torch.from_numpy(matched_col_inds).to( + cls_pred.device) + + # 4. assign backgrounds and foregrounds + # assign all indices to backgrounds first + assigned_gt_inds[:] = 0 + # assign foregrounds based on matching results + assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 + assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] + return AssignResult(num_gts, assigned_gt_inds, labels=assigned_labels) diff --git a/segmentation/mmseg_custom/models/utils/point_sample.py b/segmentation/mmseg_custom/models/utils/point_sample.py new file mode 100644 index 0000000000000000000000000000000000000000..c2c3cf91cc934987f57cf528d4a1763c0873e4b2 --- /dev/null +++ b/segmentation/mmseg_custom/models/utils/point_sample.py @@ -0,0 +1,87 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch +from mmcv.ops import point_sample + + +def get_uncertainty(mask_pred, labels): + """Estimate uncertainty based on pred logits. + + We estimate uncertainty as L1 distance between 0.0 and the logits + prediction in 'mask_pred' for the foreground class in `classes`. + + Args: + mask_pred (Tensor): mask predication logits, shape (num_rois, + num_classes, mask_height, mask_width). + + labels (list[Tensor]): Either predicted or ground truth label for + each predicted mask, of length num_rois. + + Returns: + scores (Tensor): Uncertainty scores with the most uncertain + locations having the highest uncertainty score, + shape (num_rois, 1, mask_height, mask_width) + """ + if mask_pred.shape[1] == 1: + gt_class_logits = mask_pred.clone() + else: + inds = torch.arange(mask_pred.shape[0], device=mask_pred.device) + gt_class_logits = mask_pred[inds, labels].unsqueeze(1) + return -torch.abs(gt_class_logits) + + +def get_uncertain_point_coords_with_randomness(mask_pred, labels, num_points, + oversample_ratio, + importance_sample_ratio): + """Get ``num_points`` most uncertain points with random points during + train. 
+ + Sample points in [0, 1] x [0, 1] coordinate space based on their + uncertainty. The uncertainties are calculated for each point using + 'get_uncertainty()' function that takes point's logit prediction as + input. + + Args: + mask_pred (Tensor): A tensor of shape (num_rois, num_classes, + mask_height, mask_width) for class-specific or class-agnostic + prediction. + labels (list): The ground truth class for each instance. + num_points (int): The number of points to sample. + oversample_ratio (int): Oversampling parameter. + importance_sample_ratio (float): Ratio of points that are sampled + via importnace sampling. + + Returns: + point_coords (Tensor): A tensor of shape (num_rois, num_points, 2) + that contains the coordinates sampled points. + """ + assert oversample_ratio >= 1 + assert 0 <= importance_sample_ratio <= 1 + batch_size = mask_pred.shape[0] + num_sampled = int(num_points * oversample_ratio) + point_coords = torch.rand( + batch_size, num_sampled, 2, device=mask_pred.device) + point_logits = point_sample(mask_pred, point_coords) + # It is crucial to calculate uncertainty based on the sampled + # prediction value for the points. Calculating uncertainties of the + # coarse predictions first and sampling them for points leads to + # incorrect results. To illustrate this: assume uncertainty func( + # logits)=-abs(logits), a sampled point between two coarse + # predictions with -1 and 1 logits has 0 logits, and therefore 0 + # uncertainty value. However, if we calculate uncertainties for the + # coarse predictions first, both will have -1 uncertainty, + # and sampled point will get -1 uncertainty. + point_uncertainties = get_uncertainty(point_logits, labels) + num_uncertain_points = int(importance_sample_ratio * num_points) + num_random_points = num_points - num_uncertain_points + idx = torch.topk( + point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1] + shift = num_sampled * torch.arange( + batch_size, dtype=torch.long, device=mask_pred.device) + idx += shift[:, None] + point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view( + batch_size, num_uncertain_points, 2) + if num_random_points > 0: + rand_roi_coords = torch.rand( + batch_size, num_random_points, 2, device=mask_pred.device) + point_coords = torch.cat((point_coords, rand_roi_coords), dim=1) + return point_coords diff --git a/segmentation/mmseg_custom/models/utils/positional_encoding.py b/segmentation/mmseg_custom/models/utils/positional_encoding.py new file mode 100644 index 0000000000000000000000000000000000000000..426e058365cbd635f2140d54d5787f609a7b9397 --- /dev/null +++ b/segmentation/mmseg_custom/models/utils/positional_encoding.py @@ -0,0 +1,161 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch +import torch.nn as nn +from mmcv.cnn.bricks.transformer import POSITIONAL_ENCODING +from mmcv.runner import BaseModule + + +@POSITIONAL_ENCODING.register_module() +class SinePositionalEncoding(BaseModule): + """Position encoding with sine and cosine functions. + + See `End-to-End Object Detection with Transformers + `_ for details. + + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. Note the final returned dimension + for each position is 2 times of this value. + temperature (int, optional): The temperature used for scaling + the position embedding. Defaults to 10000. + normalize (bool, optional): Whether to normalize the position + embedding. Defaults to False. 
+ scale (float, optional): A scale factor that scales the position + embedding. The scale will be used only when `normalize` is True. + Defaults to 2*pi. + eps (float, optional): A value added to the denominator for + numerical stability. Defaults to 1e-6. + offset (float): offset add to embed when do the normalization. + Defaults to 0. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + def __init__(self, + num_feats, + temperature=10000, + normalize=False, + scale=2 * math.pi, + eps=1e-6, + offset=0., + init_cfg=None): + super(SinePositionalEncoding, self).__init__(init_cfg) + if normalize: + assert isinstance(scale, (float, int)), 'when normalize is set,' \ + 'scale should be provided and in float or int type, ' \ + f'found {type(scale)}' + self.num_feats = num_feats + self.temperature = temperature + self.normalize = normalize + self.scale = scale + self.eps = eps + self.offset = offset + + def forward(self, mask): + """Forward function for `SinePositionalEncoding`. + + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, h, w]. + + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, h, w]. + """ + # For convenience of exporting to ONNX, it's required to convert + # `masks` from bool to int. + mask = mask.to(torch.int) + not_mask = 1 - mask # logical_not + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + y_embed = (y_embed + self.offset) / \ + (y_embed[:, -1:, :] + self.eps) * self.scale + x_embed = (x_embed + self.offset) / \ + (x_embed[:, :, -1:] + self.eps) * self.scale + dim_t = torch.arange( + self.num_feats, dtype=torch.float32, device=mask.device) + dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + # use `view` instead of `flatten` for dynamically exporting to ONNX + B, H, W = mask.size() + pos_x = torch.stack( + (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), + dim=4).view(B, H, W, -1) + pos_y = torch.stack( + (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), + dim=4).view(B, H, W, -1) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'temperature={self.temperature}, ' + repr_str += f'normalize={self.normalize}, ' + repr_str += f'scale={self.scale}, ' + repr_str += f'eps={self.eps})' + return repr_str + + +@POSITIONAL_ENCODING.register_module() +class LearnedPositionalEncoding(BaseModule): + """Position embedding with learnable embedding weights. + + Args: + num_feats (int): The feature dimension for each position + along x-axis or y-axis. The final returned dimension for + each position is 2 times of this value. + row_num_embed (int, optional): The dictionary size of row embeddings. + Default 50. + col_num_embed (int, optional): The dictionary size of col embeddings. + Default 50. + init_cfg (dict or list[dict], optional): Initialization config dict. 
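+
+    Example (minimal sketch; sizes are illustrative)::
+
+        >>> pos_enc = LearnedPositionalEncoding(num_feats=128)
+        >>> mask = torch.zeros(1, 32, 32)   # all-zero mask: every position valid
+        >>> pos = pos_enc(mask)
+        >>> # pos.shape == (1, 2 * 128, 32, 32)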
+ """ + def __init__(self, + num_feats, + row_num_embed=50, + col_num_embed=50, + init_cfg=dict(type='Uniform', layer='Embedding')): + super(LearnedPositionalEncoding, self).__init__(init_cfg) + self.row_embed = nn.Embedding(row_num_embed, num_feats) + self.col_embed = nn.Embedding(col_num_embed, num_feats) + self.num_feats = num_feats + self.row_num_embed = row_num_embed + self.col_num_embed = col_num_embed + + def forward(self, mask): + """Forward function for `LearnedPositionalEncoding`. + + Args: + mask (Tensor): ByteTensor mask. Non-zero values representing + ignored positions, while zero values means valid positions + for this image. Shape [bs, h, w]. + + Returns: + pos (Tensor): Returned position embedding with shape + [bs, num_feats*2, h, w]. + """ + h, w = mask.shape[-2:] + x = torch.arange(w, device=mask.device) + y = torch.arange(h, device=mask.device) + x_embed = self.col_embed(x) + y_embed = self.row_embed(y) + pos = torch.cat( + (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat( + 1, w, 1)), + dim=-1).permute(2, 0, + 1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1) + return pos + + def __repr__(self): + """str: a string that describes the module""" + repr_str = self.__class__.__name__ + repr_str += f'(num_feats={self.num_feats}, ' + repr_str += f'row_num_embed={self.row_num_embed}, ' + repr_str += f'col_num_embed={self.col_num_embed})' + return repr_str diff --git a/segmentation/mmseg_custom/models/utils/transformer.py b/segmentation/mmseg_custom/models/utils/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..36c0797ad0432072755c1dc1a43d9c45156da954 --- /dev/null +++ b/segmentation/mmseg_custom/models/utils/transformer.py @@ -0,0 +1,1082 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math +import warnings +from typing import Sequence + +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from mmcv.cnn import (Linear, build_activation_layer, build_conv_layer, + build_norm_layer, xavier_init) +from mmcv.cnn.bricks.drop import build_dropout +from mmcv.cnn.bricks.registry import (FEEDFORWARD_NETWORK, TRANSFORMER_LAYER, + TRANSFORMER_LAYER_SEQUENCE) +from mmcv.cnn.bricks.transformer import (BaseTransformerLayer, + TransformerLayerSequence, + build_attention, + build_feedforward_network, + build_transformer_layer_sequence) +from mmcv.runner.base_module import BaseModule, ModuleList, Sequential +from mmcv.utils import ConfigDict, deprecated_api_warning, to_2tuple +from torch.nn.init import normal_ + +from ..builder import TRANSFORMER + +try: + from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention + +except ImportError: + warnings.warn( + '`MultiScaleDeformableAttention` in MMCV has been moved to ' + '`mmcv.ops.multi_scale_deform_attn`, please update your MMCV') + from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention + + +class AdaptivePadding(nn.Module): + """Applies padding to input (if needed) so that input can get fully covered + by filter you specified. It support two modes "same" and "corner". The + "same" mode is same with "SAME" padding mode in TensorFlow, pad zero around + input. The "corner" mode would pad zero to bottom right. + + Args: + kernel_size (int | tuple): Size of the kernel: + stride (int | tuple): Stride of the filter. Default: 1: + dilation (int | tuple): Spacing between kernel elements. 
+ Default: 1 + padding (str): Support "same" and "corner", "corner" mode + would pad zero to bottom right, and "same" mode would + pad zero around input. Default: "corner". + Example: + >>> kernel_size = 16 + >>> stride = 16 + >>> dilation = 1 + >>> input = torch.rand(1, 1, 15, 17) + >>> adap_pad = AdaptivePadding( + >>> kernel_size=kernel_size, + >>> stride=stride, + >>> dilation=dilation, + >>> padding="corner") + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + >>> input = torch.rand(1, 1, 16, 17) + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + """ + def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): + + super(AdaptivePadding, self).__init__() + + assert padding in ('same', 'corner') + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + padding = to_2tuple(padding) + dilation = to_2tuple(dilation) + + self.padding = padding + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + + def get_pad_shape(self, input_shape): + input_h, input_w = input_shape + kernel_h, kernel_w = self.kernel_size + stride_h, stride_w = self.stride + output_h = math.ceil(input_h / stride_h) + output_w = math.ceil(input_w / stride_w) + pad_h = max((output_h - 1) * stride_h + + (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0) + pad_w = max((output_w - 1) * stride_w + + (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0) + return pad_h, pad_w + + def forward(self, x): + pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) + if pad_h > 0 or pad_w > 0: + if self.padding == 'corner': + x = F.pad(x, [0, pad_w, 0, pad_h]) + elif self.padding == 'same': + x = F.pad(x, [ + pad_w // 2, pad_w - pad_w // 2, pad_h // 2, + pad_h - pad_h // 2 + ]) + return x + + +class PatchMerging(BaseModule): + """Merge patch feature map. + + This layer groups feature map by kernel_size, and applies norm and linear + layers to the grouped feature map. Our implementation uses `nn.Unfold` to + merge patch, which is about 25% faster than original implementation. + Instead, we need to modify pretrained models for compatibility. + + Args: + in_channels (int): The num of input channels. + to gets fully covered by filter and stride you specified.. + Default: True. + out_channels (int): The num of output channels. + kernel_size (int | tuple, optional): the kernel size in the unfold + layer. Defaults to 2. + stride (int | tuple, optional): the stride of the sliding blocks in the + unfold layer. Default: None. (Would be set as `kernel_size`) + padding (int | tuple | string ): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int | tuple, optional): dilation parameter in the unfold + layer. Default: 1. + bias (bool, optional): Whether to add bias in linear layer or not. + Defaults: False. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: dict(type='LN'). + init_cfg (dict, optional): The extra config for initialization. + Default: None. 
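+
+    Example (minimal sketch; sizes are illustrative)::
+
+        >>> merge = PatchMerging(in_channels=96, out_channels=192)
+        >>> x = torch.rand(2, 56 * 56, 96)        # (B, H*W, C_in)
+        >>> out, out_size = merge(x, (56, 56))
+        >>> # out.shape == (2, 28 * 28, 192), out_size == (28, 28)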
+ """ + def __init__(self, + in_channels, + out_channels, + kernel_size=2, + stride=None, + padding='corner', + dilation=1, + bias=False, + norm_cfg=dict(type='LN'), + init_cfg=None): + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + if stride: + stride = stride + else: + stride = kernel_size + + kernel_size = to_2tuple(kernel_size) + stride = to_2tuple(stride) + dilation = to_2tuple(dilation) + + if isinstance(padding, str): + self.adap_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of unfold + padding = 0 + else: + self.adap_padding = None + + padding = to_2tuple(padding) + self.sampler = nn.Unfold( + kernel_size=kernel_size, + dilation=dilation, + padding=padding, + stride=stride) + + sample_dim = kernel_size[0] * kernel_size[1] * in_channels + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, sample_dim)[1] + else: + self.norm = None + + self.reduction = nn.Linear(sample_dim, out_channels, bias=bias) + + def forward(self, x, input_size): + """ + Args: + x (Tensor): Has shape (B, H*W, C_in). + input_size (tuple[int]): The spatial shape of x, arrange as (H, W). + Default: None. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out) + - out_size (tuple[int]): Spatial shape of x, arrange as + (Merged_H, Merged_W). + """ + B, L, C = x.shape + assert isinstance(input_size, Sequence), f'Expect ' \ + f'input_size is ' \ + f'`Sequence` ' \ + f'but get {input_size}' + + H, W = input_size + assert L == H * W, 'input feature has wrong size' + + x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W + # Use nn.Unfold to merge patch. About 25% faster than original method, + # but need to modify pretrained model for compatibility + + if self.adap_padding: + x = self.adap_padding(x) + H, W = x.shape[-2:] + + x = self.sampler(x) + # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2) + + out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] * + (self.sampler.kernel_size[0] - 1) - + 1) // self.sampler.stride[0] + 1 + out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] * + (self.sampler.kernel_size[1] - 1) - + 1) // self.sampler.stride[1] + 1 + + output_size = (out_h, out_w) + x = x.transpose(1, 2) # B, H/2*W/2, 4*C + x = self.norm(x) if self.norm else x + x = self.reduction(x) + return x, output_size + + +def inverse_sigmoid(x, eps=1e-5): + """Inverse function of sigmoid. + + Args: + x (Tensor): The tensor to do the + inverse. + eps (float): EPS avoid numerical + overflow. Defaults 1e-5. + Returns: + Tensor: The x has passed the inverse + function of sigmoid, has same + shape with input. + """ + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +@FEEDFORWARD_NETWORK.register_module(force=True) +class FFN(BaseModule): + """Implements feed-forward networks (FFNs) with identity connection. + Args: + embed_dims (int): The feature dimension. Same as + `MultiheadAttention`. Defaults: 256. + feedforward_channels (int): The hidden dimension of FFNs. + Defaults: 1024. + num_fcs (int, optional): The number of fully-connected layers in + FFNs. Default: 2. + act_cfg (dict, optional): The activation config for FFNs. + Default: dict(type='ReLU') + ffn_drop (float, optional): Probability of an element to be + zeroed in FFN. Default 0.0. 
+ add_identity (bool, optional): Whether to add the + identity connection. Default: `True`. + dropout_layer (obj:`ConfigDict`): The dropout_layer used + when adding the shortcut. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + + @deprecated_api_warning( + { + 'dropout': 'ffn_drop', + 'add_residual': 'add_identity' + }, + cls_name='FFN') + def __init__(self, + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0., + dropout_layer=None, + add_identity=True, + init_cfg=None, + with_cp=False, + **kwargs): + super().__init__(init_cfg) + assert num_fcs >= 2, 'num_fcs should be no less ' \ + f'than 2. got {num_fcs}.' + self.embed_dims = embed_dims + self.feedforward_channels = feedforward_channels + self.num_fcs = num_fcs + self.act_cfg = act_cfg + self.activate = build_activation_layer(act_cfg) + self.with_cp = with_cp + layers = [] + in_channels = embed_dims + for _ in range(num_fcs - 1): + layers.append( + Sequential( + Linear(in_channels, feedforward_channels), self.activate, + nn.Dropout(ffn_drop))) + in_channels = feedforward_channels + layers.append(Linear(feedforward_channels, embed_dims)) + layers.append(nn.Dropout(ffn_drop)) + self.layers = Sequential(*layers) + self.dropout_layer = build_dropout( + dropout_layer) if dropout_layer else torch.nn.Identity() + self.add_identity = add_identity + + @deprecated_api_warning({'residual': 'identity'}, cls_name='FFN') + def forward(self, x, identity=None): + """Forward function for `FFN`. + The function would add x to the output tensor if residue is None. + """ + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self.layers, x) + else: + out = self.layers(x) + + if not self.add_identity: + return self.dropout_layer(out) + if identity is None: + identity = x + return identity + self.dropout_layer(out) + + +@TRANSFORMER_LAYER.register_module() +class DetrTransformerDecoderLayer(BaseTransformerLayer): + """Implements decoder layer in DETR transformer. + + Args: + attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )): + Configs for self_attention or cross_attention, the order + should be consistent with it in `operation_order`. If it is + a dict, it would be expand to the number of attention in + `operation_order`. + feedforward_channels (int): The hidden dimension for FFNs. + ffn_dropout (float): Probability of an element to be zeroed + in ffn. Default 0.0. + operation_order (tuple[str]): The execution order of operation + in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm'). + Default:None + act_cfg (dict): The activation config for FFNs. Default: `LN` + norm_cfg (dict): Config dict for normalization layer. + Default: `LN`. + ffn_num_fcs (int): The number of fully-connected layers in FFNs. + Default:2. + """ + def __init__(self, + attn_cfgs, + feedforward_channels, + ffn_dropout=0.0, + operation_order=None, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + ffn_num_fcs=2, + **kwargs): + super(DetrTransformerDecoderLayer, self).__init__( + attn_cfgs=attn_cfgs, + feedforward_channels=feedforward_channels, + ffn_dropout=ffn_dropout, + operation_order=operation_order, + act_cfg=act_cfg, + norm_cfg=norm_cfg, + ffn_num_fcs=ffn_num_fcs, + **kwargs) + assert len(operation_order) == 6 + assert set(operation_order) == set( + ['self_attn', 'norm', 'cross_attn', 'ffn']) + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class DetrTransformerEncoder(TransformerLayerSequence): + """TransformerEncoder of DETR. 
+ + Args: + post_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. Only used when `self.pre_norm` is `True` + """ + def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs): + super(DetrTransformerEncoder, self).__init__(*args, **kwargs) + if post_norm_cfg is not None: + self.post_norm = build_norm_layer( + post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None + else: + assert not self.pre_norm, f'Use prenorm in ' \ + f'{self.__class__.__name__},' \ + f'Please specify post_norm_cfg' + self.post_norm = None + + def forward(self, *args, **kwargs): + """Forward function for `TransformerCoder`. + + Returns: + Tensor: forwarded results with shape [num_query, bs, embed_dims]. + """ + x = super(DetrTransformerEncoder, self).forward(*args, **kwargs) + if self.post_norm is not None: + x = self.post_norm(x) + return x + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class DetrTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer. + + Args: + return_intermediate (bool): Whether to return intermediate outputs. + post_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + def __init__(self, + *args, + post_norm_cfg=dict(type='LN'), + return_intermediate=False, + **kwargs): + + super(DetrTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + if post_norm_cfg is not None: + self.post_norm = build_norm_layer(post_norm_cfg, + self.embed_dims)[1] + else: + self.post_norm = None + + def forward(self, query, *args, **kwargs): + """Forward function for `TransformerDecoder`. + + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. + """ + if not self.return_intermediate: + x = super().forward(query, *args, **kwargs) + if self.post_norm: + x = self.post_norm(x)[None] + return x + + intermediate = [] + for layer in self.layers: + query = layer(query, *args, **kwargs) + if self.return_intermediate: + if self.post_norm is not None: + intermediate.append(self.post_norm(query)) + else: + intermediate.append(query) + return torch.stack(intermediate) + + +@TRANSFORMER.register_module() +class Transformer(BaseModule): + """Implements the DETR transformer. + + Following the official DETR implementation, this module copy-paste + from torch.nn.Transformer with modifications: + + * positional encodings are passed in MultiheadAttention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers + + See `paper: End-to-End Object Detection with Transformers + `_ for details. + + Args: + encoder (`mmcv.ConfigDict` | Dict): Config of + TransformerEncoder. Defaults to None. + decoder ((`mmcv.ConfigDict` | Dict)): Config of + TransformerDecoder. Defaults to None + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Defaults to None. 
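+
+    Example (illustrative sketch; the encoder/decoder configs are
+    hypothetical placeholders)::
+
+        >>> # transformer = Transformer(
+        >>> #     encoder=dict(type='DetrTransformerEncoder',
+        >>> #                  num_layers=6, ...),
+        >>> #     decoder=dict(type='DetrTransformerDecoder', num_layers=6,
+        >>> #                  return_intermediate=True, ...))
+        >>> # out_dec, memory = transformer(x, mask, query_embed, pos_embed)
+        >>> # x: [bs, c, h, w], mask: [bs, h, w],
+        >>> # query_embed: [num_query, c], pos_embed: same shape as x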
+ """ + def __init__(self, encoder=None, decoder=None, init_cfg=None): + super(Transformer, self).__init__(init_cfg=init_cfg) + self.encoder = build_transformer_layer_sequence(encoder) + self.decoder = build_transformer_layer_sequence(decoder) + self.embed_dims = self.encoder.embed_dims + + def init_weights(self): + # follow the official DETR to init parameters + for m in self.modules(): + if hasattr(m, 'weight') and m.weight.dim() > 1: + xavier_init(m, distribution='uniform') + self._is_init = True + + def forward(self, x, mask, query_embed, pos_embed): + """Forward function for `Transformer`. + + Args: + x (Tensor): Input query with shape [bs, c, h, w] where + c = embed_dims. + mask (Tensor): The key_padding_mask used for encoder and decoder, + with shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, with shape + [num_query, c]. + pos_embed (Tensor): The positional encoding for encoder and + decoder, with the same shape as `x`. + + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + + - out_dec: Output from decoder. If return_intermediate_dec \ + is True output has shape [num_dec_layers, bs, + num_query, embed_dims], else has shape [1, bs, \ + num_query, embed_dims]. + - memory: Output results from encoder, with shape \ + [bs, embed_dims, h, w]. + """ + bs, c, h, w = x.shape + # use `view` instead of `flatten` for dynamically exporting to ONNX + x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c] + pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1) + query_embed = query_embed.unsqueeze(1).repeat( + 1, bs, 1) # [num_query, dim] -> [num_query, bs, dim] + mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w] + memory = self.encoder( + query=x, + key=None, + value=None, + query_pos=pos_embed, + query_key_padding_mask=mask) + target = torch.zeros_like(query_embed) + # out_dec: [num_layers, num_query, bs, dim] + out_dec = self.decoder( + query=target, + key=memory, + value=memory, + key_pos=pos_embed, + query_pos=query_embed, + key_padding_mask=mask) + out_dec = out_dec.transpose(1, 2) + memory = memory.permute(1, 2, 0).reshape(bs, c, h, w) + return out_dec, memory + + +@TRANSFORMER_LAYER_SEQUENCE.register_module() +class DeformableDetrTransformerDecoder(TransformerLayerSequence): + """Implements the decoder in DETR transformer. + + Args: + return_intermediate (bool): Whether to return intermediate outputs. + coder_norm_cfg (dict): Config of last normalization layer. Default: + `LN`. + """ + def __init__(self, *args, return_intermediate=False, **kwargs): + + super(DeformableDetrTransformerDecoder, self).__init__(*args, **kwargs) + self.return_intermediate = return_intermediate + + def forward(self, + query, + *args, + reference_points=None, + valid_ratios=None, + reg_branches=None, + **kwargs): + """Forward function for `TransformerDecoder`. + + Args: + query (Tensor): Input query with shape + `(num_query, bs, embed_dims)`. + reference_points (Tensor): The reference + points of offset. has shape + (bs, num_query, 4) when as_two_stage, + otherwise has shape ((bs, num_query, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + reg_branch: (obj:`nn.ModuleList`): Used for + refining the regression results. Only would + be passed when with_box_refine is True, + otherwise would be passed a `None`. + + Returns: + Tensor: Results with shape [1, num_query, bs, embed_dims] when + return_intermediate is `False`, otherwise it has shape + [num_layers, num_query, bs, embed_dims]. 
+ """ + output = query + intermediate = [] + intermediate_reference_points = [] + for lid, layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = reference_points[:, :, None] * \ + torch.cat([valid_ratios, valid_ratios], + -1)[:, None] + else: + assert reference_points.shape[-1] == 2 + reference_points_input = reference_points[:, :, None] * \ + valid_ratios[:, None] + output = layer( + output, + *args, + reference_points=reference_points_input, + **kwargs) + output = output.permute(1, 0, 2) + + if reg_branches is not None: + tmp = reg_branches[lid](output) + if reference_points.shape[-1] == 4: + new_reference_points = tmp + inverse_sigmoid( + reference_points) + new_reference_points = new_reference_points.sigmoid() + else: + assert reference_points.shape[-1] == 2 + new_reference_points = tmp + new_reference_points[..., :2] = tmp[ + ..., :2] + inverse_sigmoid( + reference_points) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + + output = output.permute(1, 0, 2) + if self.return_intermediate: + intermediate.append(output) + intermediate_reference_points.append(reference_points) + + if self.return_intermediate: + return torch.stack(intermediate), torch.stack( + intermediate_reference_points) + + return output, reference_points + + +@TRANSFORMER.register_module() +class DeformableDetrTransformer(Transformer): + """Implements the DeformableDETR transformer. + + Args: + as_two_stage (bool): Generate query from encoder features. + Default: False. + num_feature_levels (int): Number of feature maps from FPN: + Default: 4. + two_stage_num_proposals (int): Number of proposals when set + `as_two_stage` as True. Default: 300. + """ + + def __init__(self, + as_two_stage=False, + num_feature_levels=4, + two_stage_num_proposals=300, + **kwargs): + super(DeformableDetrTransformer, self).__init__(**kwargs) + self.as_two_stage = as_two_stage + self.num_feature_levels = num_feature_levels + self.two_stage_num_proposals = two_stage_num_proposals + self.embed_dims = self.encoder.embed_dims + self.init_layers() + + def init_layers(self): + """Initialize layers of the DeformableDetrTransformer.""" + self.level_embeds = nn.Parameter( + torch.Tensor(self.num_feature_levels, self.embed_dims)) + + if self.as_two_stage: + self.enc_output = nn.Linear(self.embed_dims, self.embed_dims) + self.enc_output_norm = nn.LayerNorm(self.embed_dims) + self.pos_trans = nn.Linear(self.embed_dims * 2, + self.embed_dims * 2) + self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2) + else: + self.reference_points = nn.Linear(self.embed_dims, 2) + + def init_weights(self): + """Initialize the transformer weights.""" + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + for m in self.modules(): + if isinstance(m, MultiScaleDeformableAttention): + m.init_weights() + if not self.as_two_stage: + xavier_init(self.reference_points, distribution='uniform', bias=0.) + normal_(self.level_embeds) + + def gen_encoder_output_proposals(self, memory, memory_padding_mask, + spatial_shapes): + """Generate proposals from encoded memory. + + Args: + memory (Tensor) : The output of encoder, + has shape (bs, num_key, embed_dim). num_key is + equal the number of points on feature map from + all level. + memory_padding_mask (Tensor): Padding mask for memory. + has shape (bs, num_key). + spatial_shapes (Tensor): The shape of all feature maps. + has shape (num_level, 2). 
+ + Returns: + tuple: A tuple of feature map and bbox prediction. + + - output_memory (Tensor): The input of decoder, \ + has shape (bs, num_key, embed_dim). num_key is \ + equal the number of points on feature map from \ + all levels. + - output_proposals (Tensor): The normalized proposal \ + after a inverse sigmoid, has shape \ + (bs, num_keys, 4). + """ + + N, S, C = memory.shape + proposals = [] + _cur = 0 + for lvl, (H, W) in enumerate(spatial_shapes): + mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view( + N, H, W, 1) + valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1) + valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1) + + grid_y, grid_x = torch.meshgrid( + torch.linspace( + 0, H - 1, H, dtype=torch.float32, device=memory.device), + torch.linspace( + 0, W - 1, W, dtype=torch.float32, device=memory.device)) + grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) + + scale = torch.cat([valid_W.unsqueeze(-1), + valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2) + grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale + wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl) + proposal = torch.cat((grid, wh), -1).view(N, -1, 4) + proposals.append(proposal) + _cur += (H * W) + output_proposals = torch.cat(proposals, 1) + output_proposals_valid = ((output_proposals > 0.01) & + (output_proposals < 0.99)).all( + -1, keepdim=True) + output_proposals = torch.log(output_proposals / (1 - output_proposals)) + output_proposals = output_proposals.masked_fill( + memory_padding_mask.unsqueeze(-1), float('inf')) + output_proposals = output_proposals.masked_fill( + ~output_proposals_valid, float('inf')) + + output_memory = memory + output_memory = output_memory.masked_fill( + memory_padding_mask.unsqueeze(-1), float(0)) + output_memory = output_memory.masked_fill(~output_proposals_valid, + float(0)) + output_memory = self.enc_output_norm(self.enc_output(output_memory)) + return output_memory, output_proposals + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + """Get the reference points used in decoder. + + Args: + spatial_shapes (Tensor): The shape of all + feature maps, has shape (num_level, 2). + valid_ratios (Tensor): The radios of valid + points on the feature map, has shape + (bs, num_levels, 2) + device (obj:`device`): The device where + reference_points should be. + + Returns: + Tensor: reference points used in decoder, has \ + shape (bs, num_keys, num_levels, 2). 
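+
+        Example (minimal sketch; sizes are illustrative)::
+
+            >>> spatial_shapes = torch.tensor([[32, 32], [16, 16]])
+            >>> valid_ratios = torch.ones(2, 2, 2)   # (bs, num_levels, 2)
+            >>> ref = DeformableDetrTransformer.get_reference_points(
+            ...     spatial_shapes, valid_ratios, device='cpu')
+            >>> # ref.shape == (2, 32 * 32 + 16 * 16, 2, 2)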
+ """ + reference_points_list = [] + for lvl, (H, W) in enumerate(spatial_shapes): + # TODO check this 0.5 + ref_y, ref_x = torch.meshgrid( + torch.linspace( + 0.5, H - 0.5, H, dtype=torch.float32, device=device), + torch.linspace( + 0.5, W - 0.5, W, dtype=torch.float32, device=device)) + ref_y = ref_y.reshape(-1)[None] / ( + valid_ratios[:, None, lvl, 1] * H) + ref_x = ref_x.reshape(-1)[None] / ( + valid_ratios[:, None, lvl, 0] * W) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def get_valid_ratio(self, mask): + """Get the valid radios of feature maps of all level.""" + _, H, W = mask.shape + valid_H = torch.sum(~mask[:, :, 0], 1) + valid_W = torch.sum(~mask[:, 0, :], 1) + valid_ratio_h = valid_H.float() / H + valid_ratio_w = valid_W.float() / W + valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1) + return valid_ratio + + def get_proposal_pos_embed(self, + proposals, + num_pos_feats=128, + temperature=10000): + """Get the position embedding of proposal.""" + scale = 2 * math.pi + dim_t = torch.arange( + num_pos_feats, dtype=torch.float32, device=proposals.device) + dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats) + # N, L, 4 + proposals = proposals.sigmoid() * scale + # N, L, 4, 128 + pos = proposals[:, :, :, None] / dim_t + # N, L, 4, 64, 2 + pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), + dim=4).flatten(2) + return pos + + def forward(self, + mlvl_feats, + mlvl_masks, + query_embed, + mlvl_pos_embeds, + reg_branches=None, + cls_branches=None, + **kwargs): + """Forward function for `Transformer`. + + Args: + mlvl_feats (list(Tensor)): Input queries from + different level. Each element has shape + [bs, embed_dims, h, w]. + mlvl_masks (list(Tensor)): The key_padding_mask from + different level used for encoder and decoder, + each element has shape [bs, h, w]. + query_embed (Tensor): The query embedding for decoder, + with shape [num_query, c]. + mlvl_pos_embeds (list(Tensor)): The positional encoding + of feats from different level, has the shape + [bs, embed_dims, h, w]. + reg_branches (obj:`nn.ModuleList`): Regression heads for + feature maps from each decoder layer. Only would + be passed when + `with_box_refine` is True. Default to None. + cls_branches (obj:`nn.ModuleList`): Classification heads + for feature maps from each decoder layer. Only would + be passed when `as_two_stage` + is True. Default to None. + + + Returns: + tuple[Tensor]: results of decoder containing the following tensor. + + - inter_states: Outputs from decoder. If + return_intermediate_dec is True output has shape \ + (num_dec_layers, bs, num_query, embed_dims), else has \ + shape (1, bs, num_query, embed_dims). + - init_reference_out: The initial value of reference \ + points, has shape (bs, num_queries, 4). + - inter_references_out: The internal value of reference \ + points in decoder, has shape \ + (num_dec_layers, bs,num_query, embed_dims) + - enc_outputs_class: The classification score of \ + proposals generated from \ + encoder's feature maps, has shape \ + (batch, h*w, num_classes). \ + Only would be returned when `as_two_stage` is True, \ + otherwise None. + - enc_outputs_coord_unact: The regression results \ + generated from encoder's feature maps., has shape \ + (batch, h*w, 4). Only would \ + be returned when `as_two_stage` is True, \ + otherwise None. 
+ """ + assert self.as_two_stage or query_embed is not None + + feat_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for lvl, (feat, mask, pos_embed) in enumerate( + zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)): + bs, c, h, w = feat.shape + spatial_shape = (h, w) + spatial_shapes.append(spatial_shape) + feat = feat.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + feat_flatten.append(feat) + mask_flatten.append(mask) + feat_flatten = torch.cat(feat_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor( + spatial_shapes, dtype=torch.long, device=feat_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros( + (1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack( + [self.get_valid_ratio(m) for m in mlvl_masks], 1) + + reference_points = \ + self.get_reference_points(spatial_shapes, + valid_ratios, + device=feat.device) + + feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims) + lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute( + 1, 0, 2) # (H*W, bs, embed_dims) + memory = self.encoder( + query=feat_flatten, + key=None, + value=None, + query_pos=lvl_pos_embed_flatten, + query_key_padding_mask=mask_flatten, + spatial_shapes=spatial_shapes, + reference_points=reference_points, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + **kwargs) + + memory = memory.permute(1, 0, 2) + bs, _, c = memory.shape + if self.as_two_stage: + output_memory, output_proposals = \ + self.gen_encoder_output_proposals( + memory, mask_flatten, spatial_shapes) + enc_outputs_class = cls_branches[self.decoder.num_layers]( + output_memory) + enc_outputs_coord_unact = \ + reg_branches[ + self.decoder.num_layers](output_memory) + output_proposals + + topk = self.two_stage_num_proposals + topk_proposals = torch.topk( + enc_outputs_class[..., 0], topk, dim=1)[1] + topk_coords_unact = torch.gather( + enc_outputs_coord_unact, 1, + topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) + topk_coords_unact = topk_coords_unact.detach() + reference_points = topk_coords_unact.sigmoid() + init_reference_out = reference_points + pos_trans_out = self.pos_trans_norm( + self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))) + query_pos, query = torch.split(pos_trans_out, c, dim=2) + else: + query_pos, query = torch.split(query_embed, c, dim=1) + query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1) + query = query.unsqueeze(0).expand(bs, -1, -1) + reference_points = self.reference_points(query_pos).sigmoid() + init_reference_out = reference_points + + # decoder + query = query.permute(1, 0, 2) + memory = memory.permute(1, 0, 2) + query_pos = query_pos.permute(1, 0, 2) + inter_states, inter_references = self.decoder( + query=query, + key=None, + value=memory, + query_pos=query_pos, + key_padding_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + reg_branches=reg_branches, + **kwargs) + + inter_references_out = inter_references + if self.as_two_stage: + return inter_states, init_reference_out, \ + inter_references_out, enc_outputs_class, \ + enc_outputs_coord_unact + return inter_states, init_reference_out, \ + inter_references_out, None, None + + 
+@TRANSFORMER.register_module() +class DynamicConv(BaseModule): + """Implements Dynamic Convolution. + + This module generate parameters for each sample and + use bmm to implement 1*1 convolution. Code is modified + from the `official github repo `_ . + + Args: + in_channels (int): The input feature channel. + Defaults to 256. + feat_channels (int): The inner feature channel. + Defaults to 64. + out_channels (int, optional): The output feature channel. + When not specified, it will be set to `in_channels` + by default + input_feat_shape (int): The shape of input feature. + Defaults to 7. + with_proj (bool): Project two-dimentional feature to + one-dimentional feature. Default to True. + act_cfg (dict): The activation config for DynamicConv. + norm_cfg (dict): Config dict for normalization layer. Default + layer normalization. + init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization. + Default: None. + """ + def __init__(self, + in_channels=256, + feat_channels=64, + out_channels=None, + input_feat_shape=7, + with_proj=True, + act_cfg=dict(type='ReLU', inplace=True), + norm_cfg=dict(type='LN'), + init_cfg=None): + super(DynamicConv, self).__init__(init_cfg) + self.in_channels = in_channels + self.feat_channels = feat_channels + self.out_channels_raw = out_channels + self.input_feat_shape = input_feat_shape + self.with_proj = with_proj + self.act_cfg = act_cfg + self.norm_cfg = norm_cfg + self.out_channels = out_channels if out_channels else in_channels + + self.num_params_in = self.in_channels * self.feat_channels + self.num_params_out = self.out_channels * self.feat_channels + self.dynamic_layer = nn.Linear( + self.in_channels, self.num_params_in + self.num_params_out) + + self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1] + self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1] + + self.activation = build_activation_layer(act_cfg) + + num_output = self.out_channels * input_feat_shape ** 2 + if self.with_proj: + self.fc_layer = nn.Linear(num_output, self.out_channels) + self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1] + + def forward(self, param_feature, input_feature): + """Forward function for `DynamicConv`. + + Args: + param_feature (Tensor): The feature can be used + to generate the parameter, has shape + (num_all_proposals, in_channels). + input_feature (Tensor): Feature that + interact with parameters, has shape + (num_all_proposals, in_channels, H, W). + + Returns: + Tensor: The output feature has shape + (num_all_proposals, out_channels). 
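+
+        Example (minimal sketch; sizes are illustrative)::
+
+            >>> dynamic_conv = DynamicConv(in_channels=256, feat_channels=64,
+            ...                            out_channels=256, input_feat_shape=7)
+            >>> param_feat = torch.rand(100, 256)        # (num_proposals, C)
+            >>> roi_feat = torch.rand(100, 256, 7, 7)    # (num_proposals, C, 7, 7)
+            >>> out = dynamic_conv(param_feat, roi_feat)
+            >>> # out.shape == (100, 256)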
+ """ + input_feature = input_feature.flatten(2).permute(2, 0, 1) + + input_feature = input_feature.permute(1, 0, 2) + parameters = self.dynamic_layer(param_feature) + + param_in = parameters[:, :self.num_params_in].view( + -1, self.in_channels, self.feat_channels) + param_out = parameters[:, -self.num_params_out:].view( + -1, self.feat_channels, self.out_channels) + + # input_feature has shape (num_all_proposals, H*W, in_channels) + # param_in has shape (num_all_proposals, in_channels, feat_channels) + # feature has shape (num_all_proposals, H*W, feat_channels) + features = torch.bmm(input_feature, param_in) + features = self.norm_in(features) + features = self.activation(features) + + # param_out has shape (batch_size, feat_channels, out_channels) + features = torch.bmm(features, param_out) + features = self.norm_out(features) + features = self.activation(features) + + if self.with_proj: + features = features.flatten(1) + features = self.fc_layer(features) + features = self.fc_norm(features) + features = self.activation(features) + + return features diff --git a/segmentation/ops_dcnv3/DCNv3.egg-info/PKG-INFO b/segmentation/ops_dcnv3/DCNv3.egg-info/PKG-INFO new file mode 100644 index 0000000000000000000000000000000000000000..f19b648aeb48666c6371cc437ba72d9709dec17e --- /dev/null +++ b/segmentation/ops_dcnv3/DCNv3.egg-info/PKG-INFO @@ -0,0 +1,11 @@ +Metadata-Version: 2.1 +Name: DCNv3 +Version: 1.0 +Summary: PyTorch Wrapper for CUDA Functions of DCNv3 +Home-page: https://github.com/OpenGVLab/InternImage +Author: InternImage +License: UNKNOWN +Platform: UNKNOWN + +UNKNOWN + diff --git a/segmentation/ops_dcnv3/DCNv3.egg-info/SOURCES.txt b/segmentation/ops_dcnv3/DCNv3.egg-info/SOURCES.txt new file mode 100644 index 0000000000000000000000000000000000000000..8f1e7a19426e597d667383d301261f28c1450047 --- /dev/null +++ b/segmentation/ops_dcnv3/DCNv3.egg-info/SOURCES.txt @@ -0,0 +1,12 @@ +setup.py +/pasteur/u/yiming/homework4/segmentation/ops_dcnv3/src/vision.cpp +/pasteur/u/yiming/homework4/segmentation/ops_dcnv3/src/cpu/dcnv3_cpu.cpp +/pasteur/u/yiming/homework4/segmentation/ops_dcnv3/src/cuda/dcnv3_cuda.cu +DCNv3.egg-info/PKG-INFO +DCNv3.egg-info/SOURCES.txt +DCNv3.egg-info/dependency_links.txt +DCNv3.egg-info/top_level.txt +functions/__init__.py +functions/dcnv3_func.py +modules/__init__.py +modules/dcnv3.py \ No newline at end of file diff --git a/segmentation/ops_dcnv3/DCNv3.egg-info/dependency_links.txt b/segmentation/ops_dcnv3/DCNv3.egg-info/dependency_links.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/segmentation/ops_dcnv3/DCNv3.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/segmentation/ops_dcnv3/DCNv3.egg-info/top_level.txt b/segmentation/ops_dcnv3/DCNv3.egg-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..824e2116881af6a1dd36c0fd2d1b0916b0e951d7 --- /dev/null +++ b/segmentation/ops_dcnv3/DCNv3.egg-info/top_level.txt @@ -0,0 +1,3 @@ +DCNv3 +functions +modules diff --git a/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/functions/__init__.py b/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/functions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..063487930895bf7b53bac670cd3d69d570b85833 --- /dev/null +++ b/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/functions/__init__.py @@ -0,0 +1,7 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed 
under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from .dcnv3_func import DCNv3Function, dcnv3_core_pytorch diff --git a/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/functions/dcnv3_func.py b/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/functions/dcnv3_func.py new file mode 100644 index 0000000000000000000000000000000000000000..198e70c8021ee0d5fa655dad99007965e3ebc46a --- /dev/null +++ b/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/functions/dcnv3_func.py @@ -0,0 +1,187 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +import DCNv3 +import torch +import torch.nn.functional as F +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.cuda.amp import custom_bwd, custom_fwd + + +class DCNv3Function(Function): + @staticmethod + @custom_fwd + def forward( + ctx, input, offset, mask, + kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, + group, group_channels, offset_scale, im2col_step): + ctx.kernel_h = kernel_h + ctx.kernel_w = kernel_w + ctx.stride_h = stride_h + ctx.stride_w = stride_w + ctx.pad_h = pad_h + ctx.pad_w = pad_w + ctx.dilation_h = dilation_h + ctx.dilation_w = dilation_w + ctx.group = group + ctx.group_channels = group_channels + ctx.offset_scale = offset_scale + ctx.im2col_step = im2col_step + output = DCNv3.dcnv3_forward( + input, offset, mask, kernel_h, + kernel_w, stride_h, stride_w, pad_h, + pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale, ctx.im2col_step) + ctx.save_for_backward(input, offset, mask) + + return output + + @staticmethod + @once_differentiable + @custom_bwd + def backward(ctx, grad_output): + input, offset, mask = ctx.saved_tensors + grad_input, grad_offset, grad_mask = \ + DCNv3.dcnv3_backward( + input, offset, mask, ctx.kernel_h, + ctx.kernel_w, ctx.stride_h, ctx.stride_w, ctx.pad_h, + ctx.pad_w, ctx.dilation_h, ctx.dilation_w, ctx.group, + ctx.group_channels, ctx.offset_scale, grad_output.contiguous(), ctx.im2col_step) + + return grad_input, grad_offset, grad_mask, \ + None, None, None, None, None, None, None, None, None, None, None, None + + @staticmethod + def symbolic(g, input, offset, mask, kernel_h, kernel_w, stride_h, + stride_w, pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale, im2col_step): + """Symbolic function for mmdeploy::DCNv3. + + Returns: + DCNv3 op for onnx. 
+ """ + return g.op( + 'mmdeploy::TRTDCNv3', + input, + offset, + mask, + kernel_h_i=int(kernel_h), + kernel_w_i=int(kernel_w), + stride_h_i=int(stride_h), + stride_w_i=int(stride_w), + pad_h_i=int(pad_h), + pad_w_i=int(pad_w), + dilation_h_i=int(dilation_h), + dilation_w_i=int(dilation_w), + group_i=int(group), + group_channels_i=int(group_channels), + offset_scale_f=float(offset_scale), + im2col_step_i=int(im2col_step), + ) + + +def _get_reference_points(spatial_shapes, device, kernel_h, kernel_w, dilation_h, dilation_w, pad_h=0, pad_w=0, stride_h=1, stride_w=1): + _, H_, W_, _ = spatial_shapes + H_out = (H_ - (dilation_h * (kernel_h - 1) + 1)) // stride_h + 1 + W_out = (W_ - (dilation_w * (kernel_w - 1) + 1)) // stride_w + 1 + + ref_y, ref_x = torch.meshgrid( + torch.linspace( + # pad_h + 0.5, + # H_ - pad_h - 0.5, + (dilation_h * (kernel_h - 1)) // 2 + 0.5, + (dilation_h * (kernel_h - 1)) // 2 + 0.5 + (H_out - 1) * stride_h, + H_out, + dtype=torch.float32, + device=device), + torch.linspace( + # pad_w + 0.5, + # W_ - pad_w - 0.5, + (dilation_w * (kernel_w - 1)) // 2 + 0.5, + (dilation_w * (kernel_w - 1)) // 2 + 0.5 + (W_out - 1) * stride_w, + W_out, + dtype=torch.float32, + device=device)) + ref_y = ref_y.reshape(-1)[None] / H_ + ref_x = ref_x.reshape(-1)[None] / W_ + + ref = torch.stack((ref_x, ref_y), -1).reshape( + 1, H_out, W_out, 1, 2) + + return ref + + +def _generate_dilation_grids(spatial_shapes, kernel_h, kernel_w, dilation_h, dilation_w, group, device): + _, H_, W_, _ = spatial_shapes + points_list = [] + x, y = torch.meshgrid( + torch.linspace( + -((dilation_w * (kernel_w - 1)) // 2), + -((dilation_w * (kernel_w - 1)) // 2) + + (kernel_w - 1) * dilation_w, kernel_w, + dtype=torch.float32, + device=device), + torch.linspace( + -((dilation_h * (kernel_h - 1)) // 2), + -((dilation_h * (kernel_h - 1)) // 2) + + (kernel_h - 1) * dilation_h, kernel_h, + dtype=torch.float32, + device=device)) + + points_list.extend([x / W_, y / H_]) + grid = torch.stack(points_list, -1).reshape(-1, 1, 2).\ + repeat(1, group, 1).permute(1, 0, 2) + grid = grid.reshape(1, 1, 1, group * kernel_h * kernel_w, 2) + + return grid + + +def dcnv3_core_pytorch( + input, offset, mask, kernel_h, + kernel_w, stride_h, stride_w, pad_h, + pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale): + # for debug and test only, + # need to use cuda version instead + input = F.pad( + input, + [0, 0, pad_h, pad_h, pad_w, pad_w]) + N_, H_in, W_in, _ = input.shape + _, H_out, W_out, _ = offset.shape + + ref = _get_reference_points( + input.shape, input.device, kernel_h, kernel_w, dilation_h, dilation_w, pad_h, pad_w, stride_h, stride_w) + grid = _generate_dilation_grids( + input.shape, kernel_h, kernel_w, dilation_h, dilation_w, group, input.device) + spatial_norm = torch.tensor([W_in, H_in]).reshape(1, 1, 1, 2).\ + repeat(1, 1, 1, group*kernel_h*kernel_w).to(input.device) + + sampling_locations = (ref + grid * offset_scale).repeat(N_, 1, 1, 1, 1).flatten(3, 4) + \ + offset * offset_scale / spatial_norm + + P_ = kernel_h * kernel_w + sampling_grids = 2 * sampling_locations - 1 + # N_, H_in, W_in, group*group_channels -> N_, H_in*W_in, group*group_channels -> N_, group*group_channels, H_in*W_in -> N_*group, group_channels, H_in, W_in + input_ = input.view(N_, H_in*W_in, group*group_channels).transpose(1, 2).\ + reshape(N_*group, group_channels, H_in, W_in) + # N_, H_out, W_out, group*P_*2 -> N_, H_out*W_out, group, P_, 2 -> N_, group, H_out*W_out, P_, 2 -> N_*group, H_out*W_out, P_, 2 + sampling_grid_ = 
sampling_grids.view(N_, H_out*W_out, group, P_, 2).transpose(1, 2).\ + flatten(0, 1) + # N_*group, group_channels, H_out*W_out, P_ + sampling_input_ = F.grid_sample( + input_, sampling_grid_, mode='bilinear', padding_mode='zeros', align_corners=False) + + # (N_, H_out, W_out, group*P_) -> N_, H_out*W_out, group, P_ -> (N_, group, H_out*W_out, P_) -> (N_*group, 1, H_out*W_out, P_) + mask = mask.view(N_, H_out*W_out, group, P_).transpose(1, 2).\ + reshape(N_*group, 1, H_out*W_out, P_) + output = (sampling_input_ * mask).sum(-1).view(N_, + group*group_channels, H_out*W_out) + + return output.transpose(1, 2).reshape(N_, H_out, W_out, -1).contiguous() diff --git a/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/modules/__init__.py b/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..acb73a867ce93dc1757655181608721aab07028d --- /dev/null +++ b/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/modules/__init__.py @@ -0,0 +1,7 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from .dcnv3 import DCNv3, DCNv3_pytorch diff --git a/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/modules/dcnv3.py b/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/modules/dcnv3.py new file mode 100644 index 0000000000000000000000000000000000000000..788b21179b81b0aa8af3df634e13cf6f6460c00b --- /dev/null +++ b/segmentation/ops_dcnv3/build/lib.linux-x86_64-3.9/modules/dcnv3.py @@ -0,0 +1,381 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +import warnings + +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn.init import constant_, xavier_uniform_ + +from ..functions import DCNv3Function, dcnv3_core_pytorch + +try: + from DCNv4.functions import DCNv4Function +except: + warnings.warn('Now, we support DCNv4 in InternImage.') +import math + + +class to_channels_first(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 3, 1, 2) + + +class to_channels_last(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 2, 3, 1) + + +def build_norm_layer(dim, + norm_layer, + in_format='channels_last', + out_format='channels_last', + eps=1e-6): + layers = [] + if norm_layer == 'BN': + if in_format == 'channels_last': + layers.append(to_channels_first()) + layers.append(nn.BatchNorm2d(dim)) + if out_format == 'channels_last': + layers.append(to_channels_last()) + elif norm_layer == 'LN': + if in_format == 'channels_first': + layers.append(to_channels_last()) + layers.append(nn.LayerNorm(dim, eps=eps)) + if out_format == 'channels_first': + layers.append(to_channels_first()) + else: + raise NotImplementedError( + f'build_norm_layer does not support {norm_layer}') + return nn.Sequential(*layers) + + +def build_act_layer(act_layer): + if act_layer == 'ReLU': + return nn.ReLU(inplace=True) + elif act_layer == 'SiLU': + return nn.SiLU(inplace=True) + elif act_layer == 'GELU': + return nn.GELU() + + raise NotImplementedError(f'build_act_layer does not support {act_layer}') + + +def 
_is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format(n, type(n))) + + return (n & (n - 1) == 0) and n != 0 + + +class CenterFeatureScaleModule(nn.Module): + def forward(self, + query, + center_feature_scale_proj_weight, + center_feature_scale_proj_bias): + center_feature_scale = F.linear(query, + weight=center_feature_scale_proj_weight, + bias=center_feature_scale_proj_bias).sigmoid() + return center_feature_scale + + +class DCNv3_pytorch(nn.Module): + def __init__( + self, + channels=64, + kernel_size=3, + dw_kernel_size=None, + stride=1, + pad=1, + dilation=1, + group=4, + offset_scale=1.0, + act_layer='GELU', + norm_layer='LN', + center_feature_scale=False): + """ + DCNv3 Module + :param channels + :param kernel_size + :param stride + :param pad + :param dilation + :param group + :param offset_scale + :param act_layer + :param norm_layer + """ + super().__init__() + if channels % group != 0: + raise ValueError( + f'channels must be divisible by group, but got {channels} and {group}') + _d_per_group = channels // group + dw_kernel_size = dw_kernel_size if dw_kernel_size is not None else kernel_size + # you'd better set _d_per_group to a power of 2 which is more efficient in our CUDA implementation + if not _is_power_of_2(_d_per_group): + warnings.warn( + "You'd better set channels in DCNv3 to make the dimension of each attention head a power of 2 " + 'which is more efficient in our CUDA implementation.') + + self.offset_scale = offset_scale + self.channels = channels + self.kernel_size = kernel_size + self.dw_kernel_size = dw_kernel_size + self.stride = stride + self.dilation = dilation + self.pad = pad + self.group = group + self.group_channels = channels // group + self.offset_scale = offset_scale + self.center_feature_scale = center_feature_scale + + self.dw_conv = nn.Sequential( + nn.Conv2d( + channels, + channels, + kernel_size=dw_kernel_size, + stride=1, + padding=(dw_kernel_size - 1) // 2, + groups=channels), + build_norm_layer( + channels, + norm_layer, + 'channels_first', + 'channels_last'), + build_act_layer(act_layer)) + self.offset = nn.Linear( + channels, + group * kernel_size * kernel_size * 2) + self.mask = nn.Linear( + channels, + group * kernel_size * kernel_size) + self.input_proj = nn.Linear(channels, channels) + self.output_proj = nn.Linear(channels, channels) + self._reset_parameters() + + if center_feature_scale: + self.center_feature_scale_proj_weight = nn.Parameter( + torch.zeros((group, channels), dtype=torch.float)) + self.center_feature_scale_proj_bias = nn.Parameter( + torch.tensor(0.0, dtype=torch.float).view((1,)).repeat(group, )) + self.center_feature_scale_module = CenterFeatureScaleModule() + + def _reset_parameters(self): + constant_(self.offset.weight.data, 0.) + constant_(self.offset.bias.data, 0.) + constant_(self.mask.weight.data, 0.) + constant_(self.mask.bias.data, 0.) + xavier_uniform_(self.input_proj.weight.data) + constant_(self.input_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) 
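The forward pass defined next operates on channels-last feature maps of shape (N, H, W, C). A minimal usage sketch of the pure-PyTorch module follows; the import path is illustrative (it depends on how `ops_dcnv3` is installed), and this path needs no compiled CUDA extension:

import torch
from modules.dcnv3 import DCNv3_pytorch  # hypothetical import path within ops_dcnv3

# 64 channels split into 4 groups of 16 (a power of 2, so no efficiency warning is raised).
m = DCNv3_pytorch(channels=64, kernel_size=3, stride=1, pad=1, group=4)
x = torch.randn(2, 32, 32, 64)            # (N, H, W, C), channels last
y = m(x)
print(y.shape)                            # torch.Size([2, 32, 32, 64]); stride=1, pad=1 keeps H and W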
+ + def forward(self, input): + """ + :param query (N, H, W, C) + :return output (N, H, W, C) + """ + N, H, W, _ = input.shape + + x = self.input_proj(input) + x_proj = x + + x1 = input.permute(0, 3, 1, 2) + x1 = self.dw_conv(x1) + offset = self.offset(x1) + mask = self.mask(x1).reshape(N, H, W, self.group, -1) + mask = F.softmax(mask, -1).reshape(N, H, W, -1) + + x = dcnv3_core_pytorch( + x, offset, mask, + self.kernel_size, self.kernel_size, + self.stride, self.stride, + self.pad, self.pad, + self.dilation, self.dilation, + self.group, self.group_channels, + self.offset_scale) + if self.center_feature_scale: + center_feature_scale = self.center_feature_scale_module( + x1, self.center_feature_scale_proj_weight, self.center_feature_scale_proj_bias) + # N, H, W, groups -> N, H, W, groups, 1 -> N, H, W, groups, _d_per_group -> N, H, W, channels + center_feature_scale = center_feature_scale[..., None].repeat( + 1, 1, 1, 1, self.channels // self.group).flatten(-2) + x = x * (1 - center_feature_scale) + x_proj * center_feature_scale + x = self.output_proj(x) + + return x + + +class DCNv3(nn.Module): + def __init__( + self, + channels=64, + kernel_size=3, + dw_kernel_size=None, + stride=1, + pad=1, + dilation=1, + group=4, + offset_scale=1.0, + act_layer='GELU', + norm_layer='LN', + center_feature_scale=False, + use_dcn_v4_op=False, + ): + """ + DCNv3 Module + :param channels + :param kernel_size + :param stride + :param pad + :param dilation + :param group + :param offset_scale + :param act_layer + :param norm_layer + """ + super().__init__() + if channels % group != 0: + raise ValueError( + f'channels must be divisible by group, but got {channels} and {group}') + _d_per_group = channels // group + dw_kernel_size = dw_kernel_size if dw_kernel_size is not None else kernel_size + # you'd better set _d_per_group to a power of 2 which is more efficient in our CUDA implementation + if not _is_power_of_2(_d_per_group): + warnings.warn( + "You'd better set channels in DCNv3 to make the dimension of each attention head a power of 2 " + 'which is more efficient in our CUDA implementation.') + + self.offset_scale = offset_scale + self.channels = channels + self.kernel_size = kernel_size + self.dw_kernel_size = dw_kernel_size + self.stride = stride + self.dilation = dilation + self.pad = pad + self.group = group + self.group_channels = channels // group + self.offset_scale = offset_scale + self.center_feature_scale = center_feature_scale + + self.use_dcn_v4_op = use_dcn_v4_op + + self.dw_conv = nn.Sequential( + nn.Conv2d( + channels, + channels, + kernel_size=dw_kernel_size, + stride=1, + padding=(dw_kernel_size - 1) // 2, + groups=channels), + build_norm_layer( + channels, + norm_layer, + 'channels_first', + 'channels_last'), + build_act_layer(act_layer)) + self.offset = nn.Linear( + channels, + group * kernel_size * kernel_size * 2) + self.mask = nn.Linear( + channels, + group * kernel_size * kernel_size) + self.input_proj = nn.Linear(channels, channels) + self.output_proj = nn.Linear(channels, channels) + self._reset_parameters() + + if center_feature_scale: + self.center_feature_scale_proj_weight = nn.Parameter( + torch.zeros((group, channels), dtype=torch.float)) + self.center_feature_scale_proj_bias = nn.Parameter( + torch.tensor(0.0, dtype=torch.float).view((1,)).repeat(group, )) + self.center_feature_scale_module = CenterFeatureScaleModule() + + def _reset_parameters(self): + constant_(self.offset.weight.data, 0.) + constant_(self.offset.bias.data, 0.) + constant_(self.mask.weight.data, 0.) 
+ constant_(self.mask.bias.data, 0.) + xavier_uniform_(self.input_proj.weight.data) + constant_(self.input_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) + + def forward(self, input): + """ + :param query (N, H, W, C) + :return output (N, H, W, C) + """ + N, H, W, _ = input.shape + + x = self.input_proj(input) + x_proj = x + dtype = x.dtype + + x1 = input.permute(0, 3, 1, 2) + x1 = self.dw_conv(x1) + offset = self.offset(x1) + mask = self.mask(x1).reshape(N, H, W, self.group, -1) + + if not self.use_dcn_v4_op: + mask = F.softmax(mask, -1).reshape(N, H, W, -1).type(dtype) + x = DCNv3Function.apply( + x, offset, mask, + self.kernel_size, self.kernel_size, + self.stride, self.stride, + self.pad, self.pad, + self.dilation, self.dilation, + self.group, self.group_channels, + self.offset_scale, + 256) + else: + # DCNv4 combines offset and weight mask into one tensor `offset_mask`. + # The following code is to align DCNv3 and DCNv4 + offset = offset.view(N, H, W, self.group, -1) + mask = F.softmax(mask, -1) + mask = mask.view(N, H, W, self.group, -1) + offset_mask = torch.cat([offset, mask], -1).view(N, H, W, -1).contiguous() + + # For efficiency, the last dimension of the offset_mask tensor in dcnv4 is a multiple of 8. + K3 = offset_mask.size(-1) + K3_pad = int(math.ceil(K3 / 8) * 8) + pad_dim = K3_pad - K3 + offset_mask = torch.cat([offset_mask, offset_mask.new_zeros([*offset_mask.size()[:3], pad_dim])], -1) + + x = DCNv4Function.apply( + x, offset_mask, + self.kernel_size, self.kernel_size, + self.stride, self.stride, + self.pad, self.pad, + self.dilation, self.dilation, + self.group, self.group_channels, + self.offset_scale, + 256, + False + ) + + if self.center_feature_scale: + center_feature_scale = self.center_feature_scale_module( + x1, self.center_feature_scale_proj_weight, self.center_feature_scale_proj_bias) + # N, H, W, groups -> N, H, W, groups, 1 -> N, H, W, groups, _d_per_group -> N, H, W, channels + center_feature_scale = center_feature_scale[..., None].repeat( + 1, 1, 1, 1, self.channels // self.group).flatten(-2) + x = x * (1 - center_feature_scale) + x_proj * center_feature_scale + x = self.output_proj(x) + + return x diff --git a/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/functions/__init__.py b/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/functions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..063487930895bf7b53bac670cd3d69d570b85833 --- /dev/null +++ b/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/functions/__init__.py @@ -0,0 +1,7 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from .dcnv3_func import DCNv3Function, dcnv3_core_pytorch diff --git a/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/functions/dcnv3_func.py b/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/functions/dcnv3_func.py new file mode 100644 index 0000000000000000000000000000000000000000..198e70c8021ee0d5fa655dad99007965e3ebc46a --- /dev/null +++ b/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/functions/dcnv3_func.py @@ -0,0 +1,187 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + 
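The build copy of `functions/dcnv3_func.py` that follows (identical to the source file above) exposes both the CUDA-backed `DCNv3Function` and the pure-PyTorch reference `dcnv3_core_pytorch`, which the inline comments mark as debug/test only. A rough consistency check between the two could look like the sketch below; it assumes the `DCNv3` extension has been built (e.g. via `make.sh`), that a CUDA device is available, and that the import path resolves, so treat it as an illustrative sketch rather than part of this diff:

import torch
from functions.dcnv3_func import DCNv3Function, dcnv3_core_pytorch  # hypothetical import path

N, H, W, group, group_channels = 2, 16, 16, 4, 16
C, K = group * group_channels, 3 * 3
x = torch.randn(N, H, W, C, device='cuda')
offset = 0.1 * torch.randn(N, H, W, group * K * 2, device='cuda')
mask = torch.softmax(torch.randn(N, H, W, group, K, device='cuda'), -1).reshape(N, H, W, -1)

# kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, dilation_h, dilation_w, group, group_channels, offset_scale
args = (3, 3, 1, 1, 1, 1, 1, 1, group, group_channels, 1.0)
out_ref = dcnv3_core_pytorch(x, offset, mask, *args)         # pure-PyTorch reference
out_cuda = DCNv3Function.apply(x, offset, mask, *args, 256)   # 256 = im2col_step
print(torch.allclose(out_ref, out_cuda, atol=1e-4))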
+from __future__ import absolute_import, division, print_function + +import DCNv3 +import torch +import torch.nn.functional as F +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.cuda.amp import custom_bwd, custom_fwd + + +class DCNv3Function(Function): + @staticmethod + @custom_fwd + def forward( + ctx, input, offset, mask, + kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, + group, group_channels, offset_scale, im2col_step): + ctx.kernel_h = kernel_h + ctx.kernel_w = kernel_w + ctx.stride_h = stride_h + ctx.stride_w = stride_w + ctx.pad_h = pad_h + ctx.pad_w = pad_w + ctx.dilation_h = dilation_h + ctx.dilation_w = dilation_w + ctx.group = group + ctx.group_channels = group_channels + ctx.offset_scale = offset_scale + ctx.im2col_step = im2col_step + output = DCNv3.dcnv3_forward( + input, offset, mask, kernel_h, + kernel_w, stride_h, stride_w, pad_h, + pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale, ctx.im2col_step) + ctx.save_for_backward(input, offset, mask) + + return output + + @staticmethod + @once_differentiable + @custom_bwd + def backward(ctx, grad_output): + input, offset, mask = ctx.saved_tensors + grad_input, grad_offset, grad_mask = \ + DCNv3.dcnv3_backward( + input, offset, mask, ctx.kernel_h, + ctx.kernel_w, ctx.stride_h, ctx.stride_w, ctx.pad_h, + ctx.pad_w, ctx.dilation_h, ctx.dilation_w, ctx.group, + ctx.group_channels, ctx.offset_scale, grad_output.contiguous(), ctx.im2col_step) + + return grad_input, grad_offset, grad_mask, \ + None, None, None, None, None, None, None, None, None, None, None, None + + @staticmethod + def symbolic(g, input, offset, mask, kernel_h, kernel_w, stride_h, + stride_w, pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale, im2col_step): + """Symbolic function for mmdeploy::DCNv3. + + Returns: + DCNv3 op for onnx. 
+ """ + return g.op( + 'mmdeploy::TRTDCNv3', + input, + offset, + mask, + kernel_h_i=int(kernel_h), + kernel_w_i=int(kernel_w), + stride_h_i=int(stride_h), + stride_w_i=int(stride_w), + pad_h_i=int(pad_h), + pad_w_i=int(pad_w), + dilation_h_i=int(dilation_h), + dilation_w_i=int(dilation_w), + group_i=int(group), + group_channels_i=int(group_channels), + offset_scale_f=float(offset_scale), + im2col_step_i=int(im2col_step), + ) + + +def _get_reference_points(spatial_shapes, device, kernel_h, kernel_w, dilation_h, dilation_w, pad_h=0, pad_w=0, stride_h=1, stride_w=1): + _, H_, W_, _ = spatial_shapes + H_out = (H_ - (dilation_h * (kernel_h - 1) + 1)) // stride_h + 1 + W_out = (W_ - (dilation_w * (kernel_w - 1) + 1)) // stride_w + 1 + + ref_y, ref_x = torch.meshgrid( + torch.linspace( + # pad_h + 0.5, + # H_ - pad_h - 0.5, + (dilation_h * (kernel_h - 1)) // 2 + 0.5, + (dilation_h * (kernel_h - 1)) // 2 + 0.5 + (H_out - 1) * stride_h, + H_out, + dtype=torch.float32, + device=device), + torch.linspace( + # pad_w + 0.5, + # W_ - pad_w - 0.5, + (dilation_w * (kernel_w - 1)) // 2 + 0.5, + (dilation_w * (kernel_w - 1)) // 2 + 0.5 + (W_out - 1) * stride_w, + W_out, + dtype=torch.float32, + device=device)) + ref_y = ref_y.reshape(-1)[None] / H_ + ref_x = ref_x.reshape(-1)[None] / W_ + + ref = torch.stack((ref_x, ref_y), -1).reshape( + 1, H_out, W_out, 1, 2) + + return ref + + +def _generate_dilation_grids(spatial_shapes, kernel_h, kernel_w, dilation_h, dilation_w, group, device): + _, H_, W_, _ = spatial_shapes + points_list = [] + x, y = torch.meshgrid( + torch.linspace( + -((dilation_w * (kernel_w - 1)) // 2), + -((dilation_w * (kernel_w - 1)) // 2) + + (kernel_w - 1) * dilation_w, kernel_w, + dtype=torch.float32, + device=device), + torch.linspace( + -((dilation_h * (kernel_h - 1)) // 2), + -((dilation_h * (kernel_h - 1)) // 2) + + (kernel_h - 1) * dilation_h, kernel_h, + dtype=torch.float32, + device=device)) + + points_list.extend([x / W_, y / H_]) + grid = torch.stack(points_list, -1).reshape(-1, 1, 2).\ + repeat(1, group, 1).permute(1, 0, 2) + grid = grid.reshape(1, 1, 1, group * kernel_h * kernel_w, 2) + + return grid + + +def dcnv3_core_pytorch( + input, offset, mask, kernel_h, + kernel_w, stride_h, stride_w, pad_h, + pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale): + # for debug and test only, + # need to use cuda version instead + input = F.pad( + input, + [0, 0, pad_h, pad_h, pad_w, pad_w]) + N_, H_in, W_in, _ = input.shape + _, H_out, W_out, _ = offset.shape + + ref = _get_reference_points( + input.shape, input.device, kernel_h, kernel_w, dilation_h, dilation_w, pad_h, pad_w, stride_h, stride_w) + grid = _generate_dilation_grids( + input.shape, kernel_h, kernel_w, dilation_h, dilation_w, group, input.device) + spatial_norm = torch.tensor([W_in, H_in]).reshape(1, 1, 1, 2).\ + repeat(1, 1, 1, group*kernel_h*kernel_w).to(input.device) + + sampling_locations = (ref + grid * offset_scale).repeat(N_, 1, 1, 1, 1).flatten(3, 4) + \ + offset * offset_scale / spatial_norm + + P_ = kernel_h * kernel_w + sampling_grids = 2 * sampling_locations - 1 + # N_, H_in, W_in, group*group_channels -> N_, H_in*W_in, group*group_channels -> N_, group*group_channels, H_in*W_in -> N_*group, group_channels, H_in, W_in + input_ = input.view(N_, H_in*W_in, group*group_channels).transpose(1, 2).\ + reshape(N_*group, group_channels, H_in, W_in) + # N_, H_out, W_out, group*P_*2 -> N_, H_out*W_out, group, P_, 2 -> N_, group, H_out*W_out, P_, 2 -> N_*group, H_out*W_out, P_, 2 + sampling_grid_ = 
sampling_grids.view(N_, H_out*W_out, group, P_, 2).transpose(1, 2).\ + flatten(0, 1) + # N_*group, group_channels, H_out*W_out, P_ + sampling_input_ = F.grid_sample( + input_, sampling_grid_, mode='bilinear', padding_mode='zeros', align_corners=False) + + # (N_, H_out, W_out, group*P_) -> N_, H_out*W_out, group, P_ -> (N_, group, H_out*W_out, P_) -> (N_*group, 1, H_out*W_out, P_) + mask = mask.view(N_, H_out*W_out, group, P_).transpose(1, 2).\ + reshape(N_*group, 1, H_out*W_out, P_) + output = (sampling_input_ * mask).sum(-1).view(N_, + group*group_channels, H_out*W_out) + + return output.transpose(1, 2).reshape(N_, H_out, W_out, -1).contiguous() diff --git a/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/modules/__init__.py b/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..acb73a867ce93dc1757655181608721aab07028d --- /dev/null +++ b/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/modules/__init__.py @@ -0,0 +1,7 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from .dcnv3 import DCNv3, DCNv3_pytorch diff --git a/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/modules/dcnv3.py b/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/modules/dcnv3.py new file mode 100644 index 0000000000000000000000000000000000000000..788b21179b81b0aa8af3df634e13cf6f6460c00b --- /dev/null +++ b/segmentation/ops_dcnv3/build/lib.linux-x86_64-cpython-39/modules/dcnv3.py @@ -0,0 +1,381 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +import warnings + +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn.init import constant_, xavier_uniform_ + +from ..functions import DCNv3Function, dcnv3_core_pytorch + +try: + from DCNv4.functions import DCNv4Function +except: + warnings.warn('Now, we support DCNv4 in InternImage.') +import math + + +class to_channels_first(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 3, 1, 2) + + +class to_channels_last(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 2, 3, 1) + + +def build_norm_layer(dim, + norm_layer, + in_format='channels_last', + out_format='channels_last', + eps=1e-6): + layers = [] + if norm_layer == 'BN': + if in_format == 'channels_last': + layers.append(to_channels_first()) + layers.append(nn.BatchNorm2d(dim)) + if out_format == 'channels_last': + layers.append(to_channels_last()) + elif norm_layer == 'LN': + if in_format == 'channels_first': + layers.append(to_channels_last()) + layers.append(nn.LayerNorm(dim, eps=eps)) + if out_format == 'channels_first': + layers.append(to_channels_first()) + else: + raise NotImplementedError( + f'build_norm_layer does not support {norm_layer}') + return nn.Sequential(*layers) + + +def build_act_layer(act_layer): + if act_layer == 'ReLU': + return nn.ReLU(inplace=True) + elif act_layer == 'SiLU': + return nn.SiLU(inplace=True) + elif act_layer == 'GELU': + return nn.GELU() + + raise NotImplementedError(f'build_act_layer does not 
support {act_layer}') + + +def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format(n, type(n))) + + return (n & (n - 1) == 0) and n != 0 + + +class CenterFeatureScaleModule(nn.Module): + def forward(self, + query, + center_feature_scale_proj_weight, + center_feature_scale_proj_bias): + center_feature_scale = F.linear(query, + weight=center_feature_scale_proj_weight, + bias=center_feature_scale_proj_bias).sigmoid() + return center_feature_scale + + +class DCNv3_pytorch(nn.Module): + def __init__( + self, + channels=64, + kernel_size=3, + dw_kernel_size=None, + stride=1, + pad=1, + dilation=1, + group=4, + offset_scale=1.0, + act_layer='GELU', + norm_layer='LN', + center_feature_scale=False): + """ + DCNv3 Module + :param channels + :param kernel_size + :param stride + :param pad + :param dilation + :param group + :param offset_scale + :param act_layer + :param norm_layer + """ + super().__init__() + if channels % group != 0: + raise ValueError( + f'channels must be divisible by group, but got {channels} and {group}') + _d_per_group = channels // group + dw_kernel_size = dw_kernel_size if dw_kernel_size is not None else kernel_size + # you'd better set _d_per_group to a power of 2 which is more efficient in our CUDA implementation + if not _is_power_of_2(_d_per_group): + warnings.warn( + "You'd better set channels in DCNv3 to make the dimension of each attention head a power of 2 " + 'which is more efficient in our CUDA implementation.') + + self.offset_scale = offset_scale + self.channels = channels + self.kernel_size = kernel_size + self.dw_kernel_size = dw_kernel_size + self.stride = stride + self.dilation = dilation + self.pad = pad + self.group = group + self.group_channels = channels // group + self.offset_scale = offset_scale + self.center_feature_scale = center_feature_scale + + self.dw_conv = nn.Sequential( + nn.Conv2d( + channels, + channels, + kernel_size=dw_kernel_size, + stride=1, + padding=(dw_kernel_size - 1) // 2, + groups=channels), + build_norm_layer( + channels, + norm_layer, + 'channels_first', + 'channels_last'), + build_act_layer(act_layer)) + self.offset = nn.Linear( + channels, + group * kernel_size * kernel_size * 2) + self.mask = nn.Linear( + channels, + group * kernel_size * kernel_size) + self.input_proj = nn.Linear(channels, channels) + self.output_proj = nn.Linear(channels, channels) + self._reset_parameters() + + if center_feature_scale: + self.center_feature_scale_proj_weight = nn.Parameter( + torch.zeros((group, channels), dtype=torch.float)) + self.center_feature_scale_proj_bias = nn.Parameter( + torch.tensor(0.0, dtype=torch.float).view((1,)).repeat(group, )) + self.center_feature_scale_module = CenterFeatureScaleModule() + + def _reset_parameters(self): + constant_(self.offset.weight.data, 0.) + constant_(self.offset.bias.data, 0.) + constant_(self.mask.weight.data, 0.) + constant_(self.mask.bias.data, 0.) + xavier_uniform_(self.input_proj.weight.data) + constant_(self.input_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) 
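Note how `_reset_parameters` zero-initialises the offset and mask branches while the projections get Xavier initialisation: with all-zero offsets the module samples the regular dilated kernel grid, and with all-zero mask logits the softmax assigns every sampling point the same weight, so at initialisation DCNv3 behaves like a projected local average. A tiny sketch of the uniform-softmax part of that observation:

import torch
import torch.nn.functional as F

K = 9                              # 3x3 kernel -> 9 sampling points per group
logits = torch.zeros(1, 4, K)      # all-zero mask logits for 4 groups, as produced at init
weights = F.softmax(logits, dim=-1)
print(weights[0, 0])               # nine entries of 1/9 each -> a plain average over the grid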
+ + def forward(self, input): + """ + :param query (N, H, W, C) + :return output (N, H, W, C) + """ + N, H, W, _ = input.shape + + x = self.input_proj(input) + x_proj = x + + x1 = input.permute(0, 3, 1, 2) + x1 = self.dw_conv(x1) + offset = self.offset(x1) + mask = self.mask(x1).reshape(N, H, W, self.group, -1) + mask = F.softmax(mask, -1).reshape(N, H, W, -1) + + x = dcnv3_core_pytorch( + x, offset, mask, + self.kernel_size, self.kernel_size, + self.stride, self.stride, + self.pad, self.pad, + self.dilation, self.dilation, + self.group, self.group_channels, + self.offset_scale) + if self.center_feature_scale: + center_feature_scale = self.center_feature_scale_module( + x1, self.center_feature_scale_proj_weight, self.center_feature_scale_proj_bias) + # N, H, W, groups -> N, H, W, groups, 1 -> N, H, W, groups, _d_per_group -> N, H, W, channels + center_feature_scale = center_feature_scale[..., None].repeat( + 1, 1, 1, 1, self.channels // self.group).flatten(-2) + x = x * (1 - center_feature_scale) + x_proj * center_feature_scale + x = self.output_proj(x) + + return x + + +class DCNv3(nn.Module): + def __init__( + self, + channels=64, + kernel_size=3, + dw_kernel_size=None, + stride=1, + pad=1, + dilation=1, + group=4, + offset_scale=1.0, + act_layer='GELU', + norm_layer='LN', + center_feature_scale=False, + use_dcn_v4_op=False, + ): + """ + DCNv3 Module + :param channels + :param kernel_size + :param stride + :param pad + :param dilation + :param group + :param offset_scale + :param act_layer + :param norm_layer + """ + super().__init__() + if channels % group != 0: + raise ValueError( + f'channels must be divisible by group, but got {channels} and {group}') + _d_per_group = channels // group + dw_kernel_size = dw_kernel_size if dw_kernel_size is not None else kernel_size + # you'd better set _d_per_group to a power of 2 which is more efficient in our CUDA implementation + if not _is_power_of_2(_d_per_group): + warnings.warn( + "You'd better set channels in DCNv3 to make the dimension of each attention head a power of 2 " + 'which is more efficient in our CUDA implementation.') + + self.offset_scale = offset_scale + self.channels = channels + self.kernel_size = kernel_size + self.dw_kernel_size = dw_kernel_size + self.stride = stride + self.dilation = dilation + self.pad = pad + self.group = group + self.group_channels = channels // group + self.offset_scale = offset_scale + self.center_feature_scale = center_feature_scale + + self.use_dcn_v4_op = use_dcn_v4_op + + self.dw_conv = nn.Sequential( + nn.Conv2d( + channels, + channels, + kernel_size=dw_kernel_size, + stride=1, + padding=(dw_kernel_size - 1) // 2, + groups=channels), + build_norm_layer( + channels, + norm_layer, + 'channels_first', + 'channels_last'), + build_act_layer(act_layer)) + self.offset = nn.Linear( + channels, + group * kernel_size * kernel_size * 2) + self.mask = nn.Linear( + channels, + group * kernel_size * kernel_size) + self.input_proj = nn.Linear(channels, channels) + self.output_proj = nn.Linear(channels, channels) + self._reset_parameters() + + if center_feature_scale: + self.center_feature_scale_proj_weight = nn.Parameter( + torch.zeros((group, channels), dtype=torch.float)) + self.center_feature_scale_proj_bias = nn.Parameter( + torch.tensor(0.0, dtype=torch.float).view((1,)).repeat(group, )) + self.center_feature_scale_module = CenterFeatureScaleModule() + + def _reset_parameters(self): + constant_(self.offset.weight.data, 0.) + constant_(self.offset.bias.data, 0.) + constant_(self.mask.weight.data, 0.) 
+ constant_(self.mask.bias.data, 0.) + xavier_uniform_(self.input_proj.weight.data) + constant_(self.input_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) + + def forward(self, input): + """ + :param query (N, H, W, C) + :return output (N, H, W, C) + """ + N, H, W, _ = input.shape + + x = self.input_proj(input) + x_proj = x + dtype = x.dtype + + x1 = input.permute(0, 3, 1, 2) + x1 = self.dw_conv(x1) + offset = self.offset(x1) + mask = self.mask(x1).reshape(N, H, W, self.group, -1) + + if not self.use_dcn_v4_op: + mask = F.softmax(mask, -1).reshape(N, H, W, -1).type(dtype) + x = DCNv3Function.apply( + x, offset, mask, + self.kernel_size, self.kernel_size, + self.stride, self.stride, + self.pad, self.pad, + self.dilation, self.dilation, + self.group, self.group_channels, + self.offset_scale, + 256) + else: + # DCNv4 combines offset and weight mask into one tensor `offset_mask`. + # The following code is to align DCNv3 and DCNv4 + offset = offset.view(N, H, W, self.group, -1) + mask = F.softmax(mask, -1) + mask = mask.view(N, H, W, self.group, -1) + offset_mask = torch.cat([offset, mask], -1).view(N, H, W, -1).contiguous() + + # For efficiency, the last dimension of the offset_mask tensor in dcnv4 is a multiple of 8. + K3 = offset_mask.size(-1) + K3_pad = int(math.ceil(K3 / 8) * 8) + pad_dim = K3_pad - K3 + offset_mask = torch.cat([offset_mask, offset_mask.new_zeros([*offset_mask.size()[:3], pad_dim])], -1) + + x = DCNv4Function.apply( + x, offset_mask, + self.kernel_size, self.kernel_size, + self.stride, self.stride, + self.pad, self.pad, + self.dilation, self.dilation, + self.group, self.group_channels, + self.offset_scale, + 256, + False + ) + + if self.center_feature_scale: + center_feature_scale = self.center_feature_scale_module( + x1, self.center_feature_scale_proj_weight, self.center_feature_scale_proj_bias) + # N, H, W, groups -> N, H, W, groups, 1 -> N, H, W, groups, _d_per_group -> N, H, W, channels + center_feature_scale = center_feature_scale[..., None].repeat( + 1, 1, 1, 1, self.channels // self.group).flatten(-2) + x = x * (1 - center_feature_scale) + x_proj * center_feature_scale + x = self.output_proj(x) + + return x diff --git a/segmentation/ops_dcnv3/functions/__init__.py b/segmentation/ops_dcnv3/functions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..063487930895bf7b53bac670cd3d69d570b85833 --- /dev/null +++ b/segmentation/ops_dcnv3/functions/__init__.py @@ -0,0 +1,7 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from .dcnv3_func import DCNv3Function, dcnv3_core_pytorch diff --git a/segmentation/ops_dcnv3/functions/__pycache__/__init__.cpython-39.pyc b/segmentation/ops_dcnv3/functions/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..273f8706bc77f9f9c0721c2bac9f4022426d7d37 Binary files /dev/null and b/segmentation/ops_dcnv3/functions/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/ops_dcnv3/functions/__pycache__/dcnv3_func.cpython-39.pyc b/segmentation/ops_dcnv3/functions/__pycache__/dcnv3_func.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b638fed8cda3fe20629a3cf50dbe2a42bb44e97a Binary files /dev/null and b/segmentation/ops_dcnv3/functions/__pycache__/dcnv3_func.cpython-39.pyc 
differ diff --git a/segmentation/ops_dcnv3/functions/dcnv3_func.py b/segmentation/ops_dcnv3/functions/dcnv3_func.py new file mode 100644 index 0000000000000000000000000000000000000000..198e70c8021ee0d5fa655dad99007965e3ebc46a --- /dev/null +++ b/segmentation/ops_dcnv3/functions/dcnv3_func.py @@ -0,0 +1,187 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +import DCNv3 +import torch +import torch.nn.functional as F +from torch.autograd import Function +from torch.autograd.function import once_differentiable +from torch.cuda.amp import custom_bwd, custom_fwd + + +class DCNv3Function(Function): + @staticmethod + @custom_fwd + def forward( + ctx, input, offset, mask, + kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, + group, group_channels, offset_scale, im2col_step): + ctx.kernel_h = kernel_h + ctx.kernel_w = kernel_w + ctx.stride_h = stride_h + ctx.stride_w = stride_w + ctx.pad_h = pad_h + ctx.pad_w = pad_w + ctx.dilation_h = dilation_h + ctx.dilation_w = dilation_w + ctx.group = group + ctx.group_channels = group_channels + ctx.offset_scale = offset_scale + ctx.im2col_step = im2col_step + output = DCNv3.dcnv3_forward( + input, offset, mask, kernel_h, + kernel_w, stride_h, stride_w, pad_h, + pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale, ctx.im2col_step) + ctx.save_for_backward(input, offset, mask) + + return output + + @staticmethod + @once_differentiable + @custom_bwd + def backward(ctx, grad_output): + input, offset, mask = ctx.saved_tensors + grad_input, grad_offset, grad_mask = \ + DCNv3.dcnv3_backward( + input, offset, mask, ctx.kernel_h, + ctx.kernel_w, ctx.stride_h, ctx.stride_w, ctx.pad_h, + ctx.pad_w, ctx.dilation_h, ctx.dilation_w, ctx.group, + ctx.group_channels, ctx.offset_scale, grad_output.contiguous(), ctx.im2col_step) + + return grad_input, grad_offset, grad_mask, \ + None, None, None, None, None, None, None, None, None, None, None, None + + @staticmethod + def symbolic(g, input, offset, mask, kernel_h, kernel_w, stride_h, + stride_w, pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale, im2col_step): + """Symbolic function for mmdeploy::DCNv3. + + Returns: + DCNv3 op for onnx. 
+ """ + return g.op( + 'mmdeploy::TRTDCNv3', + input, + offset, + mask, + kernel_h_i=int(kernel_h), + kernel_w_i=int(kernel_w), + stride_h_i=int(stride_h), + stride_w_i=int(stride_w), + pad_h_i=int(pad_h), + pad_w_i=int(pad_w), + dilation_h_i=int(dilation_h), + dilation_w_i=int(dilation_w), + group_i=int(group), + group_channels_i=int(group_channels), + offset_scale_f=float(offset_scale), + im2col_step_i=int(im2col_step), + ) + + +def _get_reference_points(spatial_shapes, device, kernel_h, kernel_w, dilation_h, dilation_w, pad_h=0, pad_w=0, stride_h=1, stride_w=1): + _, H_, W_, _ = spatial_shapes + H_out = (H_ - (dilation_h * (kernel_h - 1) + 1)) // stride_h + 1 + W_out = (W_ - (dilation_w * (kernel_w - 1) + 1)) // stride_w + 1 + + ref_y, ref_x = torch.meshgrid( + torch.linspace( + # pad_h + 0.5, + # H_ - pad_h - 0.5, + (dilation_h * (kernel_h - 1)) // 2 + 0.5, + (dilation_h * (kernel_h - 1)) // 2 + 0.5 + (H_out - 1) * stride_h, + H_out, + dtype=torch.float32, + device=device), + torch.linspace( + # pad_w + 0.5, + # W_ - pad_w - 0.5, + (dilation_w * (kernel_w - 1)) // 2 + 0.5, + (dilation_w * (kernel_w - 1)) // 2 + 0.5 + (W_out - 1) * stride_w, + W_out, + dtype=torch.float32, + device=device)) + ref_y = ref_y.reshape(-1)[None] / H_ + ref_x = ref_x.reshape(-1)[None] / W_ + + ref = torch.stack((ref_x, ref_y), -1).reshape( + 1, H_out, W_out, 1, 2) + + return ref + + +def _generate_dilation_grids(spatial_shapes, kernel_h, kernel_w, dilation_h, dilation_w, group, device): + _, H_, W_, _ = spatial_shapes + points_list = [] + x, y = torch.meshgrid( + torch.linspace( + -((dilation_w * (kernel_w - 1)) // 2), + -((dilation_w * (kernel_w - 1)) // 2) + + (kernel_w - 1) * dilation_w, kernel_w, + dtype=torch.float32, + device=device), + torch.linspace( + -((dilation_h * (kernel_h - 1)) // 2), + -((dilation_h * (kernel_h - 1)) // 2) + + (kernel_h - 1) * dilation_h, kernel_h, + dtype=torch.float32, + device=device)) + + points_list.extend([x / W_, y / H_]) + grid = torch.stack(points_list, -1).reshape(-1, 1, 2).\ + repeat(1, group, 1).permute(1, 0, 2) + grid = grid.reshape(1, 1, 1, group * kernel_h * kernel_w, 2) + + return grid + + +def dcnv3_core_pytorch( + input, offset, mask, kernel_h, + kernel_w, stride_h, stride_w, pad_h, + pad_w, dilation_h, dilation_w, group, + group_channels, offset_scale): + # for debug and test only, + # need to use cuda version instead + input = F.pad( + input, + [0, 0, pad_h, pad_h, pad_w, pad_w]) + N_, H_in, W_in, _ = input.shape + _, H_out, W_out, _ = offset.shape + + ref = _get_reference_points( + input.shape, input.device, kernel_h, kernel_w, dilation_h, dilation_w, pad_h, pad_w, stride_h, stride_w) + grid = _generate_dilation_grids( + input.shape, kernel_h, kernel_w, dilation_h, dilation_w, group, input.device) + spatial_norm = torch.tensor([W_in, H_in]).reshape(1, 1, 1, 2).\ + repeat(1, 1, 1, group*kernel_h*kernel_w).to(input.device) + + sampling_locations = (ref + grid * offset_scale).repeat(N_, 1, 1, 1, 1).flatten(3, 4) + \ + offset * offset_scale / spatial_norm + + P_ = kernel_h * kernel_w + sampling_grids = 2 * sampling_locations - 1 + # N_, H_in, W_in, group*group_channels -> N_, H_in*W_in, group*group_channels -> N_, group*group_channels, H_in*W_in -> N_*group, group_channels, H_in, W_in + input_ = input.view(N_, H_in*W_in, group*group_channels).transpose(1, 2).\ + reshape(N_*group, group_channels, H_in, W_in) + # N_, H_out, W_out, group*P_*2 -> N_, H_out*W_out, group, P_, 2 -> N_, group, H_out*W_out, P_, 2 -> N_*group, H_out*W_out, P_, 2 + sampling_grid_ = 
sampling_grids.view(N_, H_out*W_out, group, P_, 2).transpose(1, 2).\ + flatten(0, 1) + # N_*group, group_channels, H_out*W_out, P_ + sampling_input_ = F.grid_sample( + input_, sampling_grid_, mode='bilinear', padding_mode='zeros', align_corners=False) + + # (N_, H_out, W_out, group*P_) -> N_, H_out*W_out, group, P_ -> (N_, group, H_out*W_out, P_) -> (N_*group, 1, H_out*W_out, P_) + mask = mask.view(N_, H_out*W_out, group, P_).transpose(1, 2).\ + reshape(N_*group, 1, H_out*W_out, P_) + output = (sampling_input_ * mask).sum(-1).view(N_, + group*group_channels, H_out*W_out) + + return output.transpose(1, 2).reshape(N_, H_out, W_out, -1).contiguous() diff --git a/segmentation/ops_dcnv3/make.sh b/segmentation/ops_dcnv3/make.sh new file mode 100644 index 0000000000000000000000000000000000000000..9a501794748cb190c2abe293a86dccbc46f3e131 --- /dev/null +++ b/segmentation/ops_dcnv3/make.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +python setup.py build install diff --git a/segmentation/ops_dcnv3/modules/__init__.py b/segmentation/ops_dcnv3/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..acb73a867ce93dc1757655181608721aab07028d --- /dev/null +++ b/segmentation/ops_dcnv3/modules/__init__.py @@ -0,0 +1,7 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from .dcnv3 import DCNv3, DCNv3_pytorch diff --git a/segmentation/ops_dcnv3/modules/__pycache__/__init__.cpython-39.pyc b/segmentation/ops_dcnv3/modules/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4f77ac24c28f5efea6fcdefb356f84965b06059 Binary files /dev/null and b/segmentation/ops_dcnv3/modules/__pycache__/__init__.cpython-39.pyc differ diff --git a/segmentation/ops_dcnv3/modules/__pycache__/dcnv3.cpython-39.pyc b/segmentation/ops_dcnv3/modules/__pycache__/dcnv3.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25084b82948e544a3b863194fc467700e39648cf Binary files /dev/null and b/segmentation/ops_dcnv3/modules/__pycache__/dcnv3.cpython-39.pyc differ diff --git a/segmentation/ops_dcnv3/modules/dcnv3.py b/segmentation/ops_dcnv3/modules/dcnv3.py new file mode 100644 index 0000000000000000000000000000000000000000..788b21179b81b0aa8af3df634e13cf6f6460c00b --- /dev/null +++ b/segmentation/ops_dcnv3/modules/dcnv3.py @@ -0,0 +1,381 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +import warnings + +import torch +import torch.nn.functional as F +from torch import nn +from torch.nn.init import constant_, xavier_uniform_ + +from ..functions import DCNv3Function, dcnv3_core_pytorch + +try: + from DCNv4.functions import DCNv4Function +except: + warnings.warn('Now, we support DCNv4 in InternImage.') +import math + + +class to_channels_first(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 3, 1, 2) + + +class 
to_channels_last(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.permute(0, 2, 3, 1) + + +def build_norm_layer(dim, + norm_layer, + in_format='channels_last', + out_format='channels_last', + eps=1e-6): + layers = [] + if norm_layer == 'BN': + if in_format == 'channels_last': + layers.append(to_channels_first()) + layers.append(nn.BatchNorm2d(dim)) + if out_format == 'channels_last': + layers.append(to_channels_last()) + elif norm_layer == 'LN': + if in_format == 'channels_first': + layers.append(to_channels_last()) + layers.append(nn.LayerNorm(dim, eps=eps)) + if out_format == 'channels_first': + layers.append(to_channels_first()) + else: + raise NotImplementedError( + f'build_norm_layer does not support {norm_layer}') + return nn.Sequential(*layers) + + +def build_act_layer(act_layer): + if act_layer == 'ReLU': + return nn.ReLU(inplace=True) + elif act_layer == 'SiLU': + return nn.SiLU(inplace=True) + elif act_layer == 'GELU': + return nn.GELU() + + raise NotImplementedError(f'build_act_layer does not support {act_layer}') + + +def _is_power_of_2(n): + if (not isinstance(n, int)) or (n < 0): + raise ValueError( + 'invalid input for _is_power_of_2: {} (type: {})'.format(n, type(n))) + + return (n & (n - 1) == 0) and n != 0 + + +class CenterFeatureScaleModule(nn.Module): + def forward(self, + query, + center_feature_scale_proj_weight, + center_feature_scale_proj_bias): + center_feature_scale = F.linear(query, + weight=center_feature_scale_proj_weight, + bias=center_feature_scale_proj_bias).sigmoid() + return center_feature_scale + + +class DCNv3_pytorch(nn.Module): + def __init__( + self, + channels=64, + kernel_size=3, + dw_kernel_size=None, + stride=1, + pad=1, + dilation=1, + group=4, + offset_scale=1.0, + act_layer='GELU', + norm_layer='LN', + center_feature_scale=False): + """ + DCNv3 Module + :param channels + :param kernel_size + :param stride + :param pad + :param dilation + :param group + :param offset_scale + :param act_layer + :param norm_layer + """ + super().__init__() + if channels % group != 0: + raise ValueError( + f'channels must be divisible by group, but got {channels} and {group}') + _d_per_group = channels // group + dw_kernel_size = dw_kernel_size if dw_kernel_size is not None else kernel_size + # you'd better set _d_per_group to a power of 2 which is more efficient in our CUDA implementation + if not _is_power_of_2(_d_per_group): + warnings.warn( + "You'd better set channels in DCNv3 to make the dimension of each attention head a power of 2 " + 'which is more efficient in our CUDA implementation.') + + self.offset_scale = offset_scale + self.channels = channels + self.kernel_size = kernel_size + self.dw_kernel_size = dw_kernel_size + self.stride = stride + self.dilation = dilation + self.pad = pad + self.group = group + self.group_channels = channels // group + self.offset_scale = offset_scale + self.center_feature_scale = center_feature_scale + + self.dw_conv = nn.Sequential( + nn.Conv2d( + channels, + channels, + kernel_size=dw_kernel_size, + stride=1, + padding=(dw_kernel_size - 1) // 2, + groups=channels), + build_norm_layer( + channels, + norm_layer, + 'channels_first', + 'channels_last'), + build_act_layer(act_layer)) + self.offset = nn.Linear( + channels, + group * kernel_size * kernel_size * 2) + self.mask = nn.Linear( + channels, + group * kernel_size * kernel_size) + self.input_proj = nn.Linear(channels, channels) + self.output_proj = nn.Linear(channels, channels) + self._reset_parameters() + + if 
center_feature_scale: + self.center_feature_scale_proj_weight = nn.Parameter( + torch.zeros((group, channels), dtype=torch.float)) + self.center_feature_scale_proj_bias = nn.Parameter( + torch.tensor(0.0, dtype=torch.float).view((1,)).repeat(group, )) + self.center_feature_scale_module = CenterFeatureScaleModule() + + def _reset_parameters(self): + constant_(self.offset.weight.data, 0.) + constant_(self.offset.bias.data, 0.) + constant_(self.mask.weight.data, 0.) + constant_(self.mask.bias.data, 0.) + xavier_uniform_(self.input_proj.weight.data) + constant_(self.input_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) + + def forward(self, input): + """ + :param query (N, H, W, C) + :return output (N, H, W, C) + """ + N, H, W, _ = input.shape + + x = self.input_proj(input) + x_proj = x + + x1 = input.permute(0, 3, 1, 2) + x1 = self.dw_conv(x1) + offset = self.offset(x1) + mask = self.mask(x1).reshape(N, H, W, self.group, -1) + mask = F.softmax(mask, -1).reshape(N, H, W, -1) + + x = dcnv3_core_pytorch( + x, offset, mask, + self.kernel_size, self.kernel_size, + self.stride, self.stride, + self.pad, self.pad, + self.dilation, self.dilation, + self.group, self.group_channels, + self.offset_scale) + if self.center_feature_scale: + center_feature_scale = self.center_feature_scale_module( + x1, self.center_feature_scale_proj_weight, self.center_feature_scale_proj_bias) + # N, H, W, groups -> N, H, W, groups, 1 -> N, H, W, groups, _d_per_group -> N, H, W, channels + center_feature_scale = center_feature_scale[..., None].repeat( + 1, 1, 1, 1, self.channels // self.group).flatten(-2) + x = x * (1 - center_feature_scale) + x_proj * center_feature_scale + x = self.output_proj(x) + + return x + + +class DCNv3(nn.Module): + def __init__( + self, + channels=64, + kernel_size=3, + dw_kernel_size=None, + stride=1, + pad=1, + dilation=1, + group=4, + offset_scale=1.0, + act_layer='GELU', + norm_layer='LN', + center_feature_scale=False, + use_dcn_v4_op=False, + ): + """ + DCNv3 Module + :param channels + :param kernel_size + :param stride + :param pad + :param dilation + :param group + :param offset_scale + :param act_layer + :param norm_layer + """ + super().__init__() + if channels % group != 0: + raise ValueError( + f'channels must be divisible by group, but got {channels} and {group}') + _d_per_group = channels // group + dw_kernel_size = dw_kernel_size if dw_kernel_size is not None else kernel_size + # you'd better set _d_per_group to a power of 2 which is more efficient in our CUDA implementation + if not _is_power_of_2(_d_per_group): + warnings.warn( + "You'd better set channels in DCNv3 to make the dimension of each attention head a power of 2 " + 'which is more efficient in our CUDA implementation.') + + self.offset_scale = offset_scale + self.channels = channels + self.kernel_size = kernel_size + self.dw_kernel_size = dw_kernel_size + self.stride = stride + self.dilation = dilation + self.pad = pad + self.group = group + self.group_channels = channels // group + self.offset_scale = offset_scale + self.center_feature_scale = center_feature_scale + + self.use_dcn_v4_op = use_dcn_v4_op + + self.dw_conv = nn.Sequential( + nn.Conv2d( + channels, + channels, + kernel_size=dw_kernel_size, + stride=1, + padding=(dw_kernel_size - 1) // 2, + groups=channels), + build_norm_layer( + channels, + norm_layer, + 'channels_first', + 'channels_last'), + build_act_layer(act_layer)) + self.offset = nn.Linear( + channels, + group * kernel_size * kernel_size * 
2) + self.mask = nn.Linear( + channels, + group * kernel_size * kernel_size) + self.input_proj = nn.Linear(channels, channels) + self.output_proj = nn.Linear(channels, channels) + self._reset_parameters() + + if center_feature_scale: + self.center_feature_scale_proj_weight = nn.Parameter( + torch.zeros((group, channels), dtype=torch.float)) + self.center_feature_scale_proj_bias = nn.Parameter( + torch.tensor(0.0, dtype=torch.float).view((1,)).repeat(group, )) + self.center_feature_scale_module = CenterFeatureScaleModule() + + def _reset_parameters(self): + constant_(self.offset.weight.data, 0.) + constant_(self.offset.bias.data, 0.) + constant_(self.mask.weight.data, 0.) + constant_(self.mask.bias.data, 0.) + xavier_uniform_(self.input_proj.weight.data) + constant_(self.input_proj.bias.data, 0.) + xavier_uniform_(self.output_proj.weight.data) + constant_(self.output_proj.bias.data, 0.) + + def forward(self, input): + """ + :param query (N, H, W, C) + :return output (N, H, W, C) + """ + N, H, W, _ = input.shape + + x = self.input_proj(input) + x_proj = x + dtype = x.dtype + + x1 = input.permute(0, 3, 1, 2) + x1 = self.dw_conv(x1) + offset = self.offset(x1) + mask = self.mask(x1).reshape(N, H, W, self.group, -1) + + if not self.use_dcn_v4_op: + mask = F.softmax(mask, -1).reshape(N, H, W, -1).type(dtype) + x = DCNv3Function.apply( + x, offset, mask, + self.kernel_size, self.kernel_size, + self.stride, self.stride, + self.pad, self.pad, + self.dilation, self.dilation, + self.group, self.group_channels, + self.offset_scale, + 256) + else: + # DCNv4 combines offset and weight mask into one tensor `offset_mask`. + # The following code is to align DCNv3 and DCNv4 + offset = offset.view(N, H, W, self.group, -1) + mask = F.softmax(mask, -1) + mask = mask.view(N, H, W, self.group, -1) + offset_mask = torch.cat([offset, mask], -1).view(N, H, W, -1).contiguous() + + # For efficiency, the last dimension of the offset_mask tensor in dcnv4 is a multiple of 8. 
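+            # Worked example (illustrative only, assuming the module defaults
+            # above: kernel_size=3, group=4): each group contributes
+            # K*K*2 = 18 offset channels plus K*K = 9 mask channels, so
+            # offset_mask carries group * 27 = 108 channels per location;
+            # ceil(108 / 8) * 8 = 112, i.e. pad_dim = 4 zero channels are
+            # appended by the padding step below.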
+ K3 = offset_mask.size(-1) + K3_pad = int(math.ceil(K3 / 8) * 8) + pad_dim = K3_pad - K3 + offset_mask = torch.cat([offset_mask, offset_mask.new_zeros([*offset_mask.size()[:3], pad_dim])], -1) + + x = DCNv4Function.apply( + x, offset_mask, + self.kernel_size, self.kernel_size, + self.stride, self.stride, + self.pad, self.pad, + self.dilation, self.dilation, + self.group, self.group_channels, + self.offset_scale, + 256, + False + ) + + if self.center_feature_scale: + center_feature_scale = self.center_feature_scale_module( + x1, self.center_feature_scale_proj_weight, self.center_feature_scale_proj_bias) + # N, H, W, groups -> N, H, W, groups, 1 -> N, H, W, groups, _d_per_group -> N, H, W, channels + center_feature_scale = center_feature_scale[..., None].repeat( + 1, 1, 1, 1, self.channels // self.group).flatten(-2) + x = x * (1 - center_feature_scale) + x_proj * center_feature_scale + x = self.output_proj(x) + + return x diff --git a/segmentation/ops_dcnv3/setup.py b/segmentation/ops_dcnv3/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..e06f2ea93b4015a17be200639ce8aaf7e1d38a07 --- /dev/null +++ b/segmentation/ops_dcnv3/setup.py @@ -0,0 +1,70 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +import glob +import os + +import torch +from setuptools import find_packages, setup +from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension + +requirements = ['torch', 'torchvision'] + + +def get_extensions(): + this_dir = os.path.dirname(os.path.abspath(__file__)) + extensions_dir = os.path.join(this_dir, 'src') + + main_file = glob.glob(os.path.join(extensions_dir, '*.cpp')) + source_cpu = glob.glob(os.path.join(extensions_dir, 'cpu', '*.cpp')) + source_cuda = glob.glob(os.path.join(extensions_dir, 'cuda', '*.cu')) + + sources = main_file + source_cpu + extension = CppExtension + extra_compile_args = {'cxx': []} + define_macros = [] + + if torch.cuda.is_available() and CUDA_HOME is not None: + extension = CUDAExtension + sources += source_cuda + define_macros += [('WITH_CUDA', None)] + extra_compile_args['nvcc'] = [ + # "-DCUDA_HAS_FP16=1", + # "-D__CUDA_NO_HALF_OPERATORS__", + # "-D__CUDA_NO_HALF_CONVERSIONS__", + # "-D__CUDA_NO_HALF2_OPERATORS__", + ] + else: + raise NotImplementedError('Cuda is not availabel') + + sources = [os.path.join(extensions_dir, s) for s in sources] + include_dirs = [extensions_dir] + ext_modules = [ + extension( + 'DCNv3', + sources, + include_dirs=include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args, + ) + ] + return ext_modules + + +setup( + name='DCNv3', + version='1.0', + author='InternImage', + url='https://github.com/OpenGVLab/InternImage', + description= + 'PyTorch Wrapper for CUDA Functions of DCNv3', + packages=find_packages(exclude=( + 'configs', + 'tests', + )), + ext_modules=get_extensions(), + cmdclass={'build_ext': torch.utils.cpp_extension.BuildExtension}, +) diff --git a/segmentation/ops_dcnv3/src/cpu/dcnv3_cpu.cpp b/segmentation/ops_dcnv3/src/cpu/dcnv3_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a3bddc1814e0cae6076102b94bed415f45f61f14 --- /dev/null +++ b/segmentation/ops_dcnv3/src/cpu/dcnv3_cpu.cpp @@ -0,0 +1,37 @@ +/*! 
+************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include + +#include +#include + +at::Tensor dcnv3_cpu_forward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const int im2col_step) { + AT_ERROR("Not implement on cpu"); +} + +std::vector +dcnv3_cpu_backward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const at::Tensor &grad_output, const int im2col_step) { + AT_ERROR("Not implement on cpu"); +} diff --git a/segmentation/ops_dcnv3/src/cpu/dcnv3_cpu.h b/segmentation/ops_dcnv3/src/cpu/dcnv3_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..d457bcbddf7c8fead715109591683012d341d4ea --- /dev/null +++ b/segmentation/ops_dcnv3/src/cpu/dcnv3_cpu.h @@ -0,0 +1,31 @@ +/*! +************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once +#include + +at::Tensor dcnv3_cpu_forward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const int im2col_step); + +std::vector +dcnv3_cpu_backward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const at::Tensor &grad_output, const int im2col_step); diff --git a/segmentation/ops_dcnv3/src/cuda/dcnv3_cuda.cu b/segmentation/ops_dcnv3/src/cuda/dcnv3_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..4ddd2b9fedce43ec8a0f25c3b005af7415929c55 --- /dev/null +++ b/segmentation/ops_dcnv3/src/cuda/dcnv3_cuda.cu @@ -0,0 +1,174 @@ +/*! 
+************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include "cuda/dcnv3_im2col_cuda.cuh" +#include + +#include +#include +#include +#include +#include + +at::Tensor dcnv3_cuda_forward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, + const float offset_scale, const int im2col_step) { + AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); + AT_ASSERTM(offset.is_contiguous(), "offset tensor has to be contiguous"); + AT_ASSERTM(mask.is_contiguous(), "mask tensor has to be contiguous"); + AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); + AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); + + const int batch = input.size(0); + const int height_in = input.size(1); + const int width_in = input.size(2); + const int channels = input.size(3); + const int height_out = + (height_in + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + + 1; + const int width_out = + (width_in + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + + 1; + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, + "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + AT_ASSERTM( + channels == (group * group_channels), + "Input channels and group times group channels wont match: (%d vs %d).", + channels, group * group_channels); + + auto output = + at::zeros({batch, height_out, width_out, group * group_channels}, + input.options()); + + const int batch_n = im2col_step_; + auto output_n = output.view({batch / batch_n, batch_n, height_out, + width_out, group * group_channels}); + auto per_input_size = height_in * width_in * group * group_channels; + auto per_offset_size = + height_out * width_out * group * kernel_h * kernel_w * 2; + auto per_mask_size = height_out * width_out * group * kernel_h * kernel_w; + for (int n = 0; n < batch / im2col_step_; ++n) { + auto columns = output_n.select(0, n); + // AT_DISPATCH_FLOATING_TYPES( + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.type(), "ms_deform_attn_forward_cuda", ([&] { + dcnv3_im2col_cuda( + at::cuda::getCurrentCUDAStream(), + input.data() + n * im2col_step_ * per_input_size, + offset.data() + + n * im2col_step_ * per_offset_size, + mask.data() + n * im2col_step_ * per_mask_size, + columns.data(), kernel_h, kernel_w, stride_h, + stride_w, pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, batch_n, height_in, width_in, height_out, + width_out, offset_scale); + })); + } + + return output; +} + +std::vector +dcnv3_cuda_backward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const 
int group_channels, const float offset_scale, + const at::Tensor &grad_output, const int im2col_step) { + + AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous"); + AT_ASSERTM(offset.is_contiguous(), "offset tensor has to be contiguous"); + AT_ASSERTM(mask.is_contiguous(), "mask tensor has to be contiguous"); + AT_ASSERTM(grad_output.is_contiguous(), + "grad_output tensor has to be contiguous"); + AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); + AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor"); + AT_ASSERTM(mask.type().is_cuda(), "mask must be a CUDA tensor"); + AT_ASSERTM(grad_output.type().is_cuda(), + "grad_output must be a CUDA tensor"); + + const int batch = input.size(0); + const int height_in = input.size(1); + const int width_in = input.size(2); + const int channels = input.size(3); + const int height_out = + (height_in + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + + 1; + const int width_out = + (width_in + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + + 1; + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, + "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + AT_ASSERTM( + channels == (group * group_channels), + "Input channels and group times group channels wont match: (%d vs %d).", + channels, group * group_channels); + + auto dtype = input.dtype(); + if (dtype == at::kHalf) { + dtype = at::kFloat; + } + + auto grad_input = at::zeros_like(input, dtype); + auto grad_offset = at::zeros_like(offset, dtype); + auto grad_mask = at::zeros_like(mask, dtype); + + const int batch_n = im2col_step_; + auto per_input_size = height_in * width_in * group * group_channels; + auto per_offset_size = + height_out * width_out * group * kernel_h * kernel_w * 2; + auto per_mask_size = height_out * width_out * group * kernel_h * kernel_w; + auto grad_output_n = + grad_output.view({batch / im2col_step_, batch_n, height_out * width_out, + group, group_channels}); + + for (int n = 0; n < batch / im2col_step_; ++n) { + auto grad_output_g = grad_output_n.select(0, n); + // AT_DISPATCH_FLOATING_TYPES( + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.type(), "ms_deform_attn_backward_cuda", ([&] { + dcnv3_col2im_cuda( + at::cuda::getCurrentCUDAStream(), + grad_output_g.data(), + input.data() + n * im2col_step_ * per_input_size, + offset.data() + + n * im2col_step_ * per_offset_size, + mask.data() + n * im2col_step_ * per_mask_size, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, + dilation_h, dilation_w, group, group_channels, batch_n, + height_in, width_in, height_out, width_out, offset_scale, + grad_input.data() + + n * im2col_step_ * per_input_size, + grad_offset.data() + + n * im2col_step_ * per_offset_size, + grad_mask.data() + + n * im2col_step_ * per_mask_size); + })); + } + + if (input.dtype() == torch::kHalf) { + return {grad_input.to(torch::kHalf), grad_offset.to(torch::kHalf), + grad_mask.to(torch::kHalf)}; + } else { + return {grad_input, grad_offset, grad_mask}; + } +} diff --git a/segmentation/ops_dcnv3/src/cuda/dcnv3_cuda.h b/segmentation/ops_dcnv3/src/cuda/dcnv3_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..069f2829a927e27d7f341885509e837d09004a8c --- /dev/null +++ b/segmentation/ops_dcnv3/src/cuda/dcnv3_cuda.h @@ -0,0 +1,31 @@ +/*! 
+************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once +#include + +at::Tensor dcnv3_cuda_forward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, + const float offset_scale, const int im2col_step); + +std::vector +dcnv3_cuda_backward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, + const int pad_h, const int pad_w, const int dilation_h, + const int dilation_w, const int group, + const int group_channels, const float offset_scale, + const at::Tensor &grad_output, const int im2col_step); diff --git a/segmentation/ops_dcnv3/src/cuda/dcnv3_im2col_cuda.cuh b/segmentation/ops_dcnv3/src/cuda/dcnv3_im2col_cuda.cuh new file mode 100644 index 0000000000000000000000000000000000000000..006d376c684322ea558c0d4724e15c3a1bea3c2c --- /dev/null +++ b/segmentation/ops_dcnv3/src/cuda/dcnv3_im2col_cuda.cuh @@ -0,0 +1,1045 @@ +/*! +************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include +#include +#include + +#include +#include +#include +#include + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 256; +inline int GET_BLOCKS(const int N, const int num_threads) { + return (N + num_threads - 1) / num_threads; +} + +#define opmath_t at::opmath_type + +template +__device__ opmath_t dcnv3_im2col_bilinear(const scalar_t *&bottom_data, + const int &height, const int &width, + const int &group, + const int &group_channels, + const opmath_t &h, const opmath_t &w, + const int &g, const int &c) { + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const opmath_t lh = h - h_low; + const opmath_t lw = w - w_low; + const opmath_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = group * group_channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = g * group_channels + c; + + opmath_t v1 = 0; + if (h_low >= 0 && w_low >= 0) { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + } + opmath_t v2 = 
0; + if (h_low >= 0 && w_high <= width - 1) { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + } + opmath_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + } + opmath_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + } + const opmath_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + const opmath_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template +__device__ void dcnv3_col2im_bilinear( + const scalar_t *&bottom_data, const int &height, const int &width, + const int &nheads, const int &group_channels, const opmath_t &h, + const opmath_t &w, const int &m, const int &c, const opmath_t offset_scale, + const opmath_t &top_grad, const opmath_t &mask, opmath_t *&grad_im, + opmath_t *grad_offset, opmath_t *grad_mask) { + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const opmath_t lh = h - h_low; + const opmath_t lw = w - w_low; + const opmath_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * group_channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * group_channels + c; + + const opmath_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const opmath_t top_grad_im = top_grad * mask; + opmath_t grad_h_weight = 0, grad_w_weight = 0; + + opmath_t v1 = 0; + if (h_low >= 0 && w_low >= 0) { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_im + ptr1, w1 * top_grad_im); + } + opmath_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_im + ptr2, w2 * top_grad_im); + } + opmath_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_im + ptr3, w3 * top_grad_im); + } + opmath_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_im + ptr4, w4 * top_grad_im); + } + + const opmath_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + *grad_mask = top_grad * val; + *grad_offset = offset_scale * grad_w_weight * top_grad_im; + *(grad_offset + 1) = offset_scale * grad_h_weight * top_grad_im; +} + +template +__device__ void dcnv3_col2im_bilinear_gm( + const scalar_t *&bottom_data, const int &height, const int &width, + const int &nheads, const int &group_channels, const opmath_t &h, + const opmath_t &w, const int &m, const int &c, const opmath_t offset_scale, + const opmath_t &top_grad, const opmath_t &mask, opmath_t *&grad_im, + opmath_t *grad_offset, opmath_t *grad_mask) { + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const 
int w_high = w_low + 1; + + const opmath_t lh = h - h_low; + const opmath_t lw = w - w_low; + const opmath_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * group_channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * group_channels + c; + + const opmath_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const opmath_t top_grad_im = top_grad * mask; + opmath_t grad_h_weight = 0, grad_w_weight = 0; + + opmath_t v1 = 0; + if (h_low >= 0 && w_low >= 0) { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_im + ptr1, w1 * top_grad_im); + } + opmath_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_im + ptr2, w2 * top_grad_im); + } + opmath_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_im + ptr3, w3 * top_grad_im); + } + opmath_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_im + ptr4, w4 * top_grad_im); + } + + const opmath_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + atomicAdd(grad_mask, top_grad * val); + atomicAdd(grad_offset, offset_scale * grad_w_weight * top_grad_im); + atomicAdd(grad_offset + 1, offset_scale * grad_h_weight * top_grad_im); +} + +template +__global__ void dcnv3_im2col_gpu_kernel( + const int num_kernels, const scalar_t *data_im, const scalar_t *data_offset, + const scalar_t *data_mask, scalar_t *data_col, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale) { + CUDA_KERNEL_LOOP(index, num_kernels) { + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const int input_size = height_in * width_in; + scalar_t *data_col_ptr = data_col + index; + const int kernel_size = kernel_h * kernel_w; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int qid_stride = group * group_channels; + opmath_t col = 0; + const scalar_t *data_im_ptr = data_im + b_col * input_size * qid_stride; + // top-left + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + for 
(int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + col += dcnv3_im2col_bilinear( + data_im_ptr, height_in, width_in, group, + group_channels, loc_h, loc_w, g_col, c_col) * + weight; + } + data_weight_ptr += 1; + data_loc_w_ptr += 2; + } + } + *data_col_ptr = col; + } +} + +// debug +template +__global__ void dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + __shared__ opmath_t cache_grad_offset[blockSize * 2]; + __shared__ opmath_t cache_grad_mask[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + *(cache_grad_offset + (threadIdx.x << 1)) = 0; + *(cache_grad_offset + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_mask + threadIdx.x) = 0; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, + cache_grad_offset + (threadIdx.x << 1), + 
cache_grad_mask + threadIdx.x); + } + + __syncthreads(); + if (tid == 0) { + opmath_t _grad_w = cache_grad_offset[0], + _grad_h = cache_grad_offset[1], + _grad_a = cache_grad_mask[0]; + int sid = 2; + for (unsigned int tid = 1; tid < blockSize; ++tid) { + _grad_w += cache_grad_offset[sid]; + _grad_h += cache_grad_offset[sid + 1]; + _grad_a += cache_grad_mask[tid]; + sid += 2; + } + + *grad_offset = _grad_w; + *(grad_offset + 1) = _grad_h; + *grad_mask = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } +} + +template +__global__ void dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + __shared__ opmath_t cache_grad_offset[blockSize * 2]; + __shared__ opmath_t cache_grad_mask[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + *(cache_grad_offset + (threadIdx.x << 1)) = 0; + *(cache_grad_offset + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_mask + threadIdx.x) = 0; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, + cache_grad_offset + (threadIdx.x << 1), + cache_grad_mask + threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s = blockSize / 2; s > 0; s >>= 1) { + if (tid < 
s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_mask[tid] += cache_grad_mask[tid + s]; + cache_grad_offset[xid1] += cache_grad_offset[xid2]; + cache_grad_offset[xid1 + 1] += + cache_grad_offset[xid2 + 1]; + } + __syncthreads(); + } + + if (tid == 0) { + *grad_offset = cache_grad_offset[0]; + *(grad_offset + 1) = cache_grad_offset[1]; + *grad_mask = cache_grad_mask[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } +} + +template +__global__ void dcnv3_col2im_gpu_kernel_shm_reduce_v1( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + extern __shared__ int _s[]; + opmath_t *cache_grad_offset = (opmath_t *)_s; + opmath_t *cache_grad_mask = cache_grad_offset + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + *(cache_grad_offset + (threadIdx.x << 1)) = 0; + *(cache_grad_offset + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_mask + threadIdx.x) = 0; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, + cache_grad_offset + (threadIdx.x << 1), + cache_grad_mask + threadIdx.x); + } + + __syncthreads(); + if (tid == 0) { + opmath_t _grad_w = cache_grad_offset[0], + _grad_h = 
cache_grad_offset[1], + _grad_a = cache_grad_mask[0]; + int sid = 2; + for (unsigned int tid = 1; tid < blockDim.x; ++tid) { + _grad_w += cache_grad_offset[sid]; + _grad_h += cache_grad_offset[sid + 1]; + _grad_a += cache_grad_mask[tid]; + sid += 2; + } + + *grad_offset = _grad_w; + *(grad_offset + 1) = _grad_h; + *grad_mask = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } +} + +template +__global__ void dcnv3_col2im_gpu_kernel_shm_reduce_v2( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + extern __shared__ int _s[]; + opmath_t *cache_grad_offset = (opmath_t *)_s; + opmath_t *cache_grad_mask = cache_grad_offset + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + *(cache_grad_offset + (threadIdx.x << 1)) = 0; + *(cache_grad_offset + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_mask + threadIdx.x) = 0; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, + cache_grad_offset + (threadIdx.x << 1), + cache_grad_mask + threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s = blockDim.x / 2, spre = blockDim.x; s > 0; + s >>= 1, spre >>= 1) { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = 
(tid + s) << 1; + cache_grad_mask[tid] += cache_grad_mask[tid + s]; + cache_grad_offset[xid1] += cache_grad_offset[xid2]; + cache_grad_offset[xid1 + 1] += + cache_grad_offset[xid2 + 1]; + if (tid + (s << 1) < spre) { + cache_grad_mask[tid] += + cache_grad_mask[tid + (s << 1)]; + cache_grad_offset[xid1] += + cache_grad_offset[xid2 + (s << 1)]; + cache_grad_offset[xid1 + 1] += + cache_grad_offset[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) { + *grad_offset = cache_grad_offset[0]; + *(grad_offset + 1) = cache_grad_offset[1]; + *grad_mask = cache_grad_mask[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } +} + +template +__global__ void dcnv3_col2im_gpu_kernel_shm_reduce_v2_multi_blocks( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + extern __shared__ int _s[]; + opmath_t *cache_grad_offset = (opmath_t *)_s; + opmath_t *cache_grad_mask = cache_grad_offset + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + *(cache_grad_offset + (threadIdx.x << 1)) = 0; + *(cache_grad_offset + ((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_mask + threadIdx.x) = 0; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, + 
cache_grad_offset + (threadIdx.x << 1), + cache_grad_mask + threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s = blockDim.x / 2, spre = blockDim.x; s > 0; + s >>= 1, spre >>= 1) { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_mask[tid] += cache_grad_mask[tid + s]; + cache_grad_offset[xid1] += cache_grad_offset[xid2]; + cache_grad_offset[xid1 + 1] += + cache_grad_offset[xid2 + 1]; + if (tid + (s << 1) < spre) { + cache_grad_mask[tid] += + cache_grad_mask[tid + (s << 1)]; + cache_grad_offset[xid1] += + cache_grad_offset[xid2 + (s << 1)]; + cache_grad_offset[xid1 + 1] += + cache_grad_offset[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) { + atomicAdd(grad_offset, cache_grad_offset[0]); + atomicAdd(grad_offset + 1, cache_grad_offset[1]); + atomicAdd(grad_mask, cache_grad_mask[0]); + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } +} + +template +__global__ void dcnv3_col2im_gpu_kernel_gm( + const int num_kernels, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int height_in, + const int width_in, const int height_out, const int width_out, + const opmath_t offset_scale, opmath_t *grad_im, opmath_t *grad_offset, + opmath_t *grad_mask) { + CUDA_KERNEL_LOOP(index, num_kernels) { + int _temp = index; + const int c_col = _temp % group_channels; + _temp /= group_channels; + const int sampling_index = _temp; + const int g_col = _temp % group; + _temp /= group; + const int p0_w = ((dilation_w * (kernel_w - 1)) >> 1) - pad_w + + (_temp % width_out) * stride_w; + _temp /= width_out; + const int p0_h = ((dilation_h * (kernel_h - 1)) >> 1) - pad_h + + (_temp % height_out) * stride_h; + _temp /= height_out; + const int b_col = _temp; + + const opmath_t top_grad = grad_col[index]; + const int input_size = height_in * width_in; + const int kernel_size = kernel_h * kernel_w; + int data_weight_ptr = sampling_index * kernel_size; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_offset += grad_sampling_ptr << 1; + grad_mask += grad_sampling_ptr; + const int qid_stride = group * group_channels; + const int im_ptr_offset = b_col * input_size * qid_stride; + const scalar_t *data_im_ptr = data_im + im_ptr_offset; + opmath_t *grad_im_ptr = grad_im + im_ptr_offset; + const opmath_t p0_w_ = + p0_w - ((dilation_w * (kernel_w - 1)) >> 1) * offset_scale; + const opmath_t p0_h_ = + p0_h - ((dilation_h * (kernel_h - 1)) >> 1) * offset_scale; + for (int i = 0; i < kernel_w; ++i) { + for (int j = 0; j < kernel_h; ++j) { + const opmath_t offset_w = data_offset[data_loc_w_ptr]; + const opmath_t offset_h = data_offset[data_loc_w_ptr + 1]; + const opmath_t loc_w = + p0_w_ + (i * dilation_w + offset_w) * offset_scale; + const opmath_t loc_h = + p0_h_ + (j * dilation_h + offset_h) * offset_scale; + const opmath_t weight = data_mask[data_weight_ptr]; + if (loc_h > -1 && loc_w > -1 && loc_h < height_in && + loc_w < width_in) { + dcnv3_col2im_bilinear_gm( + data_im_ptr, height_in, width_in, group, group_channels, + loc_h, loc_w, g_col, c_col, offset_scale, top_grad, + weight, grad_im_ptr, grad_offset, grad_mask); + } + data_weight_ptr += 1; + 
data_loc_w_ptr += 2; + grad_mask += 1; + grad_offset += 2; + } + } + } +} + +template +void dcnv3_im2col_cuda(cudaStream_t stream, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, + scalar_t *data_col, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, + const int dilation_h, const int dilation_w, + const int group, const int group_channels, + const int batch_n, const int height_in, + const int width_in, const int height_out, + const int width_out, const opmath_t offset_scale) { + const int num_kernels = + batch_n * height_out * width_out * group * group_channels; + const int num_actual_kernels = + batch_n * height_out * width_out * group * group_channels; + const int num_threads = CUDA_NUM_THREADS; + dcnv3_im2col_gpu_kernel + <<>>(num_kernels, data_im, data_offset, data_mask, data_col, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, + dilation_h, dilation_w, group, group_channels, height_in, + width_in, height_out, width_out, offset_scale); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in dcnv3_im2col_cuda: %s\n", cudaGetErrorString(err)); + } +} + +template +void dcnv3_col2im_cuda( + cudaStream_t stream, const scalar_t *grad_col, const scalar_t *data_im, + const scalar_t *data_offset, const scalar_t *data_mask, const int kernel_h, + const int kernel_w, const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, const int batch_n, + const int height_in, const int width_in, const int height_out, + const int width_out, const opmath_t offset_scale, opmath_t *grad_im, + opmath_t *grad_offset, opmath_t *grad_mask) { + const int num_threads = + (group_channels > CUDA_NUM_THREADS) ? 
CUDA_NUM_THREADS : group_channels; + const int num_kernels = + batch_n * height_out * width_out * group * group_channels; + const int num_actual_kernels = + batch_n * height_out * width_out * group * group_channels; + if (group_channels > 1024) { + if ((group_channels & 1023) == 0) { + dcnv3_col2im_gpu_kernel_shm_reduce_v2_multi_blocks + <<>>( + num_kernels, grad_col, data_im, data_offset, data_mask, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, + dilation_h, dilation_w, group, group_channels, height_in, + width_in, height_out, width_out, offset_scale, grad_im, + grad_offset, grad_mask); + } else { + dcnv3_col2im_gpu_kernel_gm + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + } + } else { + switch (group_channels) { + case 1: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + case 2: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + case 4: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + case 8: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + case 16: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + case 32: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + case 64: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + case 128: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, 
dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + case 256: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + case 512: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + case 1024: + dcnv3_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>(num_kernels, grad_col, data_im, data_offset, + data_mask, kernel_h, kernel_w, stride_h, stride_w, + pad_h, pad_w, dilation_h, dilation_w, group, + group_channels, height_in, width_in, height_out, + width_out, offset_scale, grad_im, grad_offset, + grad_mask); + break; + default: + if (group_channels < 64) { + dcnv3_col2im_gpu_kernel_shm_reduce_v1 + <<>>( + num_kernels, grad_col, data_im, data_offset, data_mask, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, + dilation_h, dilation_w, group, group_channels, + height_in, width_in, height_out, width_out, + offset_scale, grad_im, grad_offset, grad_mask); + } else { + dcnv3_col2im_gpu_kernel_shm_reduce_v2 + <<>>( + num_kernels, grad_col, data_im, data_offset, data_mask, + kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w, + dilation_h, dilation_w, group, group_channels, + height_in, width_in, height_out, width_out, + offset_scale, grad_im, grad_offset, grad_mask); + } + } + } + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in dcnv3_col2im_cuda: %s\n", cudaGetErrorString(err)); + } +} diff --git a/segmentation/ops_dcnv3/src/dcnv3.h b/segmentation/ops_dcnv3/src/dcnv3.h new file mode 100644 index 0000000000000000000000000000000000000000..029648e17afb8556e90908fcdbe2da237973dca0 --- /dev/null +++ b/segmentation/ops_dcnv3/src/dcnv3.h @@ -0,0 +1,59 @@ +/*! 
+************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once + +#include "cpu/dcnv3_cpu.h" + +#ifdef WITH_CUDA +#include "cuda/dcnv3_cuda.h" +#endif + +at::Tensor dcnv3_forward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, + const int kernel_w, const int stride_h, + const int stride_w, const int pad_h, const int pad_w, + const int dilation_h, const int dilation_w, + const int group, const int group_channels, + const float offset_scale, const int im2col_step) { + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return dcnv3_cuda_forward(input, offset, mask, kernel_h, kernel_w, + stride_h, stride_w, pad_h, pad_w, dilation_h, + dilation_w, group, group_channels, + offset_scale, im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector +dcnv3_backward(const at::Tensor &input, const at::Tensor &offset, + const at::Tensor &mask, const int kernel_h, const int kernel_w, + const int stride_h, const int stride_w, const int pad_h, + const int pad_w, const int dilation_h, const int dilation_w, + const int group, const int group_channels, + const float offset_scale, const at::Tensor &grad_output, + const int im2col_step) { + if (input.type().is_cuda()) { +#ifdef WITH_CUDA + return dcnv3_cuda_backward(input, offset, mask, kernel_h, kernel_w, + stride_h, stride_w, pad_h, pad_w, dilation_h, + dilation_w, group, group_channels, + offset_scale, grad_output, im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} diff --git a/segmentation/ops_dcnv3/src/vision.cpp b/segmentation/ops_dcnv3/src/vision.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1f7a9087147bb8752202064c154c43078df3ad88 --- /dev/null +++ b/segmentation/ops_dcnv3/src/vision.cpp @@ -0,0 +1,17 @@ +/*! 
+************************************************************************************************** +* InternImage +* Copyright (c) 2022 OpenGVLab +* Licensed under The MIT License [see LICENSE for details] +************************************************************************************************** +* Modified from +*https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include "dcnv3.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("dcnv3_forward", &dcnv3_forward, "dcnv3_forward"); + m.def("dcnv3_backward", &dcnv3_backward, "dcnv3_backward"); +} diff --git a/segmentation/ops_dcnv3/test.py b/segmentation/ops_dcnv3/test.py new file mode 100644 index 0000000000000000000000000000000000000000..7ff44aeae1c56d8db1f0a250defb6d25e2e9b7b7 --- /dev/null +++ b/segmentation/ops_dcnv3/test.py @@ -0,0 +1,261 @@ +# -------------------------------------------------------- +# InternImage +# Copyright (c) 2022 OpenGVLab +# Licensed under The MIT License [see LICENSE for details] +# -------------------------------------------------------- + +from __future__ import absolute_import, division, print_function + +import math +import time + +import torch +import torch.nn as nn +from functions.dcnv3_func import DCNv3Function, dcnv3_core_pytorch +from torch.autograd import gradcheck + +H_in, W_in = 8, 8 +N, M, D = 2, 4, 16 +Kh, Kw = 3, 3 +P = Kh * Kw +offset_scale = 2.0 +pad = 1 +dilation = 1 +stride = 1 +H_out = (H_in + 2 * pad - (dilation * (Kh - 1) + 1)) // stride + 1 +W_out = (W_in + 2 * pad - (dilation * (Kw - 1) + 1)) // stride + 1 + +torch.manual_seed(3) + + +@torch.no_grad() +def check_forward_equal_with_pytorch_double(): + input = torch.rand(N, H_in, W_in, M*D).cuda() * 0.01 + offset = torch.rand(N, H_out, W_out, M*P*2).cuda() * 10 + mask = torch.rand(N, H_out, W_out, M, P).cuda() + 1e-5 + mask /= mask.sum(-1, keepdim=True) + mask = mask.reshape(N, H_out, W_out, M*P) + + output_pytorch = dcnv3_core_pytorch( + input.double(), + offset.double(), + mask.double(), + Kh, Kw, stride, stride, Kh // 2, Kw // 2, dilation, dilation, M, D, offset_scale).detach().cpu() + + im2col_step = 2 + output_cuda = DCNv3Function.apply( + input.double(), + offset.double(), + mask.double(), + Kh, Kw, stride, stride, Kh // 2, Kw // 2, dilation, dilation, M, D, offset_scale, + im2col_step).detach().cpu() + + fwdok = torch.allclose(output_cuda, output_pytorch) + max_abs_err = (output_cuda - output_pytorch).abs().max() + max_rel_err = ((output_cuda - output_pytorch).abs() / + output_pytorch.abs()).max() + print('>>> forward double') + print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +@torch.no_grad() +def check_forward_equal_with_pytorch_float(): + input = torch.rand(N, H_in, W_in, M*D).cuda() * 0.01 + offset = torch.rand(N, H_out, W_out, M*P*2).cuda() * 10 + mask = torch.rand(N, H_out, W_out, M, P).cuda() + 1e-5 + mask /= mask.sum(-1, keepdim=True) + mask = mask.reshape(N, H_out, W_out, M*P) + + output_pytorch = dcnv3_core_pytorch( + input, + offset, + mask, + Kh, Kw, stride, stride, Kh // 2, Kw // 2, dilation, dilation, M, D, offset_scale).detach().cpu() + + im2col_step = 2 + output_cuda = DCNv3Function.apply( + input, + offset, + mask, + Kh, Kw, stride, stride, Kh // 2, Kw // 2, dilation, dilation, M, D, offset_scale, + im2col_step).detach().cpu() + + fwdok = torch.allclose(output_cuda, output_pytorch, 
rtol=1e-2, atol=1e-3) + max_abs_err = (output_cuda - output_pytorch).abs().max() + max_rel_err = ((output_cuda - output_pytorch).abs() / + output_pytorch.abs()).max() + print('>>> forward float') + print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +def check_backward_equal_with_pytorch_double(channels=4, grad_input=True, grad_offset=True, grad_mask=True): + # H_in, W_in = 4, 4 + N = 2 + M = 2 + H_out = (H_in + 2 * pad - (dilation * (Kh - 1) + 1)) // stride + 1 + W_out = (W_in + 2 * pad - (dilation * (Kw - 1) + 1)) // stride + 1 + + D = channels + input0 = torch.rand(N, H_in, W_in, M*D).cuda() * 0.01 + offset0 = torch.rand(N, H_out, W_out, M*P*2).cuda() * 10 + mask0 = torch.rand(N, H_out, W_out, M, P).cuda() + 1e-5 + mask0 /= mask0.sum(-1, keepdim=True) + mask0 = mask0.reshape(N, H_out, W_out, M*P) + input0.requires_grad = grad_input + offset0.requires_grad = grad_offset + mask0.requires_grad = grad_mask + + output_pytorch = dcnv3_core_pytorch( + input0.double(), + offset0.double(), + mask0.double(), + Kh, Kw, stride, stride, Kh // 2, Kw // 2, dilation, dilation, M, D, offset_scale) + output_pytorch.sum().backward() + + input1 = input0.detach() + offset1 = offset0.detach() + mask1 = mask0.detach() + input1.requires_grad = grad_input + offset1.requires_grad = grad_offset + mask1.requires_grad = grad_mask + + im2col_step = 2 + output_cuda = DCNv3Function.apply( + input1.double(), + offset1.double(), + mask1.double(), + Kh, Kw, stride, stride, Kh // 2, Kw // 2, dilation, dilation, M, D, offset_scale, + im2col_step) + output_cuda.sum().backward() + + print(f'>>> backward double: channels {D}') + bwdok = torch.allclose(input0.grad, input1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (input0.grad - input1.grad).abs().max() + max_rel_err = ((input0.grad - input1.grad).abs() / + input0.grad.abs()).max() + print( + f'* {bwdok} input_grad check_backward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + bwdok = torch.allclose(offset0.grad, offset1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (offset0.grad - offset1.grad).abs().max() + max_rel_err = ((offset0.grad - offset1.grad).abs() / + offset0.grad.abs()).max() + print( + f'* {bwdok} offset_grad check_backward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + bwdok = torch.allclose(mask0.grad, mask1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (mask0.grad - mask1.grad).abs().max() + max_rel_err = ((mask0.grad - mask1.grad).abs() / + mask0.grad.abs()).max() + print( + f'* {bwdok} mask_grad check_backward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +def check_backward_equal_with_pytorch_float(channels=4, grad_input=True, grad_offset=True, grad_mask=True): + # H_in, W_in = 4, 4 + N = 2 + M = 2 + H_out = (H_in + 2 * pad - (dilation * (Kh - 1) + 1)) // stride + 1 + W_out = (W_in + 2 * pad - (dilation * (Kw - 1) + 1)) // stride + 1 + + D = channels + input0 = torch.rand(N, H_in, W_in, M*D).cuda() * 0.01 + offset0 = torch.rand(N, H_out, W_out, M*P*2).cuda() * 10 + mask0 = torch.rand(N, H_out, W_out, M, P).cuda() + 1e-5 + mask0 /= mask0.sum(-1, keepdim=True) + mask0 = mask0.reshape(N, H_out, W_out, M*P) + input0.requires_grad = grad_input + offset0.requires_grad = grad_offset + mask0.requires_grad = grad_mask + + output_pytorch = dcnv3_core_pytorch( + input0, + offset0, + mask0, + Kh, Kw, stride, stride, Kh // 2, Kw // 2, dilation, dilation, M, D, 
offset_scale) + output_pytorch.sum().backward() + + input1 = input0.detach() + offset1 = offset0.detach() + mask1 = mask0.detach() + input1.requires_grad = grad_input + offset1.requires_grad = grad_offset + mask1.requires_grad = grad_mask + + im2col_step = 2 + output_cuda = DCNv3Function.apply( + input1, + offset1, + mask1, + Kh, Kw, stride, stride, Kh // 2, Kw // 2, dilation, dilation, M, D, offset_scale, + im2col_step) + output_cuda.sum().backward() + + print(f'>>> backward float: channels {D}') + bwdok = torch.allclose(input0.grad, input1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (input0.grad - input1.grad).abs().max() + max_rel_err = ((input0.grad - input1.grad).abs() / + input0.grad.abs()).max() + print( + f'* {bwdok} input_grad check_backward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + bwdok = torch.allclose(offset0.grad, offset1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (offset0.grad - offset1.grad).abs().max() + max_rel_err = ((offset0.grad - offset1.grad).abs() / + offset0.grad.abs()).max() + print( + f'* {bwdok} offset_grad check_backward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + bwdok = torch.allclose(mask0.grad, mask1.grad, rtol=1e-2, atol=1e-3) + max_abs_err = (mask0.grad - mask1.grad).abs().max() + max_rel_err = ((mask0.grad - mask1.grad).abs() / + mask0.grad.abs()).max() + print( + f'* {bwdok} mask_grad check_backward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}') + + +@torch.no_grad() +def check_time_cost(im2col_step=128): + N = 512 + H_in, W_in = 64, 64 + H_out = (H_in + 2 * pad - (dilation * (Kh - 1) + 1)) // stride + 1 + W_out = (W_in + 2 * pad - (dilation * (Kw - 1) + 1)) // stride + 1 + + input = torch.rand(N, H_in, W_in, M*D).cuda() * 0.01 + offset = torch.rand(N, H_out, W_out, M*P*2).cuda() * 10 + mask = torch.rand(N, H_out, W_out, M, P).cuda() + 1e-5 + mask /= mask.sum(-1, keepdim=True) + mask = mask.reshape(N, H_out, W_out, M*P) + print( + f'>>> time cost: im2col_step {im2col_step}; input {input.shape}; points {P} ') + repeat = 100 + for i in range(repeat): + output_cuda = DCNv3Function.apply( + input, + offset, + mask, + Kh, Kw, stride, stride, Kh // 2, Kw // 2, dilation, dilation, M, D, 1.0, + im2col_step) + torch.cuda.synchronize() + start = time.time() + for i in range(repeat): + output_cuda = DCNv3Function.apply( + input, + offset, + mask, + Kh, Kw, stride, stride, Kh // 2, Kw // 2, dilation, dilation, M, D, 1.0, + im2col_step) + torch.cuda.synchronize() + print(f'foward time cost: {(time.time() - start) / repeat}') + + +if __name__ == '__main__': + check_forward_equal_with_pytorch_double() + check_forward_equal_with_pytorch_float() + for channels in [1, 16, 30, 32, 64, 71, 1025]: + check_backward_equal_with_pytorch_double(channels, True, True, True) + for channels in [1, 16, 30, 32, 64, 71, 1025]: + check_backward_equal_with_pytorch_float(channels, True, True, True) + for i in range(3): + im2col_step = 128 * (2 ** i) + check_time_cost(im2col_step) diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_020306.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_020306.log new file mode 100644 index 0000000000000000000000000000000000000000..3924b6c739e396476ab46a1db010cb5968d9ee40 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_020306.log @@ -0,0 +1,5239 @@ +2025-05-28 02:03:06,394 - mmseg - 
INFO - Multi-processing start method is `None` +2025-05-28 02:03:06,398 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 02:03:06,445 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 02:03:06,445 - mmseg - INFO - Distributed training: True +2025-05-28 02:03:06,925 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 
1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), 
pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='configs/cityscapes/splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='configs/cityscapes/splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ]), + data_root='/pasteur/u/yiming/homework4/cityscapes') +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) 
+lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 02:03:06,925 - mmseg - INFO - Set random seed to 1975608665, deterministic: False +2025-05-28 02:03:06,926 - mmseg - INFO - using core type: DCNv3 +2025-05-28 02:03:06,926 - mmseg - INFO - using activation layer: GELU +2025-05-28 02:03:06,926 - mmseg - INFO - using main norm layer: LN +2025-05-28 02:03:06,926 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 02:03:06,926 - mmseg - INFO - level2_post_norm: True +2025-05-28 02:03:06,926 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 02:03:06,926 - mmseg - INFO - res_post_norm: True +2025-05-28 02:03:06,926 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 02:03:32,735 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after 
calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after 
calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and 
after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
(The identical per-parameter report, with the same shapes and the same messages, is repeated for backbone.levels.1.blocks.2 through backbone.levels.1.blocks.5.)
backbone.levels.1.norm.0.weight - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.norm.0.bias - torch.Size([640]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): Initialized by user-defined `init_weights` in InternImage
backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
(The identical per-parameter report, with the same shapes and the same messages, is repeated for backbone.levels.2.blocks.1 through backbone.levels.2.blocks.11.)
backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.12.norm1.0.bias - torch.Size([1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.output_proj.weight - 
torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([360, 1280]): 
+Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias - 
torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined 
`init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.28.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
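
Note: the DCNv3 projection shapes repeated throughout this level are internally consistent and can be sanity-checked. This is a hedged reading, not something the log states: if the first dimension of center_feature_scale_proj_weight ([40, 1280]) is the number of deformable groups and the sampling grid is 3x3, then the offset projection needs 40*9*2 = 720 outputs and the mask projection 40*9 = 360, exactly the [720, 1280] and [360, 1280] shapes listed above. A minimal check (the helper name dcnv3_proj_dims is chosen here for illustration):

# Sketch: DCNv3 offset/mask projection widths implied by the shapes in the log.
# Assumption (not printed in the log): groups = first dim of
# center_feature_scale_proj_weight, sampling kernel = 3x3.
def dcnv3_proj_dims(groups: int, kernel_size: int = 3):
    points = kernel_size * kernel_size      # 3x3 sampling grid -> 9 points
    offset_out = groups * points * 2        # (dx, dy) per group and point
    mask_out = groups * points              # one modulation scalar each
    return offset_out, mask_out

assert dcnv3_proj_dims(40) == (720, 360)    # 1280-channel blocks in levels.2 above
assert dcnv3_proj_dims(80) == (1440, 720)   # 2560-channel blocks in levels.3 later in the log
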
+backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.bias - torch.Size([2560]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
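
Note: every entry in this log is a before/after comparison around `init_weights`; the log does not show how the comparison was made. A minimal sketch of that kind of check, assuming an mmseg-style model that exposes `init_weights()` and standard PyTorch `named_parameters()` (the helper name report_init_changes is made up for illustration):

import torch

def report_init_changes(model):
    # Snapshot every parameter, run the model's own init_weights, then report
    # which tensors were actually overwritten and which kept their old values.
    before = {name: p.detach().clone() for name, p in model.named_parameters()}
    model.init_weights()
    for name, p in model.named_parameters():
        status = ("changed by init_weights"
                  if not torch.equal(before[name], p.detach())
                  else "same before and after init_weights")
        print(f"{name} - {tuple(p.shape)}: {status}")
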
+backbone.levels.3.blocks.3.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.input_proj.weight - 
torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized 
by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.conv.weight - torch.Size([256, 2560, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.0.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.bias - torch.Size([256]): +The value is the same before and 
after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.conv.weight - torch.Size([256, 1280, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.1.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.conv.weight - torch.Size([256, 640, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.2.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined 
`init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by 
user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.level_encoding.weight - torch.Size([3, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.conv.weight - torch.Size([256, 320, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.lateral_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.conv.weight - torch.Size([256, 256, 3, 3]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.output_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.mask_feature.weight - torch.Size([256, 256, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.mask_feature.bias - torch.Size([256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias - 
torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - 
torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - 
torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_020501.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_020501.log new file mode 100644 index 0000000000000000000000000000000000000000..2c4a655f83f742e1a8248c3bd2ebf5d28aff5313 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_020501.log @@ -0,0 +1,5239 @@ +2025-05-28 02:05:01,607 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 02:05:01,607 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 02:05:01,648 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. 
OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 02:05:01,648 - mmseg - INFO - Distributed training: True +2025-05-28 02:05:02,066 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + 
with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + 
img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='configs/cityscapes/splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='configs/cityscapes/splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ]), + data_root='/pasteur/u/yiming/homework4/cityscapes') +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 02:05:02,067 - mmseg - INFO - Set random seed to 2091714909, deterministic: False +2025-05-28 02:05:02,068 - mmseg - INFO - using core type: DCNv3 +2025-05-28 02:05:02,068 - mmseg - INFO - using activation layer: GELU +2025-05-28 02:05:02,068 - mmseg - INFO - using main norm layer: LN +2025-05-28 
02:05:02,068 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 02:05:02,068 - mmseg - INFO - level2_post_norm: True +2025-05-28 02:05:02,068 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 02:05:02,068 - mmseg - INFO - res_post_norm: True +2025-05-28 02:05:02,068 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 02:05:26,816 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after 
calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - 
torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and 
after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and 
after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.bias - torch.Size([640]): 
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.blocks.3.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc1.weight - 
torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.1.blocks.5.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.2.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.3.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.output_proj.weight - torch.Size([1280, 
1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in 
InternImage + +backbone.levels.2.blocks.5.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.weight - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.8.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc1.bias - torch.Size([5120]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 
5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.27.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.28.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.29.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.30.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 
2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.weight - 
torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in 
InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.input_proj.bias - torch.Size([2560]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.conv.weight - torch.Size([256, 2560, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.0.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.conv.weight - torch.Size([256, 1280, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.1.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.input_convs.2.conv.weight - torch.Size([256, 640, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.2.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias - 
torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.level_encoding.weight - torch.Size([3, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.conv.weight - torch.Size([256, 320, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.lateral_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.conv.weight - torch.Size([256, 256, 3, 3]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.output_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.mask_feature.weight - torch.Size([256, 256, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.mask_feature.bias - torch.Size([256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.bias - torch.Size([256]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - 
torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - 
torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_020630.log 
b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_020630.log new file mode 100644 index 0000000000000000000000000000000000000000..7123168a086f7755cd76b39b53fcb21bebe2200a --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_020630.log @@ -0,0 +1,5240 @@ +2025-05-28 02:06:30,631 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 02:06:30,632 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 02:06:30,672 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 02:06:30,672 - mmseg - INFO - Distributed training: True +2025-05-28 02:06:31,106 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + 
drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + 
dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ]), + data_root='/pasteur/u/yiming/homework4/cityscapes') +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 
1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 02:06:31,107 - mmseg - INFO - Set random seed to 272994515, deterministic: False +2025-05-28 02:06:31,107 - mmseg - INFO - using core type: DCNv3 +2025-05-28 02:06:31,107 - mmseg - INFO - using activation layer: GELU +2025-05-28 02:06:31,107 - mmseg - INFO - using main norm layer: LN +2025-05-28 02:06:31,107 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 02:06:31,107 - mmseg - INFO - level2_post_norm: True +2025-05-28 02:06:31,107 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 02:06:31,107 - mmseg - INFO - res_post_norm: True +2025-05-28 02:06:31,108 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 02:06:53,760 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
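The per-parameter report that begins above comes from mmcv's `BaseModule` init-weight bookkeeping: each entry names a parameter and its shape, and states whether a user-defined `init_weights` (in InternImage or Mask2FormerHead) changed its value. A rough sketch of how such a report is produced from the config dumped earlier in this log (the config path below is hypothetical, and the `load_from` checkpoint is only applied later by the runner, which is why the backbone reports training from scratch here):

from mmcv import Config
from mmseg.models import build_segmentor

# Hypothetical local copy of the config dumped above; assumes the repo's
# custom modules (EncoderDecoderMask2Former, InternImage, Mask2FormerHead)
# have already been imported so they are registered with mmseg.
cfg = Config.fromfile('work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/config.py')

# train_cfg/test_cfg are nested inside cfg.model in this config.
model = build_segmentor(cfg.model)
model.init_weights()  # with a logger configured, emits the per-parameter initialization report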
+backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former 
+ +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): 
+Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): 
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.blocks.3.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.mask.weight - torch.Size([180, 
640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former 
+ +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.2.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.mask.weight - 
torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.6.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc1.bias - torch.Size([5120]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+backbone.levels.2.blocks.7.mlp.fc2.weight - torch.Size([1280, 5120]):
+Initialized by user-defined `init_weights` in InternImage
+
+backbone.levels.2.blocks.7.mlp.fc2.bias - torch.Size([1280]):
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+backbone.levels.2.blocks.7.res_post_norm1.0.weight - torch.Size([1280]):
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+backbone.levels.2.blocks.7.res_post_norm1.0.bias - torch.Size([1280]):
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+backbone.levels.2.blocks.7.res_post_norm2.0.weight - torch.Size([1280]):
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+backbone.levels.2.blocks.7.res_post_norm2.0.bias - torch.Size([1280]):
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+The entries for backbone.levels.2.blocks.8 through backbone.levels.2.blocks.24 are identical apart from the block index. The per-block pattern is:
+
+  parameter                               shape                         status after `init_weights`
+  norm1.0.weight, norm1.0.bias            torch.Size([1280])            unchanged
+  dcn.center_feature_scale_proj_weight    torch.Size([40, 1280])        unchanged
+  dcn.center_feature_scale_proj_bias      torch.Size([40])              unchanged
+  dcn.dw_conv.0.weight                    torch.Size([1280, 1, 5, 5])   unchanged
+  dcn.dw_conv.0.bias                      torch.Size([1280])            unchanged
+  dcn.dw_conv.1.1.weight, .1.1.bias       torch.Size([1280])            unchanged
+  dcn.offset.weight                       torch.Size([720, 1280])       Initialized by user-defined `init_weights` in InternImage
+  dcn.offset.bias                         torch.Size([720])             unchanged
+  dcn.mask.weight                         torch.Size([360, 1280])       Initialized by user-defined `init_weights` in InternImage
+  dcn.mask.bias                           torch.Size([360])             unchanged
+  dcn.input_proj.weight                   torch.Size([1280, 1280])      Initialized by user-defined `init_weights` in InternImage
+  dcn.input_proj.bias                     torch.Size([1280])            unchanged
+  dcn.output_proj.weight                  torch.Size([1280, 1280])      Initialized by user-defined `init_weights` in InternImage
+  dcn.output_proj.bias                    torch.Size([1280])            unchanged
+  norm2.0.weight, norm2.0.bias            torch.Size([1280])            unchanged
+  mlp.fc1.weight                          torch.Size([5120, 1280])      Initialized by user-defined `init_weights` in InternImage
+  mlp.fc1.bias                            torch.Size([5120])            unchanged
+  mlp.fc2.weight                          torch.Size([1280, 5120])      Initialized by user-defined `init_weights` in InternImage
+  mlp.fc2.bias                            torch.Size([1280])            unchanged
+  res_post_norm1.0.weight, .bias          torch.Size([1280])            unchanged
+  res_post_norm2.0.weight, .bias          torch.Size([1280])            unchanged
+
+Here "unchanged" abbreviates "The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former".
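For context, these are the standard MMEngine/MMSegmentation parameter-initialization trace messages: each parameter is listed with its shape and with either the user-defined `init_weights` that touched it (here, the InternImage backbone's) or a note that its value is the same before and after the top-level `init_weights` call. As a minimal, illustrative sketch (not this repository's own tooling, and making no claim about how MMEngine tracks this internally), the snippet below shows one straightforward way to produce a comparable before/after report for an already-built model, relying only on `named_parameters()`, `init_weights()` and `torch.equal`:

import torch


def report_init_changes(model):
    # Snapshot every parameter before weight initialization runs.
    before = {name: param.detach().clone()
              for name, param in model.named_parameters()}

    # MMEngine-style modules expose init_weights(); a backbone such as
    # InternImage can override it with its own initialization scheme.
    model.init_weights()

    # Compare each parameter with its snapshot and report the outcome,
    # mirroring the two kinds of message used in the trace above.
    for name, param in model.named_parameters():
        if torch.equal(before[name], param.detach()):
            status = "The value is the same before and after calling `init_weights`"
        else:
            status = "Changed by `init_weights` (e.g. a user-defined backbone init)"
        print(f"{name} - {tuple(param.shape)}: {status}")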
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.28.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.0.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([2560, 10240]): 
+Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias 
- torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1440]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.conv.weight - torch.Size([256, 2560, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.pixel_decoder.input_convs.0.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.conv.weight - torch.Size([256, 1280, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.1.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.conv.weight - torch.Size([256, 640, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.2.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.bias - torch.Size([256]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight - 
torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.level_encoding.weight - torch.Size([3, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.conv.weight - torch.Size([256, 320, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.lateral_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.conv.weight - torch.Size([256, 256, 3, 3]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.output_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.mask_feature.weight - torch.Size([256, 256, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.mask_feature.bias - torch.Size([256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight - 
torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias - 
torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - 
torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - 
torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former +2025-05-28 02:07:06,573 - mmseg - INFO - Loaded 1 images diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022041.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022041.log new file mode 100644 index 0000000000000000000000000000000000000000..1287408bcb1aba121b24eeecae2e43d3a76d357c --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022041.log @@ -0,0 +1,7656 @@ +2025-05-28 02:20:41,225 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 02:20:41,226 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 02:20:41,271 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. 
OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 02:20:41,271 - mmseg - INFO - Distributed training: True +2025-05-28 02:20:41,702 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + 
with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + 
img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 02:20:41,702 - mmseg - INFO - Set random seed to 719920476, deterministic: False +2025-05-28 02:20:41,703 - mmseg - INFO - using core type: DCNv3 +2025-05-28 02:20:41,703 - mmseg - INFO - using activation layer: GELU +2025-05-28 02:20:41,703 - mmseg - INFO - using main norm layer: LN +2025-05-28 02:20:41,703 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 02:20:41,703 - mmseg - INFO - 
level2_post_norm: True +2025-05-28 02:20:41,704 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 02:20:41,704 - mmseg - INFO - res_post_norm: True +2025-05-28 02:20:41,704 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 02:21:05,822 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - 
torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - torch.Size([320]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and 
after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.bias - torch.Size([640]): 
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.blocks.3.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc1.weight - 
torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.1.blocks.5.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.2.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.3.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.output_proj.weight - torch.Size([1280, 
1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in 
InternImage + +backbone.levels.2.blocks.5.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.weight - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.8.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc1.bias - torch.Size([5120]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 
5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.14 through backbone.levels.2.blocks.31 share an identical parameter layout and an identical initialization result. For each of these 18 blocks (parameter names given relative to the block), the entries are:
+
+Initialized by user-defined `init_weights` in InternImage:
+  dcn.offset.weight      - torch.Size([720, 1280])
+  dcn.mask.weight        - torch.Size([360, 1280])
+  dcn.input_proj.weight  - torch.Size([1280, 1280])
+  dcn.output_proj.weight - torch.Size([1280, 1280])
+  mlp.fc1.weight         - torch.Size([5120, 1280])
+  mlp.fc2.weight         - torch.Size([1280, 5120])
+
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former:
+  norm1.0.weight, norm1.0.bias                   - torch.Size([1280])
+  dcn.center_feature_scale_proj_weight           - torch.Size([40, 1280])
+  dcn.center_feature_scale_proj_bias             - torch.Size([40])
+  dcn.dw_conv.0.weight                           - torch.Size([1280, 1, 5, 5])
+  dcn.dw_conv.0.bias                             - torch.Size([1280])
+  dcn.dw_conv.1.1.weight, dcn.dw_conv.1.1.bias   - torch.Size([1280])
+  dcn.offset.bias                                - torch.Size([720])
+  dcn.mask.bias                                  - torch.Size([360])
+  dcn.input_proj.bias, dcn.output_proj.bias      - torch.Size([1280])
+  norm2.0.weight, norm2.0.bias                   - torch.Size([1280])
+  mlp.fc1.bias                                   - torch.Size([5120])
+  mlp.fc2.bias                                   - torch.Size([1280])
+  res_post_norm1.0.weight, res_post_norm1.0.bias - torch.Size([1280])
+  res_post_norm2.0.weight, res_post_norm2.0.bias - torch.Size([1280])
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 
2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.weight - 
torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in 
InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.input_proj.bias - torch.Size([2560]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.conv.weight - torch.Size([256, 2560, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.0.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.conv.weight - torch.Size([256, 1280, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.1.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.input_convs.2.conv.weight - torch.Size([256, 640, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.2.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias - 
torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.level_encoding.weight - torch.Size([3, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.conv.weight - torch.Size([256, 320, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.lateral_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.conv.weight - torch.Size([256, 256, 3, 3]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.output_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.mask_feature.weight - torch.Size([256, 256, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.mask_feature.bias - torch.Size([256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.bias - torch.Size([256]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - 
torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - 
torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former +2025-05-28 02:21:16,234 - mmseg - INFO - Loaded 1 images +2025-05-28 02:21:27,595 - mmseg - INFO - {'num_layers': 50, 'layer_decay_rate': 0.95, 'depths': [6, 6, 32, 6], 'offset_lr_scale': 1.0} 
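The two halves of the log above can be easier to interpret with a couple of short sketches. First, the repeated "The value is the same before and after calling `init_weights`" / "Initialized by user-defined `init_weights` in Mask2FormerHead" lines simply report whether a parameter tensor changed when `init_weights()` ran. A minimal, hedged way to reproduce that kind of report yourself (assuming an mmseg 0.x-style config object `cfg`; the function name `report_init_changes` is ours, not part of the log):

```python
import torch
from mmseg.models import build_segmentor

def report_init_changes(cfg):
    """Snapshot parameters, run init_weights(), and report which ones changed."""
    model = build_segmentor(cfg.model)
    before = {n: p.detach().clone() for n, p in model.named_parameters()}
    model.init_weights()
    for name, param in model.named_parameters():
        changed = not torch.equal(before[name], param.detach())
        status = "re-initialized" if changed else "unchanged (default init)"
        print(f"{name} - {tuple(param.shape)}: {status}")
```

Second, the param-group dump that follows is produced by the layer-wise learning-rate decay constructor. The sketch below is not the actual `CustomLayerDecayOptimizerConstructor` implementation; it is an assumption that reproduces the numbers visible in the dump, taking `lr_scale = layer_decay_rate ** (num_total_layers - 1 - layer_id)` with 52 total layer groups ("0.950000 - 52" in the log) and a base lr of 1e-5 inferred from `layer_0`'s lr / lr_scale ratio:

```python
layer_decay_rate = 0.95
num_layers = 50                    # sum of depths [6, 6, 32, 6]
num_total_layers = num_layers + 2  # patch_embed group + final group -> 52
base_lr = 1e-5                     # inferred from the logged layer_0 values
base_weight_decay = 0.05

def group_for(layer_id: int, param_name: str, param_ndim: int) -> dict:
    """Optimizer settings a parameter would fall into under these assumptions."""
    # 1-D tensors (norm scales) and biases are excluded from weight decay,
    # matching the *_no_decay groups in the dump.
    no_decay = param_ndim == 1 or param_name.endswith(".bias")
    lr_scale = layer_decay_rate ** (num_total_layers - 1 - layer_id)
    return {
        "group": f"layer_{layer_id}_{'no_decay' if no_decay else 'decay'}",
        "lr_scale": lr_scale,
        "lr": base_lr * lr_scale,
        "weight_decay": 0.0 if no_decay else base_weight_decay,
    }

# layer_0_decay in the log: lr_scale ~= 0.0731, lr ~= 7.31e-07, weight_decay 0.05
print(group_for(0, "backbone.patch_embed.conv1.weight", param_ndim=4))
# layer_7_no_decay in the log: lr_scale ~= 0.1047, lr ~= 1.05e-06, weight_decay 0.0
print(group_for(7, "backbone.levels.1.blocks.0.norm1.0.weight", param_ndim=1))
```

With these assumptions, layer ids closer to the output (higher `layer_id`) get lr_scale closer to 1.0, which is why the scales in the dump grow monotonically from roughly 0.073 at `layer_0` toward 1.0 at the last group.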
+2025-05-28 02:21:27,595 - mmseg - INFO - Build CustomLayerDecayOptimizerConstructor 0.950000 - 52 +2025-05-28 02:21:27,605 - mmseg - INFO - Param groups = { + "layer_0_decay": { + "param_names": [ + "backbone.patch_embed.conv1.weight", + "backbone.patch_embed.conv2.weight", + "decode_head.query_embed.weight", + "decode_head.query_feat.weight", + "decode_head.level_embed.weight", + "decode_head.cls_embed.weight", + "decode_head.mask_embed.0.weight", + "decode_head.mask_embed.2.weight", + "decode_head.mask_embed.4.weight" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.05 + }, + "layer_0_no_decay": { + "param_names": [ + "backbone.patch_embed.conv1.bias", + "backbone.patch_embed.norm1.1.weight", + "backbone.patch_embed.norm1.1.bias", + "backbone.patch_embed.conv2.bias", + "backbone.patch_embed.norm2.1.weight", + "backbone.patch_embed.norm2.1.bias", + "decode_head.cls_embed.bias", + "decode_head.mask_embed.0.bias", + "decode_head.mask_embed.2.bias", + "decode_head.mask_embed.4.bias" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.0 + }, + "layer_1_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.norm1.0.weight", + "backbone.levels.0.blocks.0.norm1.0.bias", + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.0.dcn.offset.bias", + "backbone.levels.0.blocks.0.dcn.mask.bias", + "backbone.levels.0.blocks.0.dcn.input_proj.bias", + "backbone.levels.0.blocks.0.dcn.output_proj.bias", + "backbone.levels.0.blocks.0.norm2.0.weight", + "backbone.levels.0.blocks.0.norm2.0.bias", + "backbone.levels.0.blocks.0.mlp.fc1.bias", + "backbone.levels.0.blocks.0.mlp.fc2.bias", + "backbone.levels.0.blocks.0.res_post_norm1.0.weight", + "backbone.levels.0.blocks.0.res_post_norm1.0.bias", + "backbone.levels.0.blocks.0.res_post_norm2.0.weight", + "backbone.levels.0.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.0 + }, + "layer_1_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.0.dcn.offset.weight", + "backbone.levels.0.blocks.0.dcn.mask.weight", + "backbone.levels.0.blocks.0.dcn.input_proj.weight", + "backbone.levels.0.blocks.0.dcn.output_proj.weight", + "backbone.levels.0.blocks.0.mlp.fc1.weight", + "backbone.levels.0.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.05 + }, + "layer_2_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.norm1.0.weight", + "backbone.levels.0.blocks.1.norm1.0.bias", + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.1.dcn.offset.bias", + "backbone.levels.0.blocks.1.dcn.mask.bias", + "backbone.levels.0.blocks.1.dcn.input_proj.bias", + "backbone.levels.0.blocks.1.dcn.output_proj.bias", + "backbone.levels.0.blocks.1.norm2.0.weight", + "backbone.levels.0.blocks.1.norm2.0.bias", + "backbone.levels.0.blocks.1.mlp.fc1.bias", + "backbone.levels.0.blocks.1.mlp.fc2.bias", + "backbone.levels.0.blocks.1.res_post_norm1.0.weight", + 
"backbone.levels.0.blocks.1.res_post_norm1.0.bias", + "backbone.levels.0.blocks.1.res_post_norm2.0.weight", + "backbone.levels.0.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.0 + }, + "layer_2_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.1.dcn.offset.weight", + "backbone.levels.0.blocks.1.dcn.mask.weight", + "backbone.levels.0.blocks.1.dcn.input_proj.weight", + "backbone.levels.0.blocks.1.dcn.output_proj.weight", + "backbone.levels.0.blocks.1.mlp.fc1.weight", + "backbone.levels.0.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.05 + }, + "layer_3_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.norm1.0.weight", + "backbone.levels.0.blocks.2.norm1.0.bias", + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.2.dcn.offset.bias", + "backbone.levels.0.blocks.2.dcn.mask.bias", + "backbone.levels.0.blocks.2.dcn.input_proj.bias", + "backbone.levels.0.blocks.2.dcn.output_proj.bias", + "backbone.levels.0.blocks.2.norm2.0.weight", + "backbone.levels.0.blocks.2.norm2.0.bias", + "backbone.levels.0.blocks.2.mlp.fc1.bias", + "backbone.levels.0.blocks.2.mlp.fc2.bias", + "backbone.levels.0.blocks.2.res_post_norm1.0.weight", + "backbone.levels.0.blocks.2.res_post_norm1.0.bias", + "backbone.levels.0.blocks.2.res_post_norm2.0.weight", + "backbone.levels.0.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.0 + }, + "layer_3_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.2.dcn.offset.weight", + "backbone.levels.0.blocks.2.dcn.mask.weight", + "backbone.levels.0.blocks.2.dcn.input_proj.weight", + "backbone.levels.0.blocks.2.dcn.output_proj.weight", + "backbone.levels.0.blocks.2.mlp.fc1.weight", + "backbone.levels.0.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.05 + }, + "layer_4_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.norm1.0.weight", + "backbone.levels.0.blocks.3.norm1.0.bias", + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.3.dcn.offset.bias", + "backbone.levels.0.blocks.3.dcn.mask.bias", + "backbone.levels.0.blocks.3.dcn.input_proj.bias", + "backbone.levels.0.blocks.3.dcn.output_proj.bias", + "backbone.levels.0.blocks.3.norm2.0.weight", + "backbone.levels.0.blocks.3.norm2.0.bias", + "backbone.levels.0.blocks.3.mlp.fc1.bias", + "backbone.levels.0.blocks.3.mlp.fc2.bias", + "backbone.levels.0.blocks.3.res_post_norm1.0.weight", + "backbone.levels.0.blocks.3.res_post_norm1.0.bias", + "backbone.levels.0.blocks.3.res_post_norm2.0.weight", + "backbone.levels.0.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.0 + }, + "layer_4_decay": { + "param_names": [ + 
"backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.3.dcn.offset.weight", + "backbone.levels.0.blocks.3.dcn.mask.weight", + "backbone.levels.0.blocks.3.dcn.input_proj.weight", + "backbone.levels.0.blocks.3.dcn.output_proj.weight", + "backbone.levels.0.blocks.3.mlp.fc1.weight", + "backbone.levels.0.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.05 + }, + "layer_5_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.norm1.0.weight", + "backbone.levels.0.blocks.4.norm1.0.bias", + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.4.dcn.offset.bias", + "backbone.levels.0.blocks.4.dcn.mask.bias", + "backbone.levels.0.blocks.4.dcn.input_proj.bias", + "backbone.levels.0.blocks.4.dcn.output_proj.bias", + "backbone.levels.0.blocks.4.norm2.0.weight", + "backbone.levels.0.blocks.4.norm2.0.bias", + "backbone.levels.0.blocks.4.mlp.fc1.bias", + "backbone.levels.0.blocks.4.mlp.fc2.bias", + "backbone.levels.0.blocks.4.res_post_norm1.0.weight", + "backbone.levels.0.blocks.4.res_post_norm1.0.bias", + "backbone.levels.0.blocks.4.res_post_norm2.0.weight", + "backbone.levels.0.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.0 + }, + "layer_5_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.4.dcn.offset.weight", + "backbone.levels.0.blocks.4.dcn.mask.weight", + "backbone.levels.0.blocks.4.dcn.input_proj.weight", + "backbone.levels.0.blocks.4.dcn.output_proj.weight", + "backbone.levels.0.blocks.4.mlp.fc1.weight", + "backbone.levels.0.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.05 + }, + "layer_6_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.norm1.0.weight", + "backbone.levels.0.blocks.5.norm1.0.bias", + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.5.dcn.offset.bias", + "backbone.levels.0.blocks.5.dcn.mask.bias", + "backbone.levels.0.blocks.5.dcn.input_proj.bias", + "backbone.levels.0.blocks.5.dcn.output_proj.bias", + "backbone.levels.0.blocks.5.norm2.0.weight", + "backbone.levels.0.blocks.5.norm2.0.bias", + "backbone.levels.0.blocks.5.mlp.fc1.bias", + "backbone.levels.0.blocks.5.mlp.fc2.bias", + "backbone.levels.0.blocks.5.res_post_norm1.0.weight", + "backbone.levels.0.blocks.5.res_post_norm1.0.bias", + "backbone.levels.0.blocks.5.res_post_norm2.0.weight", + "backbone.levels.0.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.0 + }, + "layer_6_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.5.dcn.offset.weight", + "backbone.levels.0.blocks.5.dcn.mask.weight", + "backbone.levels.0.blocks.5.dcn.input_proj.weight", + 
"backbone.levels.0.blocks.5.dcn.output_proj.weight", + "backbone.levels.0.blocks.5.mlp.fc1.weight", + "backbone.levels.0.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.05 + }, + "layer_7_no_decay": { + "param_names": [ + "backbone.levels.0.norm.0.weight", + "backbone.levels.0.norm.0.bias", + "backbone.levels.0.downsample.norm.1.weight", + "backbone.levels.0.downsample.norm.1.bias", + "backbone.levels.1.blocks.0.norm1.0.weight", + "backbone.levels.1.blocks.0.norm1.0.bias", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.0.dcn.offset.bias", + "backbone.levels.1.blocks.0.dcn.mask.bias", + "backbone.levels.1.blocks.0.dcn.input_proj.bias", + "backbone.levels.1.blocks.0.dcn.output_proj.bias", + "backbone.levels.1.blocks.0.norm2.0.weight", + "backbone.levels.1.blocks.0.norm2.0.bias", + "backbone.levels.1.blocks.0.mlp.fc1.bias", + "backbone.levels.1.blocks.0.mlp.fc2.bias", + "backbone.levels.1.blocks.0.res_post_norm1.0.weight", + "backbone.levels.1.blocks.0.res_post_norm1.0.bias", + "backbone.levels.1.blocks.0.res_post_norm2.0.weight", + "backbone.levels.1.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.0 + }, + "layer_7_decay": { + "param_names": [ + "backbone.levels.0.downsample.conv.weight", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.0.dcn.offset.weight", + "backbone.levels.1.blocks.0.dcn.mask.weight", + "backbone.levels.1.blocks.0.dcn.input_proj.weight", + "backbone.levels.1.blocks.0.dcn.output_proj.weight", + "backbone.levels.1.blocks.0.mlp.fc1.weight", + "backbone.levels.1.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.05 + }, + "layer_8_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.norm1.0.weight", + "backbone.levels.1.blocks.1.norm1.0.bias", + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.1.dcn.offset.bias", + "backbone.levels.1.blocks.1.dcn.mask.bias", + "backbone.levels.1.blocks.1.dcn.input_proj.bias", + "backbone.levels.1.blocks.1.dcn.output_proj.bias", + "backbone.levels.1.blocks.1.norm2.0.weight", + "backbone.levels.1.blocks.1.norm2.0.bias", + "backbone.levels.1.blocks.1.mlp.fc1.bias", + "backbone.levels.1.blocks.1.mlp.fc2.bias", + "backbone.levels.1.blocks.1.res_post_norm1.0.weight", + "backbone.levels.1.blocks.1.res_post_norm1.0.bias", + "backbone.levels.1.blocks.1.res_post_norm2.0.weight", + "backbone.levels.1.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.0 + }, + "layer_8_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.1.dcn.offset.weight", + "backbone.levels.1.blocks.1.dcn.mask.weight", + "backbone.levels.1.blocks.1.dcn.input_proj.weight", + "backbone.levels.1.blocks.1.dcn.output_proj.weight", + "backbone.levels.1.blocks.1.mlp.fc1.weight", 
+ "backbone.levels.1.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.05 + }, + "layer_9_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.norm1.0.weight", + "backbone.levels.1.blocks.2.norm1.0.bias", + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.2.dcn.offset.bias", + "backbone.levels.1.blocks.2.dcn.mask.bias", + "backbone.levels.1.blocks.2.dcn.input_proj.bias", + "backbone.levels.1.blocks.2.dcn.output_proj.bias", + "backbone.levels.1.blocks.2.norm2.0.weight", + "backbone.levels.1.blocks.2.norm2.0.bias", + "backbone.levels.1.blocks.2.mlp.fc1.bias", + "backbone.levels.1.blocks.2.mlp.fc2.bias", + "backbone.levels.1.blocks.2.res_post_norm1.0.weight", + "backbone.levels.1.blocks.2.res_post_norm1.0.bias", + "backbone.levels.1.blocks.2.res_post_norm2.0.weight", + "backbone.levels.1.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.0 + }, + "layer_9_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.2.dcn.offset.weight", + "backbone.levels.1.blocks.2.dcn.mask.weight", + "backbone.levels.1.blocks.2.dcn.input_proj.weight", + "backbone.levels.1.blocks.2.dcn.output_proj.weight", + "backbone.levels.1.blocks.2.mlp.fc1.weight", + "backbone.levels.1.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.05 + }, + "layer_10_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.norm1.0.weight", + "backbone.levels.1.blocks.3.norm1.0.bias", + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.3.dcn.offset.bias", + "backbone.levels.1.blocks.3.dcn.mask.bias", + "backbone.levels.1.blocks.3.dcn.input_proj.bias", + "backbone.levels.1.blocks.3.dcn.output_proj.bias", + "backbone.levels.1.blocks.3.norm2.0.weight", + "backbone.levels.1.blocks.3.norm2.0.bias", + "backbone.levels.1.blocks.3.mlp.fc1.bias", + "backbone.levels.1.blocks.3.mlp.fc2.bias", + "backbone.levels.1.blocks.3.res_post_norm1.0.weight", + "backbone.levels.1.blocks.3.res_post_norm1.0.bias", + "backbone.levels.1.blocks.3.res_post_norm2.0.weight", + "backbone.levels.1.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.0 + }, + "layer_10_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.3.dcn.offset.weight", + "backbone.levels.1.blocks.3.dcn.mask.weight", + "backbone.levels.1.blocks.3.dcn.input_proj.weight", + "backbone.levels.1.blocks.3.dcn.output_proj.weight", + "backbone.levels.1.blocks.3.mlp.fc1.weight", + "backbone.levels.1.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.05 + }, + "layer_11_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.norm1.0.weight", + "backbone.levels.1.blocks.4.norm1.0.bias", + 
"backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.4.dcn.offset.bias", + "backbone.levels.1.blocks.4.dcn.mask.bias", + "backbone.levels.1.blocks.4.dcn.input_proj.bias", + "backbone.levels.1.blocks.4.dcn.output_proj.bias", + "backbone.levels.1.blocks.4.norm2.0.weight", + "backbone.levels.1.blocks.4.norm2.0.bias", + "backbone.levels.1.blocks.4.mlp.fc1.bias", + "backbone.levels.1.blocks.4.mlp.fc2.bias", + "backbone.levels.1.blocks.4.res_post_norm1.0.weight", + "backbone.levels.1.blocks.4.res_post_norm1.0.bias", + "backbone.levels.1.blocks.4.res_post_norm2.0.weight", + "backbone.levels.1.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.0 + }, + "layer_11_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.4.dcn.offset.weight", + "backbone.levels.1.blocks.4.dcn.mask.weight", + "backbone.levels.1.blocks.4.dcn.input_proj.weight", + "backbone.levels.1.blocks.4.dcn.output_proj.weight", + "backbone.levels.1.blocks.4.mlp.fc1.weight", + "backbone.levels.1.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.05 + }, + "layer_12_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.norm1.0.weight", + "backbone.levels.1.blocks.5.norm1.0.bias", + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.5.dcn.offset.bias", + "backbone.levels.1.blocks.5.dcn.mask.bias", + "backbone.levels.1.blocks.5.dcn.input_proj.bias", + "backbone.levels.1.blocks.5.dcn.output_proj.bias", + "backbone.levels.1.blocks.5.norm2.0.weight", + "backbone.levels.1.blocks.5.norm2.0.bias", + "backbone.levels.1.blocks.5.mlp.fc1.bias", + "backbone.levels.1.blocks.5.mlp.fc2.bias", + "backbone.levels.1.blocks.5.res_post_norm1.0.weight", + "backbone.levels.1.blocks.5.res_post_norm1.0.bias", + "backbone.levels.1.blocks.5.res_post_norm2.0.weight", + "backbone.levels.1.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.0 + }, + "layer_12_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.5.dcn.offset.weight", + "backbone.levels.1.blocks.5.dcn.mask.weight", + "backbone.levels.1.blocks.5.dcn.input_proj.weight", + "backbone.levels.1.blocks.5.dcn.output_proj.weight", + "backbone.levels.1.blocks.5.mlp.fc1.weight", + "backbone.levels.1.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.05 + }, + "layer_13_no_decay": { + "param_names": [ + "backbone.levels.1.norm.0.weight", + "backbone.levels.1.norm.0.bias", + "backbone.levels.1.downsample.norm.1.weight", + "backbone.levels.1.downsample.norm.1.bias", + "backbone.levels.2.blocks.0.norm1.0.weight", + "backbone.levels.2.blocks.0.norm1.0.bias", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.bias", + 
"backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.0.dcn.offset.bias", + "backbone.levels.2.blocks.0.dcn.mask.bias", + "backbone.levels.2.blocks.0.dcn.input_proj.bias", + "backbone.levels.2.blocks.0.dcn.output_proj.bias", + "backbone.levels.2.blocks.0.norm2.0.weight", + "backbone.levels.2.blocks.0.norm2.0.bias", + "backbone.levels.2.blocks.0.mlp.fc1.bias", + "backbone.levels.2.blocks.0.mlp.fc2.bias", + "backbone.levels.2.blocks.0.res_post_norm1.0.weight", + "backbone.levels.2.blocks.0.res_post_norm1.0.bias", + "backbone.levels.2.blocks.0.res_post_norm2.0.weight", + "backbone.levels.2.blocks.0.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.0.0.weight", + "backbone.levels.2.post_norms.0.0.bias" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.0 + }, + "layer_13_decay": { + "param_names": [ + "backbone.levels.1.downsample.conv.weight", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.0.dcn.offset.weight", + "backbone.levels.2.blocks.0.dcn.mask.weight", + "backbone.levels.2.blocks.0.dcn.input_proj.weight", + "backbone.levels.2.blocks.0.dcn.output_proj.weight", + "backbone.levels.2.blocks.0.mlp.fc1.weight", + "backbone.levels.2.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.05 + }, + "layer_14_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.norm1.0.weight", + "backbone.levels.2.blocks.1.norm1.0.bias", + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.1.dcn.offset.bias", + "backbone.levels.2.blocks.1.dcn.mask.bias", + "backbone.levels.2.blocks.1.dcn.input_proj.bias", + "backbone.levels.2.blocks.1.dcn.output_proj.bias", + "backbone.levels.2.blocks.1.norm2.0.weight", + "backbone.levels.2.blocks.1.norm2.0.bias", + "backbone.levels.2.blocks.1.mlp.fc1.bias", + "backbone.levels.2.blocks.1.mlp.fc2.bias", + "backbone.levels.2.blocks.1.res_post_norm1.0.weight", + "backbone.levels.2.blocks.1.res_post_norm1.0.bias", + "backbone.levels.2.blocks.1.res_post_norm2.0.weight", + "backbone.levels.2.blocks.1.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.1.0.weight", + "backbone.levels.2.post_norms.1.0.bias" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.0 + }, + "layer_14_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.1.dcn.offset.weight", + "backbone.levels.2.blocks.1.dcn.mask.weight", + "backbone.levels.2.blocks.1.dcn.input_proj.weight", + "backbone.levels.2.blocks.1.dcn.output_proj.weight", + "backbone.levels.2.blocks.1.mlp.fc1.weight", + "backbone.levels.2.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.05 + }, + "layer_15_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.norm1.0.weight", + "backbone.levels.2.blocks.2.norm1.0.bias", + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight", + 
"backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.2.dcn.offset.bias", + "backbone.levels.2.blocks.2.dcn.mask.bias", + "backbone.levels.2.blocks.2.dcn.input_proj.bias", + "backbone.levels.2.blocks.2.dcn.output_proj.bias", + "backbone.levels.2.blocks.2.norm2.0.weight", + "backbone.levels.2.blocks.2.norm2.0.bias", + "backbone.levels.2.blocks.2.mlp.fc1.bias", + "backbone.levels.2.blocks.2.mlp.fc2.bias", + "backbone.levels.2.blocks.2.res_post_norm1.0.weight", + "backbone.levels.2.blocks.2.res_post_norm1.0.bias", + "backbone.levels.2.blocks.2.res_post_norm2.0.weight", + "backbone.levels.2.blocks.2.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.2.0.weight", + "backbone.levels.2.post_norms.2.0.bias" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.0 + }, + "layer_15_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.2.dcn.offset.weight", + "backbone.levels.2.blocks.2.dcn.mask.weight", + "backbone.levels.2.blocks.2.dcn.input_proj.weight", + "backbone.levels.2.blocks.2.dcn.output_proj.weight", + "backbone.levels.2.blocks.2.mlp.fc1.weight", + "backbone.levels.2.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.05 + }, + "layer_16_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.norm1.0.weight", + "backbone.levels.2.blocks.3.norm1.0.bias", + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.3.dcn.offset.bias", + "backbone.levels.2.blocks.3.dcn.mask.bias", + "backbone.levels.2.blocks.3.dcn.input_proj.bias", + "backbone.levels.2.blocks.3.dcn.output_proj.bias", + "backbone.levels.2.blocks.3.norm2.0.weight", + "backbone.levels.2.blocks.3.norm2.0.bias", + "backbone.levels.2.blocks.3.mlp.fc1.bias", + "backbone.levels.2.blocks.3.mlp.fc2.bias", + "backbone.levels.2.blocks.3.res_post_norm1.0.weight", + "backbone.levels.2.blocks.3.res_post_norm1.0.bias", + "backbone.levels.2.blocks.3.res_post_norm2.0.weight", + "backbone.levels.2.blocks.3.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.3.0.weight", + "backbone.levels.2.post_norms.3.0.bias" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.0 + }, + "layer_16_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.3.dcn.offset.weight", + "backbone.levels.2.blocks.3.dcn.mask.weight", + "backbone.levels.2.blocks.3.dcn.input_proj.weight", + "backbone.levels.2.blocks.3.dcn.output_proj.weight", + "backbone.levels.2.blocks.3.mlp.fc1.weight", + "backbone.levels.2.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.05 + }, + "layer_17_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.norm1.0.weight", + "backbone.levels.2.blocks.4.norm1.0.bias", + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.4.dcn.offset.bias", + 
"backbone.levels.2.blocks.4.dcn.mask.bias", + "backbone.levels.2.blocks.4.dcn.input_proj.bias", + "backbone.levels.2.blocks.4.dcn.output_proj.bias", + "backbone.levels.2.blocks.4.norm2.0.weight", + "backbone.levels.2.blocks.4.norm2.0.bias", + "backbone.levels.2.blocks.4.mlp.fc1.bias", + "backbone.levels.2.blocks.4.mlp.fc2.bias", + "backbone.levels.2.blocks.4.res_post_norm1.0.weight", + "backbone.levels.2.blocks.4.res_post_norm1.0.bias", + "backbone.levels.2.blocks.4.res_post_norm2.0.weight", + "backbone.levels.2.blocks.4.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.4.0.weight", + "backbone.levels.2.post_norms.4.0.bias" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.0 + }, + "layer_17_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.4.dcn.offset.weight", + "backbone.levels.2.blocks.4.dcn.mask.weight", + "backbone.levels.2.blocks.4.dcn.input_proj.weight", + "backbone.levels.2.blocks.4.dcn.output_proj.weight", + "backbone.levels.2.blocks.4.mlp.fc1.weight", + "backbone.levels.2.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.05 + }, + "layer_18_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.norm1.0.weight", + "backbone.levels.2.blocks.5.norm1.0.bias", + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.5.dcn.offset.bias", + "backbone.levels.2.blocks.5.dcn.mask.bias", + "backbone.levels.2.blocks.5.dcn.input_proj.bias", + "backbone.levels.2.blocks.5.dcn.output_proj.bias", + "backbone.levels.2.blocks.5.norm2.0.weight", + "backbone.levels.2.blocks.5.norm2.0.bias", + "backbone.levels.2.blocks.5.mlp.fc1.bias", + "backbone.levels.2.blocks.5.mlp.fc2.bias", + "backbone.levels.2.blocks.5.res_post_norm1.0.weight", + "backbone.levels.2.blocks.5.res_post_norm1.0.bias", + "backbone.levels.2.blocks.5.res_post_norm2.0.weight", + "backbone.levels.2.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.0 + }, + "layer_18_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.5.dcn.offset.weight", + "backbone.levels.2.blocks.5.dcn.mask.weight", + "backbone.levels.2.blocks.5.dcn.input_proj.weight", + "backbone.levels.2.blocks.5.dcn.output_proj.weight", + "backbone.levels.2.blocks.5.mlp.fc1.weight", + "backbone.levels.2.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.05 + }, + "layer_19_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.norm1.0.weight", + "backbone.levels.2.blocks.6.norm1.0.bias", + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.6.dcn.offset.bias", + "backbone.levels.2.blocks.6.dcn.mask.bias", + "backbone.levels.2.blocks.6.dcn.input_proj.bias", + "backbone.levels.2.blocks.6.dcn.output_proj.bias", + "backbone.levels.2.blocks.6.norm2.0.weight", + 
"backbone.levels.2.blocks.6.norm2.0.bias", + "backbone.levels.2.blocks.6.mlp.fc1.bias", + "backbone.levels.2.blocks.6.mlp.fc2.bias", + "backbone.levels.2.blocks.6.res_post_norm1.0.weight", + "backbone.levels.2.blocks.6.res_post_norm1.0.bias", + "backbone.levels.2.blocks.6.res_post_norm2.0.weight", + "backbone.levels.2.blocks.6.res_post_norm2.0.bias" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.0 + }, + "layer_19_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.6.dcn.offset.weight", + "backbone.levels.2.blocks.6.dcn.mask.weight", + "backbone.levels.2.blocks.6.dcn.input_proj.weight", + "backbone.levels.2.blocks.6.dcn.output_proj.weight", + "backbone.levels.2.blocks.6.mlp.fc1.weight", + "backbone.levels.2.blocks.6.mlp.fc2.weight" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.05 + }, + "layer_20_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.norm1.0.weight", + "backbone.levels.2.blocks.7.norm1.0.bias", + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.7.dcn.offset.bias", + "backbone.levels.2.blocks.7.dcn.mask.bias", + "backbone.levels.2.blocks.7.dcn.input_proj.bias", + "backbone.levels.2.blocks.7.dcn.output_proj.bias", + "backbone.levels.2.blocks.7.norm2.0.weight", + "backbone.levels.2.blocks.7.norm2.0.bias", + "backbone.levels.2.blocks.7.mlp.fc1.bias", + "backbone.levels.2.blocks.7.mlp.fc2.bias", + "backbone.levels.2.blocks.7.res_post_norm1.0.weight", + "backbone.levels.2.blocks.7.res_post_norm1.0.bias", + "backbone.levels.2.blocks.7.res_post_norm2.0.weight", + "backbone.levels.2.blocks.7.res_post_norm2.0.bias" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.0 + }, + "layer_20_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.7.dcn.offset.weight", + "backbone.levels.2.blocks.7.dcn.mask.weight", + "backbone.levels.2.blocks.7.dcn.input_proj.weight", + "backbone.levels.2.blocks.7.dcn.output_proj.weight", + "backbone.levels.2.blocks.7.mlp.fc1.weight", + "backbone.levels.2.blocks.7.mlp.fc2.weight" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.05 + }, + "layer_21_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.norm1.0.weight", + "backbone.levels.2.blocks.8.norm1.0.bias", + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.8.dcn.offset.bias", + "backbone.levels.2.blocks.8.dcn.mask.bias", + "backbone.levels.2.blocks.8.dcn.input_proj.bias", + "backbone.levels.2.blocks.8.dcn.output_proj.bias", + "backbone.levels.2.blocks.8.norm2.0.weight", + "backbone.levels.2.blocks.8.norm2.0.bias", + "backbone.levels.2.blocks.8.mlp.fc1.bias", + "backbone.levels.2.blocks.8.mlp.fc2.bias", + "backbone.levels.2.blocks.8.res_post_norm1.0.weight", + "backbone.levels.2.blocks.8.res_post_norm1.0.bias", + "backbone.levels.2.blocks.8.res_post_norm2.0.weight", + 
"backbone.levels.2.blocks.8.res_post_norm2.0.bias" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.0 + }, + "layer_21_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.8.dcn.offset.weight", + "backbone.levels.2.blocks.8.dcn.mask.weight", + "backbone.levels.2.blocks.8.dcn.input_proj.weight", + "backbone.levels.2.blocks.8.dcn.output_proj.weight", + "backbone.levels.2.blocks.8.mlp.fc1.weight", + "backbone.levels.2.blocks.8.mlp.fc2.weight" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.05 + }, + "layer_22_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.norm1.0.weight", + "backbone.levels.2.blocks.9.norm1.0.bias", + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.9.dcn.offset.bias", + "backbone.levels.2.blocks.9.dcn.mask.bias", + "backbone.levels.2.blocks.9.dcn.input_proj.bias", + "backbone.levels.2.blocks.9.dcn.output_proj.bias", + "backbone.levels.2.blocks.9.norm2.0.weight", + "backbone.levels.2.blocks.9.norm2.0.bias", + "backbone.levels.2.blocks.9.mlp.fc1.bias", + "backbone.levels.2.blocks.9.mlp.fc2.bias", + "backbone.levels.2.blocks.9.res_post_norm1.0.weight", + "backbone.levels.2.blocks.9.res_post_norm1.0.bias", + "backbone.levels.2.blocks.9.res_post_norm2.0.weight", + "backbone.levels.2.blocks.9.res_post_norm2.0.bias" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.0 + }, + "layer_22_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.9.dcn.offset.weight", + "backbone.levels.2.blocks.9.dcn.mask.weight", + "backbone.levels.2.blocks.9.dcn.input_proj.weight", + "backbone.levels.2.blocks.9.dcn.output_proj.weight", + "backbone.levels.2.blocks.9.mlp.fc1.weight", + "backbone.levels.2.blocks.9.mlp.fc2.weight" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.05 + }, + "layer_23_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.norm1.0.weight", + "backbone.levels.2.blocks.10.norm1.0.bias", + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.10.dcn.offset.bias", + "backbone.levels.2.blocks.10.dcn.mask.bias", + "backbone.levels.2.blocks.10.dcn.input_proj.bias", + "backbone.levels.2.blocks.10.dcn.output_proj.bias", + "backbone.levels.2.blocks.10.norm2.0.weight", + "backbone.levels.2.blocks.10.norm2.0.bias", + "backbone.levels.2.blocks.10.mlp.fc1.bias", + "backbone.levels.2.blocks.10.mlp.fc2.bias", + "backbone.levels.2.blocks.10.res_post_norm1.0.weight", + "backbone.levels.2.blocks.10.res_post_norm1.0.bias", + "backbone.levels.2.blocks.10.res_post_norm2.0.weight", + "backbone.levels.2.blocks.10.res_post_norm2.0.bias" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.0 + }, + "layer_23_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight", + 
"backbone.levels.2.blocks.10.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.10.dcn.offset.weight", + "backbone.levels.2.blocks.10.dcn.mask.weight", + "backbone.levels.2.blocks.10.dcn.input_proj.weight", + "backbone.levels.2.blocks.10.dcn.output_proj.weight", + "backbone.levels.2.blocks.10.mlp.fc1.weight", + "backbone.levels.2.blocks.10.mlp.fc2.weight" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.05 + }, + "layer_24_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.norm1.0.weight", + "backbone.levels.2.blocks.11.norm1.0.bias", + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.11.dcn.offset.bias", + "backbone.levels.2.blocks.11.dcn.mask.bias", + "backbone.levels.2.blocks.11.dcn.input_proj.bias", + "backbone.levels.2.blocks.11.dcn.output_proj.bias", + "backbone.levels.2.blocks.11.norm2.0.weight", + "backbone.levels.2.blocks.11.norm2.0.bias", + "backbone.levels.2.blocks.11.mlp.fc1.bias", + "backbone.levels.2.blocks.11.mlp.fc2.bias", + "backbone.levels.2.blocks.11.res_post_norm1.0.weight", + "backbone.levels.2.blocks.11.res_post_norm1.0.bias", + "backbone.levels.2.blocks.11.res_post_norm2.0.weight", + "backbone.levels.2.blocks.11.res_post_norm2.0.bias" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.0 + }, + "layer_24_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.11.dcn.offset.weight", + "backbone.levels.2.blocks.11.dcn.mask.weight", + "backbone.levels.2.blocks.11.dcn.input_proj.weight", + "backbone.levels.2.blocks.11.dcn.output_proj.weight", + "backbone.levels.2.blocks.11.mlp.fc1.weight", + "backbone.levels.2.blocks.11.mlp.fc2.weight" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.05 + }, + "layer_25_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.norm1.0.weight", + "backbone.levels.2.blocks.12.norm1.0.bias", + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.12.dcn.offset.bias", + "backbone.levels.2.blocks.12.dcn.mask.bias", + "backbone.levels.2.blocks.12.dcn.input_proj.bias", + "backbone.levels.2.blocks.12.dcn.output_proj.bias", + "backbone.levels.2.blocks.12.norm2.0.weight", + "backbone.levels.2.blocks.12.norm2.0.bias", + "backbone.levels.2.blocks.12.mlp.fc1.bias", + "backbone.levels.2.blocks.12.mlp.fc2.bias", + "backbone.levels.2.blocks.12.res_post_norm1.0.weight", + "backbone.levels.2.blocks.12.res_post_norm1.0.bias", + "backbone.levels.2.blocks.12.res_post_norm2.0.weight", + "backbone.levels.2.blocks.12.res_post_norm2.0.bias" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.0 + }, + "layer_25_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.12.dcn.offset.weight", + "backbone.levels.2.blocks.12.dcn.mask.weight", + "backbone.levels.2.blocks.12.dcn.input_proj.weight", + 
"backbone.levels.2.blocks.12.dcn.output_proj.weight", + "backbone.levels.2.blocks.12.mlp.fc1.weight", + "backbone.levels.2.blocks.12.mlp.fc2.weight" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.05 + }, + "layer_26_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.norm1.0.weight", + "backbone.levels.2.blocks.13.norm1.0.bias", + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.13.dcn.offset.bias", + "backbone.levels.2.blocks.13.dcn.mask.bias", + "backbone.levels.2.blocks.13.dcn.input_proj.bias", + "backbone.levels.2.blocks.13.dcn.output_proj.bias", + "backbone.levels.2.blocks.13.norm2.0.weight", + "backbone.levels.2.blocks.13.norm2.0.bias", + "backbone.levels.2.blocks.13.mlp.fc1.bias", + "backbone.levels.2.blocks.13.mlp.fc2.bias", + "backbone.levels.2.blocks.13.res_post_norm1.0.weight", + "backbone.levels.2.blocks.13.res_post_norm1.0.bias", + "backbone.levels.2.blocks.13.res_post_norm2.0.weight", + "backbone.levels.2.blocks.13.res_post_norm2.0.bias" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.0 + }, + "layer_26_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.13.dcn.offset.weight", + "backbone.levels.2.blocks.13.dcn.mask.weight", + "backbone.levels.2.blocks.13.dcn.input_proj.weight", + "backbone.levels.2.blocks.13.dcn.output_proj.weight", + "backbone.levels.2.blocks.13.mlp.fc1.weight", + "backbone.levels.2.blocks.13.mlp.fc2.weight" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.05 + }, + "layer_27_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.norm1.0.weight", + "backbone.levels.2.blocks.14.norm1.0.bias", + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.14.dcn.offset.bias", + "backbone.levels.2.blocks.14.dcn.mask.bias", + "backbone.levels.2.blocks.14.dcn.input_proj.bias", + "backbone.levels.2.blocks.14.dcn.output_proj.bias", + "backbone.levels.2.blocks.14.norm2.0.weight", + "backbone.levels.2.blocks.14.norm2.0.bias", + "backbone.levels.2.blocks.14.mlp.fc1.bias", + "backbone.levels.2.blocks.14.mlp.fc2.bias", + "backbone.levels.2.blocks.14.res_post_norm1.0.weight", + "backbone.levels.2.blocks.14.res_post_norm1.0.bias", + "backbone.levels.2.blocks.14.res_post_norm2.0.weight", + "backbone.levels.2.blocks.14.res_post_norm2.0.bias" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.0 + }, + "layer_27_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.14.dcn.offset.weight", + "backbone.levels.2.blocks.14.dcn.mask.weight", + "backbone.levels.2.blocks.14.dcn.input_proj.weight", + "backbone.levels.2.blocks.14.dcn.output_proj.weight", + "backbone.levels.2.blocks.14.mlp.fc1.weight", + "backbone.levels.2.blocks.14.mlp.fc2.weight" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.05 + }, + 
"layer_28_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.norm1.0.weight", + "backbone.levels.2.blocks.15.norm1.0.bias", + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.15.dcn.offset.bias", + "backbone.levels.2.blocks.15.dcn.mask.bias", + "backbone.levels.2.blocks.15.dcn.input_proj.bias", + "backbone.levels.2.blocks.15.dcn.output_proj.bias", + "backbone.levels.2.blocks.15.norm2.0.weight", + "backbone.levels.2.blocks.15.norm2.0.bias", + "backbone.levels.2.blocks.15.mlp.fc1.bias", + "backbone.levels.2.blocks.15.mlp.fc2.bias", + "backbone.levels.2.blocks.15.res_post_norm1.0.weight", + "backbone.levels.2.blocks.15.res_post_norm1.0.bias", + "backbone.levels.2.blocks.15.res_post_norm2.0.weight", + "backbone.levels.2.blocks.15.res_post_norm2.0.bias" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.0 + }, + "layer_28_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.15.dcn.offset.weight", + "backbone.levels.2.blocks.15.dcn.mask.weight", + "backbone.levels.2.blocks.15.dcn.input_proj.weight", + "backbone.levels.2.blocks.15.dcn.output_proj.weight", + "backbone.levels.2.blocks.15.mlp.fc1.weight", + "backbone.levels.2.blocks.15.mlp.fc2.weight" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.05 + }, + "layer_29_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.norm1.0.weight", + "backbone.levels.2.blocks.16.norm1.0.bias", + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.16.dcn.offset.bias", + "backbone.levels.2.blocks.16.dcn.mask.bias", + "backbone.levels.2.blocks.16.dcn.input_proj.bias", + "backbone.levels.2.blocks.16.dcn.output_proj.bias", + "backbone.levels.2.blocks.16.norm2.0.weight", + "backbone.levels.2.blocks.16.norm2.0.bias", + "backbone.levels.2.blocks.16.mlp.fc1.bias", + "backbone.levels.2.blocks.16.mlp.fc2.bias", + "backbone.levels.2.blocks.16.res_post_norm1.0.weight", + "backbone.levels.2.blocks.16.res_post_norm1.0.bias", + "backbone.levels.2.blocks.16.res_post_norm2.0.weight", + "backbone.levels.2.blocks.16.res_post_norm2.0.bias" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.0 + }, + "layer_29_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.16.dcn.offset.weight", + "backbone.levels.2.blocks.16.dcn.mask.weight", + "backbone.levels.2.blocks.16.dcn.input_proj.weight", + "backbone.levels.2.blocks.16.dcn.output_proj.weight", + "backbone.levels.2.blocks.16.mlp.fc1.weight", + "backbone.levels.2.blocks.16.mlp.fc2.weight" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.05 + }, + "layer_30_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.norm1.0.weight", + "backbone.levels.2.blocks.17.norm1.0.bias", + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.bias", + 
"backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.17.dcn.offset.bias", + "backbone.levels.2.blocks.17.dcn.mask.bias", + "backbone.levels.2.blocks.17.dcn.input_proj.bias", + "backbone.levels.2.blocks.17.dcn.output_proj.bias", + "backbone.levels.2.blocks.17.norm2.0.weight", + "backbone.levels.2.blocks.17.norm2.0.bias", + "backbone.levels.2.blocks.17.mlp.fc1.bias", + "backbone.levels.2.blocks.17.mlp.fc2.bias", + "backbone.levels.2.blocks.17.res_post_norm1.0.weight", + "backbone.levels.2.blocks.17.res_post_norm1.0.bias", + "backbone.levels.2.blocks.17.res_post_norm2.0.weight", + "backbone.levels.2.blocks.17.res_post_norm2.0.bias" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.0 + }, + "layer_30_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.17.dcn.offset.weight", + "backbone.levels.2.blocks.17.dcn.mask.weight", + "backbone.levels.2.blocks.17.dcn.input_proj.weight", + "backbone.levels.2.blocks.17.dcn.output_proj.weight", + "backbone.levels.2.blocks.17.mlp.fc1.weight", + "backbone.levels.2.blocks.17.mlp.fc2.weight" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.05 + }, + "layer_31_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.norm1.0.weight", + "backbone.levels.2.blocks.18.norm1.0.bias", + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.18.dcn.offset.bias", + "backbone.levels.2.blocks.18.dcn.mask.bias", + "backbone.levels.2.blocks.18.dcn.input_proj.bias", + "backbone.levels.2.blocks.18.dcn.output_proj.bias", + "backbone.levels.2.blocks.18.norm2.0.weight", + "backbone.levels.2.blocks.18.norm2.0.bias", + "backbone.levels.2.blocks.18.mlp.fc1.bias", + "backbone.levels.2.blocks.18.mlp.fc2.bias", + "backbone.levels.2.blocks.18.res_post_norm1.0.weight", + "backbone.levels.2.blocks.18.res_post_norm1.0.bias", + "backbone.levels.2.blocks.18.res_post_norm2.0.weight", + "backbone.levels.2.blocks.18.res_post_norm2.0.bias" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.0 + }, + "layer_31_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.18.dcn.offset.weight", + "backbone.levels.2.blocks.18.dcn.mask.weight", + "backbone.levels.2.blocks.18.dcn.input_proj.weight", + "backbone.levels.2.blocks.18.dcn.output_proj.weight", + "backbone.levels.2.blocks.18.mlp.fc1.weight", + "backbone.levels.2.blocks.18.mlp.fc2.weight" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.05 + }, + "layer_32_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.norm1.0.weight", + "backbone.levels.2.blocks.19.norm1.0.bias", + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.19.dcn.offset.bias", + "backbone.levels.2.blocks.19.dcn.mask.bias", + "backbone.levels.2.blocks.19.dcn.input_proj.bias", + 
"backbone.levels.2.blocks.19.dcn.output_proj.bias", + "backbone.levels.2.blocks.19.norm2.0.weight", + "backbone.levels.2.blocks.19.norm2.0.bias", + "backbone.levels.2.blocks.19.mlp.fc1.bias", + "backbone.levels.2.blocks.19.mlp.fc2.bias", + "backbone.levels.2.blocks.19.res_post_norm1.0.weight", + "backbone.levels.2.blocks.19.res_post_norm1.0.bias", + "backbone.levels.2.blocks.19.res_post_norm2.0.weight", + "backbone.levels.2.blocks.19.res_post_norm2.0.bias" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.0 + }, + "layer_32_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.19.dcn.offset.weight", + "backbone.levels.2.blocks.19.dcn.mask.weight", + "backbone.levels.2.blocks.19.dcn.input_proj.weight", + "backbone.levels.2.blocks.19.dcn.output_proj.weight", + "backbone.levels.2.blocks.19.mlp.fc1.weight", + "backbone.levels.2.blocks.19.mlp.fc2.weight" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.05 + }, + "layer_33_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.norm1.0.weight", + "backbone.levels.2.blocks.20.norm1.0.bias", + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.20.dcn.offset.bias", + "backbone.levels.2.blocks.20.dcn.mask.bias", + "backbone.levels.2.blocks.20.dcn.input_proj.bias", + "backbone.levels.2.blocks.20.dcn.output_proj.bias", + "backbone.levels.2.blocks.20.norm2.0.weight", + "backbone.levels.2.blocks.20.norm2.0.bias", + "backbone.levels.2.blocks.20.mlp.fc1.bias", + "backbone.levels.2.blocks.20.mlp.fc2.bias", + "backbone.levels.2.blocks.20.res_post_norm1.0.weight", + "backbone.levels.2.blocks.20.res_post_norm1.0.bias", + "backbone.levels.2.blocks.20.res_post_norm2.0.weight", + "backbone.levels.2.blocks.20.res_post_norm2.0.bias" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.0 + }, + "layer_33_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.20.dcn.offset.weight", + "backbone.levels.2.blocks.20.dcn.mask.weight", + "backbone.levels.2.blocks.20.dcn.input_proj.weight", + "backbone.levels.2.blocks.20.dcn.output_proj.weight", + "backbone.levels.2.blocks.20.mlp.fc1.weight", + "backbone.levels.2.blocks.20.mlp.fc2.weight" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.05 + }, + "layer_34_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.norm1.0.weight", + "backbone.levels.2.blocks.21.norm1.0.bias", + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.21.dcn.offset.bias", + "backbone.levels.2.blocks.21.dcn.mask.bias", + "backbone.levels.2.blocks.21.dcn.input_proj.bias", + "backbone.levels.2.blocks.21.dcn.output_proj.bias", + "backbone.levels.2.blocks.21.norm2.0.weight", + "backbone.levels.2.blocks.21.norm2.0.bias", + "backbone.levels.2.blocks.21.mlp.fc1.bias", + "backbone.levels.2.blocks.21.mlp.fc2.bias", + 
"backbone.levels.2.blocks.21.res_post_norm1.0.weight", + "backbone.levels.2.blocks.21.res_post_norm1.0.bias", + "backbone.levels.2.blocks.21.res_post_norm2.0.weight", + "backbone.levels.2.blocks.21.res_post_norm2.0.bias" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.0 + }, + "layer_34_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.21.dcn.offset.weight", + "backbone.levels.2.blocks.21.dcn.mask.weight", + "backbone.levels.2.blocks.21.dcn.input_proj.weight", + "backbone.levels.2.blocks.21.dcn.output_proj.weight", + "backbone.levels.2.blocks.21.mlp.fc1.weight", + "backbone.levels.2.blocks.21.mlp.fc2.weight" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.05 + }, + "layer_35_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.norm1.0.weight", + "backbone.levels.2.blocks.22.norm1.0.bias", + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.22.dcn.offset.bias", + "backbone.levels.2.blocks.22.dcn.mask.bias", + "backbone.levels.2.blocks.22.dcn.input_proj.bias", + "backbone.levels.2.blocks.22.dcn.output_proj.bias", + "backbone.levels.2.blocks.22.norm2.0.weight", + "backbone.levels.2.blocks.22.norm2.0.bias", + "backbone.levels.2.blocks.22.mlp.fc1.bias", + "backbone.levels.2.blocks.22.mlp.fc2.bias", + "backbone.levels.2.blocks.22.res_post_norm1.0.weight", + "backbone.levels.2.blocks.22.res_post_norm1.0.bias", + "backbone.levels.2.blocks.22.res_post_norm2.0.weight", + "backbone.levels.2.blocks.22.res_post_norm2.0.bias" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.0 + }, + "layer_35_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.22.dcn.offset.weight", + "backbone.levels.2.blocks.22.dcn.mask.weight", + "backbone.levels.2.blocks.22.dcn.input_proj.weight", + "backbone.levels.2.blocks.22.dcn.output_proj.weight", + "backbone.levels.2.blocks.22.mlp.fc1.weight", + "backbone.levels.2.blocks.22.mlp.fc2.weight" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.05 + }, + "layer_36_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.norm1.0.weight", + "backbone.levels.2.blocks.23.norm1.0.bias", + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.23.dcn.offset.bias", + "backbone.levels.2.blocks.23.dcn.mask.bias", + "backbone.levels.2.blocks.23.dcn.input_proj.bias", + "backbone.levels.2.blocks.23.dcn.output_proj.bias", + "backbone.levels.2.blocks.23.norm2.0.weight", + "backbone.levels.2.blocks.23.norm2.0.bias", + "backbone.levels.2.blocks.23.mlp.fc1.bias", + "backbone.levels.2.blocks.23.mlp.fc2.bias", + "backbone.levels.2.blocks.23.res_post_norm1.0.weight", + "backbone.levels.2.blocks.23.res_post_norm1.0.bias", + "backbone.levels.2.blocks.23.res_post_norm2.0.weight", + "backbone.levels.2.blocks.23.res_post_norm2.0.bias" + ], + "lr_scale": 0.46329123015975304, 
+ "lr": 4.632912301597531e-06, + "weight_decay": 0.0 + }, + "layer_36_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.23.dcn.offset.weight", + "backbone.levels.2.blocks.23.dcn.mask.weight", + "backbone.levels.2.blocks.23.dcn.input_proj.weight", + "backbone.levels.2.blocks.23.dcn.output_proj.weight", + "backbone.levels.2.blocks.23.mlp.fc1.weight", + "backbone.levels.2.blocks.23.mlp.fc2.weight" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.05 + }, + "layer_37_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.norm1.0.weight", + "backbone.levels.2.blocks.24.norm1.0.bias", + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.24.dcn.offset.bias", + "backbone.levels.2.blocks.24.dcn.mask.bias", + "backbone.levels.2.blocks.24.dcn.input_proj.bias", + "backbone.levels.2.blocks.24.dcn.output_proj.bias", + "backbone.levels.2.blocks.24.norm2.0.weight", + "backbone.levels.2.blocks.24.norm2.0.bias", + "backbone.levels.2.blocks.24.mlp.fc1.bias", + "backbone.levels.2.blocks.24.mlp.fc2.bias", + "backbone.levels.2.blocks.24.res_post_norm1.0.weight", + "backbone.levels.2.blocks.24.res_post_norm1.0.bias", + "backbone.levels.2.blocks.24.res_post_norm2.0.weight", + "backbone.levels.2.blocks.24.res_post_norm2.0.bias" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.0 + }, + "layer_37_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.24.dcn.offset.weight", + "backbone.levels.2.blocks.24.dcn.mask.weight", + "backbone.levels.2.blocks.24.dcn.input_proj.weight", + "backbone.levels.2.blocks.24.dcn.output_proj.weight", + "backbone.levels.2.blocks.24.mlp.fc1.weight", + "backbone.levels.2.blocks.24.mlp.fc2.weight" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.05 + }, + "layer_38_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.norm1.0.weight", + "backbone.levels.2.blocks.25.norm1.0.bias", + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.25.dcn.offset.bias", + "backbone.levels.2.blocks.25.dcn.mask.bias", + "backbone.levels.2.blocks.25.dcn.input_proj.bias", + "backbone.levels.2.blocks.25.dcn.output_proj.bias", + "backbone.levels.2.blocks.25.norm2.0.weight", + "backbone.levels.2.blocks.25.norm2.0.bias", + "backbone.levels.2.blocks.25.mlp.fc1.bias", + "backbone.levels.2.blocks.25.mlp.fc2.bias", + "backbone.levels.2.blocks.25.res_post_norm1.0.weight", + "backbone.levels.2.blocks.25.res_post_norm1.0.bias", + "backbone.levels.2.blocks.25.res_post_norm2.0.weight", + "backbone.levels.2.blocks.25.res_post_norm2.0.bias" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.0 + }, + "layer_38_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.weight", + 
"backbone.levels.2.blocks.25.dcn.offset.weight", + "backbone.levels.2.blocks.25.dcn.mask.weight", + "backbone.levels.2.blocks.25.dcn.input_proj.weight", + "backbone.levels.2.blocks.25.dcn.output_proj.weight", + "backbone.levels.2.blocks.25.mlp.fc1.weight", + "backbone.levels.2.blocks.25.mlp.fc2.weight" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.05 + }, + "layer_39_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.norm1.0.weight", + "backbone.levels.2.blocks.26.norm1.0.bias", + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.26.dcn.offset.bias", + "backbone.levels.2.blocks.26.dcn.mask.bias", + "backbone.levels.2.blocks.26.dcn.input_proj.bias", + "backbone.levels.2.blocks.26.dcn.output_proj.bias", + "backbone.levels.2.blocks.26.norm2.0.weight", + "backbone.levels.2.blocks.26.norm2.0.bias", + "backbone.levels.2.blocks.26.mlp.fc1.bias", + "backbone.levels.2.blocks.26.mlp.fc2.bias", + "backbone.levels.2.blocks.26.res_post_norm1.0.weight", + "backbone.levels.2.blocks.26.res_post_norm1.0.bias", + "backbone.levels.2.blocks.26.res_post_norm2.0.weight", + "backbone.levels.2.blocks.26.res_post_norm2.0.bias" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.0 + }, + "layer_39_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.26.dcn.offset.weight", + "backbone.levels.2.blocks.26.dcn.mask.weight", + "backbone.levels.2.blocks.26.dcn.input_proj.weight", + "backbone.levels.2.blocks.26.dcn.output_proj.weight", + "backbone.levels.2.blocks.26.mlp.fc1.weight", + "backbone.levels.2.blocks.26.mlp.fc2.weight" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.05 + }, + "layer_40_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.norm1.0.weight", + "backbone.levels.2.blocks.27.norm1.0.bias", + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.27.dcn.offset.bias", + "backbone.levels.2.blocks.27.dcn.mask.bias", + "backbone.levels.2.blocks.27.dcn.input_proj.bias", + "backbone.levels.2.blocks.27.dcn.output_proj.bias", + "backbone.levels.2.blocks.27.norm2.0.weight", + "backbone.levels.2.blocks.27.norm2.0.bias", + "backbone.levels.2.blocks.27.mlp.fc1.bias", + "backbone.levels.2.blocks.27.mlp.fc2.bias", + "backbone.levels.2.blocks.27.res_post_norm1.0.weight", + "backbone.levels.2.blocks.27.res_post_norm1.0.bias", + "backbone.levels.2.blocks.27.res_post_norm2.0.weight", + "backbone.levels.2.blocks.27.res_post_norm2.0.bias" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.0 + }, + "layer_40_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.27.dcn.offset.weight", + "backbone.levels.2.blocks.27.dcn.mask.weight", + "backbone.levels.2.blocks.27.dcn.input_proj.weight", + "backbone.levels.2.blocks.27.dcn.output_proj.weight", + "backbone.levels.2.blocks.27.mlp.fc1.weight", + 
"backbone.levels.2.blocks.27.mlp.fc2.weight" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.05 + }, + "layer_41_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.norm1.0.weight", + "backbone.levels.2.blocks.28.norm1.0.bias", + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.28.dcn.offset.bias", + "backbone.levels.2.blocks.28.dcn.mask.bias", + "backbone.levels.2.blocks.28.dcn.input_proj.bias", + "backbone.levels.2.blocks.28.dcn.output_proj.bias", + "backbone.levels.2.blocks.28.norm2.0.weight", + "backbone.levels.2.blocks.28.norm2.0.bias", + "backbone.levels.2.blocks.28.mlp.fc1.bias", + "backbone.levels.2.blocks.28.mlp.fc2.bias", + "backbone.levels.2.blocks.28.res_post_norm1.0.weight", + "backbone.levels.2.blocks.28.res_post_norm1.0.bias", + "backbone.levels.2.blocks.28.res_post_norm2.0.weight", + "backbone.levels.2.blocks.28.res_post_norm2.0.bias" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.0 + }, + "layer_41_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.28.dcn.offset.weight", + "backbone.levels.2.blocks.28.dcn.mask.weight", + "backbone.levels.2.blocks.28.dcn.input_proj.weight", + "backbone.levels.2.blocks.28.dcn.output_proj.weight", + "backbone.levels.2.blocks.28.mlp.fc1.weight", + "backbone.levels.2.blocks.28.mlp.fc2.weight" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.05 + }, + "layer_42_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.norm1.0.weight", + "backbone.levels.2.blocks.29.norm1.0.bias", + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.29.dcn.offset.bias", + "backbone.levels.2.blocks.29.dcn.mask.bias", + "backbone.levels.2.blocks.29.dcn.input_proj.bias", + "backbone.levels.2.blocks.29.dcn.output_proj.bias", + "backbone.levels.2.blocks.29.norm2.0.weight", + "backbone.levels.2.blocks.29.norm2.0.bias", + "backbone.levels.2.blocks.29.mlp.fc1.bias", + "backbone.levels.2.blocks.29.mlp.fc2.bias", + "backbone.levels.2.blocks.29.res_post_norm1.0.weight", + "backbone.levels.2.blocks.29.res_post_norm1.0.bias", + "backbone.levels.2.blocks.29.res_post_norm2.0.weight", + "backbone.levels.2.blocks.29.res_post_norm2.0.bias" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.0 + }, + "layer_42_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.29.dcn.offset.weight", + "backbone.levels.2.blocks.29.dcn.mask.weight", + "backbone.levels.2.blocks.29.dcn.input_proj.weight", + "backbone.levels.2.blocks.29.dcn.output_proj.weight", + "backbone.levels.2.blocks.29.mlp.fc1.weight", + "backbone.levels.2.blocks.29.mlp.fc2.weight" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.05 + }, + "layer_43_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.norm1.0.weight", + 
"backbone.levels.2.blocks.30.norm1.0.bias", + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.30.dcn.offset.bias", + "backbone.levels.2.blocks.30.dcn.mask.bias", + "backbone.levels.2.blocks.30.dcn.input_proj.bias", + "backbone.levels.2.blocks.30.dcn.output_proj.bias", + "backbone.levels.2.blocks.30.norm2.0.weight", + "backbone.levels.2.blocks.30.norm2.0.bias", + "backbone.levels.2.blocks.30.mlp.fc1.bias", + "backbone.levels.2.blocks.30.mlp.fc2.bias", + "backbone.levels.2.blocks.30.res_post_norm1.0.weight", + "backbone.levels.2.blocks.30.res_post_norm1.0.bias", + "backbone.levels.2.blocks.30.res_post_norm2.0.weight", + "backbone.levels.2.blocks.30.res_post_norm2.0.bias" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.0 + }, + "layer_43_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.30.dcn.offset.weight", + "backbone.levels.2.blocks.30.dcn.mask.weight", + "backbone.levels.2.blocks.30.dcn.input_proj.weight", + "backbone.levels.2.blocks.30.dcn.output_proj.weight", + "backbone.levels.2.blocks.30.mlp.fc1.weight", + "backbone.levels.2.blocks.30.mlp.fc2.weight" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.05 + }, + "layer_44_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.norm1.0.weight", + "backbone.levels.2.blocks.31.norm1.0.bias", + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.31.dcn.offset.bias", + "backbone.levels.2.blocks.31.dcn.mask.bias", + "backbone.levels.2.blocks.31.dcn.input_proj.bias", + "backbone.levels.2.blocks.31.dcn.output_proj.bias", + "backbone.levels.2.blocks.31.norm2.0.weight", + "backbone.levels.2.blocks.31.norm2.0.bias", + "backbone.levels.2.blocks.31.mlp.fc1.bias", + "backbone.levels.2.blocks.31.mlp.fc2.bias", + "backbone.levels.2.blocks.31.res_post_norm1.0.weight", + "backbone.levels.2.blocks.31.res_post_norm1.0.bias", + "backbone.levels.2.blocks.31.res_post_norm2.0.weight", + "backbone.levels.2.blocks.31.res_post_norm2.0.bias" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.0 + }, + "layer_44_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.31.dcn.offset.weight", + "backbone.levels.2.blocks.31.dcn.mask.weight", + "backbone.levels.2.blocks.31.dcn.input_proj.weight", + "backbone.levels.2.blocks.31.dcn.output_proj.weight", + "backbone.levels.2.blocks.31.mlp.fc1.weight", + "backbone.levels.2.blocks.31.mlp.fc2.weight" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.05 + }, + "layer_45_no_decay": { + "param_names": [ + "backbone.levels.2.norm.0.weight", + "backbone.levels.2.norm.0.bias", + "backbone.levels.2.downsample.norm.1.weight", + "backbone.levels.2.downsample.norm.1.bias", + "backbone.levels.3.blocks.0.norm1.0.weight", + "backbone.levels.3.blocks.0.norm1.0.bias", + 
"backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.0.dcn.offset.bias", + "backbone.levels.3.blocks.0.dcn.mask.bias", + "backbone.levels.3.blocks.0.dcn.input_proj.bias", + "backbone.levels.3.blocks.0.dcn.output_proj.bias", + "backbone.levels.3.blocks.0.norm2.0.weight", + "backbone.levels.3.blocks.0.norm2.0.bias", + "backbone.levels.3.blocks.0.mlp.fc1.bias", + "backbone.levels.3.blocks.0.mlp.fc2.bias", + "backbone.levels.3.blocks.0.res_post_norm1.0.weight", + "backbone.levels.3.blocks.0.res_post_norm1.0.bias", + "backbone.levels.3.blocks.0.res_post_norm2.0.weight", + "backbone.levels.3.blocks.0.res_post_norm2.0.bias", + "backbone.levels.3.norm.0.weight", + "backbone.levels.3.norm.0.bias" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.0 + }, + "layer_45_decay": { + "param_names": [ + "backbone.levels.2.downsample.conv.weight", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.0.dcn.offset.weight", + "backbone.levels.3.blocks.0.dcn.mask.weight", + "backbone.levels.3.blocks.0.dcn.input_proj.weight", + "backbone.levels.3.blocks.0.dcn.output_proj.weight", + "backbone.levels.3.blocks.0.mlp.fc1.weight", + "backbone.levels.3.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.05 + }, + "layer_46_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.norm1.0.weight", + "backbone.levels.3.blocks.1.norm1.0.bias", + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.1.dcn.offset.bias", + "backbone.levels.3.blocks.1.dcn.mask.bias", + "backbone.levels.3.blocks.1.dcn.input_proj.bias", + "backbone.levels.3.blocks.1.dcn.output_proj.bias", + "backbone.levels.3.blocks.1.norm2.0.weight", + "backbone.levels.3.blocks.1.norm2.0.bias", + "backbone.levels.3.blocks.1.mlp.fc1.bias", + "backbone.levels.3.blocks.1.mlp.fc2.bias", + "backbone.levels.3.blocks.1.res_post_norm1.0.weight", + "backbone.levels.3.blocks.1.res_post_norm1.0.bias", + "backbone.levels.3.blocks.1.res_post_norm2.0.weight", + "backbone.levels.3.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.0 + }, + "layer_46_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.1.dcn.offset.weight", + "backbone.levels.3.blocks.1.dcn.mask.weight", + "backbone.levels.3.blocks.1.dcn.input_proj.weight", + "backbone.levels.3.blocks.1.dcn.output_proj.weight", + "backbone.levels.3.blocks.1.mlp.fc1.weight", + "backbone.levels.3.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.05 + }, + "layer_47_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.norm1.0.weight", + "backbone.levels.3.blocks.2.norm1.0.bias", + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight", + 
"backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.2.dcn.offset.bias", + "backbone.levels.3.blocks.2.dcn.mask.bias", + "backbone.levels.3.blocks.2.dcn.input_proj.bias", + "backbone.levels.3.blocks.2.dcn.output_proj.bias", + "backbone.levels.3.blocks.2.norm2.0.weight", + "backbone.levels.3.blocks.2.norm2.0.bias", + "backbone.levels.3.blocks.2.mlp.fc1.bias", + "backbone.levels.3.blocks.2.mlp.fc2.bias", + "backbone.levels.3.blocks.2.res_post_norm1.0.weight", + "backbone.levels.3.blocks.2.res_post_norm1.0.bias", + "backbone.levels.3.blocks.2.res_post_norm2.0.weight", + "backbone.levels.3.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.0 + }, + "layer_47_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.2.dcn.offset.weight", + "backbone.levels.3.blocks.2.dcn.mask.weight", + "backbone.levels.3.blocks.2.dcn.input_proj.weight", + "backbone.levels.3.blocks.2.dcn.output_proj.weight", + "backbone.levels.3.blocks.2.mlp.fc1.weight", + "backbone.levels.3.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.05 + }, + "layer_48_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.norm1.0.weight", + "backbone.levels.3.blocks.3.norm1.0.bias", + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.3.dcn.offset.bias", + "backbone.levels.3.blocks.3.dcn.mask.bias", + "backbone.levels.3.blocks.3.dcn.input_proj.bias", + "backbone.levels.3.blocks.3.dcn.output_proj.bias", + "backbone.levels.3.blocks.3.norm2.0.weight", + "backbone.levels.3.blocks.3.norm2.0.bias", + "backbone.levels.3.blocks.3.mlp.fc1.bias", + "backbone.levels.3.blocks.3.mlp.fc2.bias", + "backbone.levels.3.blocks.3.res_post_norm1.0.weight", + "backbone.levels.3.blocks.3.res_post_norm1.0.bias", + "backbone.levels.3.blocks.3.res_post_norm2.0.weight", + "backbone.levels.3.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.0 + }, + "layer_48_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.3.dcn.offset.weight", + "backbone.levels.3.blocks.3.dcn.mask.weight", + "backbone.levels.3.blocks.3.dcn.input_proj.weight", + "backbone.levels.3.blocks.3.dcn.output_proj.weight", + "backbone.levels.3.blocks.3.mlp.fc1.weight", + "backbone.levels.3.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.05 + }, + "layer_49_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.norm1.0.weight", + "backbone.levels.3.blocks.4.norm1.0.bias", + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.4.dcn.offset.bias", + "backbone.levels.3.blocks.4.dcn.mask.bias", + "backbone.levels.3.blocks.4.dcn.input_proj.bias", + "backbone.levels.3.blocks.4.dcn.output_proj.bias", + "backbone.levels.3.blocks.4.norm2.0.weight", + 
"backbone.levels.3.blocks.4.norm2.0.bias", + "backbone.levels.3.blocks.4.mlp.fc1.bias", + "backbone.levels.3.blocks.4.mlp.fc2.bias", + "backbone.levels.3.blocks.4.res_post_norm1.0.weight", + "backbone.levels.3.blocks.4.res_post_norm1.0.bias", + "backbone.levels.3.blocks.4.res_post_norm2.0.weight", + "backbone.levels.3.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.0 + }, + "layer_49_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.4.dcn.offset.weight", + "backbone.levels.3.blocks.4.dcn.mask.weight", + "backbone.levels.3.blocks.4.dcn.input_proj.weight", + "backbone.levels.3.blocks.4.dcn.output_proj.weight", + "backbone.levels.3.blocks.4.mlp.fc1.weight", + "backbone.levels.3.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.05 + }, + "layer_50_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.norm1.0.weight", + "backbone.levels.3.blocks.5.norm1.0.bias", + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.5.dcn.offset.bias", + "backbone.levels.3.blocks.5.dcn.mask.bias", + "backbone.levels.3.blocks.5.dcn.input_proj.bias", + "backbone.levels.3.blocks.5.dcn.output_proj.bias", + "backbone.levels.3.blocks.5.norm2.0.weight", + "backbone.levels.3.blocks.5.norm2.0.bias", + "backbone.levels.3.blocks.5.mlp.fc1.bias", + "backbone.levels.3.blocks.5.mlp.fc2.bias", + "backbone.levels.3.blocks.5.res_post_norm1.0.weight", + "backbone.levels.3.blocks.5.res_post_norm1.0.bias", + "backbone.levels.3.blocks.5.res_post_norm2.0.weight", + "backbone.levels.3.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.0 + }, + "layer_50_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.5.dcn.offset.weight", + "backbone.levels.3.blocks.5.dcn.mask.weight", + "backbone.levels.3.blocks.5.dcn.input_proj.weight", + "backbone.levels.3.blocks.5.dcn.output_proj.weight", + "backbone.levels.3.blocks.5.mlp.fc1.weight", + "backbone.levels.3.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.05 + }, + "layer_51_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.weight", + "decode_head.pixel_decoder.input_convs.1.conv.weight", + "decode_head.pixel_decoder.input_convs.2.conv.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight", + 
"decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.level_encoding.weight", + "decode_head.pixel_decoder.lateral_convs.0.conv.weight", + "decode_head.pixel_decoder.output_convs.0.conv.weight", + "decode_head.pixel_decoder.mask_feature.weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight", + 
"decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.05 + }, + "layer_51_no_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.bias", + "decode_head.pixel_decoder.input_convs.0.gn.weight", + "decode_head.pixel_decoder.input_convs.0.gn.bias", + "decode_head.pixel_decoder.input_convs.1.conv.bias", + "decode_head.pixel_decoder.input_convs.1.gn.weight", + "decode_head.pixel_decoder.input_convs.1.gn.bias", + "decode_head.pixel_decoder.input_convs.2.conv.bias", + "decode_head.pixel_decoder.input_convs.2.gn.weight", + "decode_head.pixel_decoder.input_convs.2.gn.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias", + 
"decode_head.pixel_decoder.encoder.layers.0.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight", + 
"decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.bias", + "decode_head.pixel_decoder.lateral_convs.0.gn.weight", + "decode_head.pixel_decoder.lateral_convs.0.gn.bias", + "decode_head.pixel_decoder.output_convs.0.gn.weight", + "decode_head.pixel_decoder.output_convs.0.gn.bias", + "decode_head.pixel_decoder.mask_feature.bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.0.norms.0.weight", + "decode_head.transformer_decoder.layers.0.norms.0.bias", + "decode_head.transformer_decoder.layers.0.norms.1.weight", + "decode_head.transformer_decoder.layers.0.norms.1.bias", + "decode_head.transformer_decoder.layers.0.norms.2.weight", + "decode_head.transformer_decoder.layers.0.norms.2.bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.1.norms.0.weight", + "decode_head.transformer_decoder.layers.1.norms.0.bias", + "decode_head.transformer_decoder.layers.1.norms.1.weight", + "decode_head.transformer_decoder.layers.1.norms.1.bias", + "decode_head.transformer_decoder.layers.1.norms.2.weight", + "decode_head.transformer_decoder.layers.1.norms.2.bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.2.norms.0.weight", + "decode_head.transformer_decoder.layers.2.norms.0.bias", + "decode_head.transformer_decoder.layers.2.norms.1.weight", + "decode_head.transformer_decoder.layers.2.norms.1.bias", + "decode_head.transformer_decoder.layers.2.norms.2.weight", + "decode_head.transformer_decoder.layers.2.norms.2.bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias", + 
"decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.3.norms.0.weight", + "decode_head.transformer_decoder.layers.3.norms.0.bias", + "decode_head.transformer_decoder.layers.3.norms.1.weight", + "decode_head.transformer_decoder.layers.3.norms.1.bias", + "decode_head.transformer_decoder.layers.3.norms.2.weight", + "decode_head.transformer_decoder.layers.3.norms.2.bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.4.norms.0.weight", + "decode_head.transformer_decoder.layers.4.norms.0.bias", + "decode_head.transformer_decoder.layers.4.norms.1.weight", + "decode_head.transformer_decoder.layers.4.norms.1.bias", + "decode_head.transformer_decoder.layers.4.norms.2.weight", + "decode_head.transformer_decoder.layers.4.norms.2.bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.5.norms.0.weight", + "decode_head.transformer_decoder.layers.5.norms.0.bias", + "decode_head.transformer_decoder.layers.5.norms.1.weight", + "decode_head.transformer_decoder.layers.5.norms.1.bias", + "decode_head.transformer_decoder.layers.5.norms.2.weight", + "decode_head.transformer_decoder.layers.5.norms.2.bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.6.norms.0.weight", + "decode_head.transformer_decoder.layers.6.norms.0.bias", + "decode_head.transformer_decoder.layers.6.norms.1.weight", + "decode_head.transformer_decoder.layers.6.norms.1.bias", + "decode_head.transformer_decoder.layers.6.norms.2.weight", + "decode_head.transformer_decoder.layers.6.norms.2.bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias", + 
"decode_head.transformer_decoder.layers.7.norms.0.weight", + "decode_head.transformer_decoder.layers.7.norms.0.bias", + "decode_head.transformer_decoder.layers.7.norms.1.weight", + "decode_head.transformer_decoder.layers.7.norms.1.bias", + "decode_head.transformer_decoder.layers.7.norms.2.weight", + "decode_head.transformer_decoder.layers.7.norms.2.bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.8.norms.0.weight", + "decode_head.transformer_decoder.layers.8.norms.0.bias", + "decode_head.transformer_decoder.layers.8.norms.1.weight", + "decode_head.transformer_decoder.layers.8.norms.1.bias", + "decode_head.transformer_decoder.layers.8.norms.2.weight", + "decode_head.transformer_decoder.layers.8.norms.2.bias", + "decode_head.transformer_decoder.post_norm.weight", + "decode_head.transformer_decoder.post_norm.bias" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.0 + } +} +2025-05-28 02:21:27,626 - mmseg - INFO - Loaded 1 images +2025-05-28 02:21:27,626 - mmseg - INFO - load checkpoint from http path: https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth +2025-05-28 02:22:11,650 - mmseg - INFO - Start running, host: yiming@pasteur2.stanford.edu, work_dir: /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 +2025-05-28 02:22:11,651 - mmseg - INFO - Hooks will be executed in the following order: +before_run: +(VERY_HIGH ) PolyLrUpdaterHook +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_epoch: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_iter: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook + -------------------- +after_train_iter: +(ABOVE_NORMAL) OptimizerHook +(NORMAL ) CheckpointHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_train_epoch: +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_epoch: +(LOW ) IterTimerHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_epoch: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_run: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +2025-05-28 02:22:11,651 - mmseg - INFO - workflow: [('train', 1)], max: 80000 iters +2025-05-28 02:22:11,651 - mmseg - INFO - Checkpoints will be saved to /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 by HardDiskBackend. 
diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022041.log.json b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022041.log.json new file mode 100644 index 0000000000000000000000000000000000000000..848643c1b6b9ace6665ddb06c81fbc3c0e18c320 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022041.log.json @@ -0,0 +1 @@ +{"env_info": "sys.platform: linux\nPython: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ]\nCUDA available: True\nGPU 0: NVIDIA TITAN RTX\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 12.2, V12.2.91\nGCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\nPyTorch: 1.11.0+cu113\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n\nTorchVision: 0.12.0+cu113\nOpenCV: 4.11.0\nMMCV: 1.5.0\nMMCV Compiler: GCC 7.3\nMMCV CUDA Compiler: 11.3\nMMSegmentation: 0.27.0+12dc934", "seed": 719920476, "exp_name": "mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py", "mmseg_version": "0.27.0+12dc934", "config": "num_things_classes = 100\nnum_stuff_classes = 50\nnum_classes = 19\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoderMask2Former',\n pretrained=None,\n backbone=dict(\n type='InternImage',\n core_op='DCNv3',\n channels=320,\n depths=[6, 6, 32, 6],\n groups=[10, 20, 40, 80],\n mlp_ratio=4.0,\n drop_path_rate=0.5,\n norm_layer='LN',\n layer_scale=None,\n offset_scale=1.0,\n post_norm=False,\n dw_kernel_size=5,\n res_post_norm=True,\n level2_post_norm=True,\n 
level2_post_norm_block_ids=[5, 11, 17, 23, 29],\n center_feature_scale=True,\n with_cp=False,\n out_indices=(0, 1, 2, 3),\n init_cfg=None),\n decode_head=dict(\n type='Mask2FormerHead',\n in_channels=[320, 640, 1280, 2560],\n feat_channels=256,\n out_channels=256,\n in_index=[0, 1, 2, 3],\n num_things_classes=100,\n num_stuff_classes=50,\n num_queries=100,\n num_transformer_feat_level=3,\n pixel_decoder=dict(\n type='MSDeformAttnPixelDecoder',\n num_outs=3,\n norm_cfg=dict(type='GN', num_groups=32),\n act_cfg=dict(type='ReLU'),\n encoder=dict(\n type='DetrTransformerEncoder',\n num_layers=6,\n transformerlayers=dict(\n type='BaseTransformerLayer',\n attn_cfgs=dict(\n type='MultiScaleDeformableAttention',\n embed_dims=256,\n num_heads=8,\n num_levels=3,\n num_points=4,\n im2col_step=64,\n dropout=0.0,\n batch_first=False,\n norm_cfg=None,\n init_cfg=None),\n ffn_cfgs=dict(\n type='FFN',\n embed_dims=256,\n feedforward_channels=1024,\n num_fcs=2,\n ffn_drop=0.0,\n act_cfg=dict(type='ReLU', inplace=True),\n with_cp=False),\n operation_order=('self_attn', 'norm', 'ffn', 'norm')),\n init_cfg=None),\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n init_cfg=None),\n enforce_decoder_input_project=False,\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n transformer_decoder=dict(\n type='DetrTransformerDecoder',\n return_intermediate=True,\n num_layers=9,\n transformerlayers=dict(\n type='DetrTransformerDecoderLayer',\n attn_cfgs=dict(\n type='MultiheadAttention',\n embed_dims=256,\n num_heads=8,\n attn_drop=0.0,\n proj_drop=0.0,\n dropout_layer=None,\n batch_first=False),\n ffn_cfgs=dict(\n embed_dims=256,\n feedforward_channels=2048,\n num_fcs=2,\n act_cfg=dict(type='ReLU', inplace=True),\n ffn_drop=0.0,\n dropout_layer=None,\n add_identity=True,\n with_cp=False),\n feedforward_channels=2048,\n operation_order=('cross_attn', 'norm', 'self_attn', 'norm',\n 'ffn', 'norm')),\n init_cfg=None),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=2.0,\n reduction='mean',\n class_weight=[\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1\n ]),\n loss_mask=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=5.0),\n loss_dice=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n reduction='mean',\n naive_dice=True,\n eps=1.0,\n loss_weight=5.0),\n num_classes=19,\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512))),\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n 
semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512)),\n init_cfg=None)\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncrop_size = (1024, 1024)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(\n type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(\n type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n ],\n split='splits/fold_1_train_split.txt'),\n val=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ],\n split='splits/fold_1_val_split.txt'),\n test=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/val/',\n ann_dir='gtFine/val/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', 
keys=['img'])\n ])\n ]))\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n dict(type='TensorboardLoggerHook')\n ])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth'\nresume_from = None\nworkflow = [('train', 1)]\ncudnn_benchmark = True\noptimizer = dict(\n type='AdamW',\n lr=1e-05,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n constructor='CustomLayerDecayOptimizerConstructor',\n paramwise_cfg=dict(\n num_layers=50,\n layer_decay_rate=0.95,\n depths=[6, 6, 32, 6],\n offset_lr_scale=1.0))\noptimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))\nlr_config = dict(\n policy='poly',\n warmup='linear',\n warmup_iters=1500,\n warmup_ratio=1e-06,\n power=1.0,\n min_lr=0.0,\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=80000)\ncheckpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1)\nevaluation = dict(\n interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU')\nwork_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1'\ngpu_ids = range(0, 1)\nauto_resume = False\ndevice = 'cuda'\nseed = 719920476\n", "CLASSES": ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"], "PALETTE": [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]], "hook_msgs": {}} diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022236.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022236.log new file mode 100644 index 0000000000000000000000000000000000000000..2f284adffd9653733fa7ac3cd11a6e93e1642889 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022236.log @@ -0,0 +1,7656 @@ +2025-05-28 02:22:36,677 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 02:22:36,679 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 02:22:36,721 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. 
OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 02:22:36,722 - mmseg - INFO - Distributed training: True +2025-05-28 02:22:37,149 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + 
with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + 
img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 02:22:37,149 - mmseg - INFO - Set random seed to 1501318398, deterministic: False +2025-05-28 02:22:37,150 - mmseg - INFO - using core type: DCNv3 +2025-05-28 02:22:37,150 - mmseg - INFO - using activation layer: GELU +2025-05-28 02:22:37,150 - mmseg - INFO - using main norm layer: LN +2025-05-28 02:22:37,150 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 02:22:37,150 - mmseg - INFO - 
level2_post_norm: True +2025-05-28 02:22:37,150 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 02:22:37,150 - mmseg - INFO - res_post_norm: True +2025-05-28 02:22:37,150 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 02:23:02,911 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - 
torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former

backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]):
The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former

For backbone.levels.0.blocks.4-5 (channels 320), backbone.levels.1.blocks.0-5 (channels 640) and backbone.levels.2.blocks.0-9 (channels 1280), the report repeats the same two messages for every block:

Initialized by user-defined `init_weights` in InternImage:
dcn.offset.weight - torch.Size([180, 320] / [360, 640] / [720, 1280])
dcn.mask.weight - torch.Size([90, 320] / [180, 640] / [360, 1280])
dcn.input_proj.weight and dcn.output_proj.weight - torch.Size([320, 320] / [640, 640] / [1280, 1280])
mlp.fc1.weight - torch.Size([1280, 320] / [2560, 640] / [5120, 1280])
mlp.fc2.weight - torch.Size([320, 1280] / [640, 2560] / [1280, 5120])

The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former:
norm1.0, norm2.0, res_post_norm1.0 and res_post_norm2.0 weight and bias - torch.Size([320] / [640] / [1280])
dcn.center_feature_scale_proj_weight - torch.Size([10, 320] / [20, 640] / [40, 1280]) and dcn.center_feature_scale_proj_bias - torch.Size([10] / [20] / [40])
dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5] / [640, 1, 5, 5] / [1280, 1, 5, 5]); dcn.dw_conv.0.bias, dcn.dw_conv.1.1.weight and dcn.dw_conv.1.1.bias - torch.Size([320] / [640] / [1280])
dcn.offset.bias - torch.Size([180] / [360] / [720]); dcn.mask.bias - torch.Size([90] / [180] / [360]); dcn.input_proj.bias, dcn.output_proj.bias and mlp.fc2.bias - torch.Size([320] / [640] / [1280]); mlp.fc1.bias - torch.Size([1280] / [2560] / [5120])

The same message ("The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former") also applies to the level-wise layers covered here:
backbone.levels.0.norm.0.weight and .bias - torch.Size([320])
backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]); backbone.levels.0.downsample.norm.1.weight and .bias - torch.Size([640])
backbone.levels.1.norm.0.weight and .bias - torch.Size([640])
backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]); backbone.levels.1.downsample.norm.1.weight and .bias - torch.Size([1280])

backbone.levels.2.blocks.9.mlp.fc1.bias - torch.Size([5120]): The
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 
5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.27.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.28.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.29.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.30.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 
2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.weight - 
torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in 
InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.input_proj.bias - torch.Size([2560]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.conv.weight - torch.Size([256, 2560, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.0.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.conv.weight - torch.Size([256, 1280, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.1.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.input_convs.2.conv.weight - torch.Size([256, 640, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.2.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
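Every entry in this dump follows the same two-outcome pattern: a parameter is either "Initialized by user-defined `init_weights`" (in InternImage for the backbone, in Mask2FormerHead for the decode head) or reported as having the same value before and after `init_weights` of EncoderDecoderMask2Former. The same kind of before/after report can be reproduced for any plain PyTorch module with a small helper; the sketch below is illustrative only (it is not the mmcv/mmseg code that produced this log, and report_init_changes is a hypothetical helper name).

import torch
import torch.nn as nn

def report_init_changes(model: nn.Module, init_fn) -> None:
    # Snapshot every parameter, run the init routine, then compare values.
    before = {name: p.detach().clone() for name, p in model.named_parameters()}
    init_fn(model)
    for name, param in model.named_parameters():
        unchanged = torch.equal(before[name], param.detach())
        status = ("value is the same before and after calling init_fn"
                  if unchanged else "initialized by init_fn")
        print(f"{name} - {tuple(param.shape)}: {status}")

# Toy usage: only the Linear weight is re-initialized, so its bias and the
# LayerNorm parameters show up as unchanged, mirroring the pattern in this log.
toy = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
report_init_changes(toy, lambda m: nn.init.trunc_normal_(m[0].weight, std=0.02))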
+decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias - 
torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.level_encoding.weight - torch.Size([3, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.conv.weight - torch.Size([256, 320, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.lateral_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.conv.weight - torch.Size([256, 256, 3, 3]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.output_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.mask_feature.weight - torch.Size([256, 256, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.mask_feature.bias - torch.Size([256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.bias - torch.Size([256]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - 
torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
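The decoder shapes logged above and below are consistent with a standard Mask2Former transformer decoder: attention in_proj weights of [768, 256] (stacked q/k/v projections for a 256-dim embedding), an FFN hidden size of 2048, nine decoder layers (layers.0 through layers.8), and, further below, 100 learned queries and a 20-way classifier (19 Cityscapes classes plus one "no object" class). A hypothetical summary of those values follows; the field names mimic common mmdet/mmseg-style configs and are not taken from the actual config used for this run.

# Illustrative only: every number here is inferred from parameter shapes in this log.
decoder_dims = dict(
    num_queries=100,            # decode_head.query_embed.weight: [100, 256]
    num_classes=19,             # decode_head.cls_embed: [20, 256] -> 19 classes + "no object"
    embed_dims=256,             # attn in_proj_weight: [768, 256] = 3 x 256
    feedforward_channels=2048,  # ffns.0.layers.0.0.weight: [2048, 256]
    num_decoder_layers=9,       # transformer_decoder.layers.0 ... layers.8
)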
+decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - 
torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former +2025-05-28 02:23:09,543 - mmseg - INFO - Loaded 1 images +2025-05-28 02:23:15,280 - mmseg - INFO - {'num_layers': 50, 'layer_decay_rate': 0.95, 'depths': [6, 6, 32, 6], 'offset_lr_scale': 1.0} 
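The lr_scale values in the parameter-group dump that follows are consistent with plain layer-wise learning-rate decay: with layer_decay_rate=0.95 and 52 layer ids (presumably num_layers=50 plus two, matching the "- 52" in the build line below), layer i is assigned lr_scale = 0.95 ** (52 - 1 - i), and each group's lr is that scale times a base learning rate of about 1e-5 (inferred from lr / lr_scale in the dump). A minimal sketch of that arithmetic; this is illustrative only, not the CustomLayerDecayOptimizerConstructor source.

# Hedged sketch: reproduce the lr_scale / lr values printed in the param groups below.
base_lr = 1e-5              # inferred from lr / lr_scale in the dump
layer_decay_rate = 0.95     # from the config line above
num_layer_ids = 50 + 2      # num_layers plus 2, matching the "52" in the build line

for layer_id in range(3):
    lr_scale = layer_decay_rate ** (num_layer_ids - 1 - layer_id)
    print(f"layer_{layer_id}: lr_scale={lr_scale!r}, lr={base_lr * lr_scale:.6e}")
# layer_0: lr_scale=0.073097726512877...  lr=7.309773e-07
# layer_1: lr_scale=0.076944975276713...  lr=7.694498e-07
# layer_2: lr_scale=0.080994710817592...  lr=8.099471e-07

The same dump also shows the usual weight-decay split: the *_decay groups (convolution, linear and projection weights) keep weight_decay 0.05, while the *_no_decay groups (biases and normalization parameters) use weight_decay 0.0.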
+2025-05-28 02:23:15,281 - mmseg - INFO - Build CustomLayerDecayOptimizerConstructor 0.950000 - 52 +2025-05-28 02:23:15,291 - mmseg - INFO - Param groups = { + "layer_0_decay": { + "param_names": [ + "backbone.patch_embed.conv1.weight", + "backbone.patch_embed.conv2.weight", + "decode_head.query_embed.weight", + "decode_head.query_feat.weight", + "decode_head.level_embed.weight", + "decode_head.cls_embed.weight", + "decode_head.mask_embed.0.weight", + "decode_head.mask_embed.2.weight", + "decode_head.mask_embed.4.weight" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.05 + }, + "layer_0_no_decay": { + "param_names": [ + "backbone.patch_embed.conv1.bias", + "backbone.patch_embed.norm1.1.weight", + "backbone.patch_embed.norm1.1.bias", + "backbone.patch_embed.conv2.bias", + "backbone.patch_embed.norm2.1.weight", + "backbone.patch_embed.norm2.1.bias", + "decode_head.cls_embed.bias", + "decode_head.mask_embed.0.bias", + "decode_head.mask_embed.2.bias", + "decode_head.mask_embed.4.bias" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.0 + }, + "layer_1_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.norm1.0.weight", + "backbone.levels.0.blocks.0.norm1.0.bias", + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.0.dcn.offset.bias", + "backbone.levels.0.blocks.0.dcn.mask.bias", + "backbone.levels.0.blocks.0.dcn.input_proj.bias", + "backbone.levels.0.blocks.0.dcn.output_proj.bias", + "backbone.levels.0.blocks.0.norm2.0.weight", + "backbone.levels.0.blocks.0.norm2.0.bias", + "backbone.levels.0.blocks.0.mlp.fc1.bias", + "backbone.levels.0.blocks.0.mlp.fc2.bias", + "backbone.levels.0.blocks.0.res_post_norm1.0.weight", + "backbone.levels.0.blocks.0.res_post_norm1.0.bias", + "backbone.levels.0.blocks.0.res_post_norm2.0.weight", + "backbone.levels.0.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.0 + }, + "layer_1_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.0.dcn.offset.weight", + "backbone.levels.0.blocks.0.dcn.mask.weight", + "backbone.levels.0.blocks.0.dcn.input_proj.weight", + "backbone.levels.0.blocks.0.dcn.output_proj.weight", + "backbone.levels.0.blocks.0.mlp.fc1.weight", + "backbone.levels.0.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.05 + }, + "layer_2_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.norm1.0.weight", + "backbone.levels.0.blocks.1.norm1.0.bias", + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.1.dcn.offset.bias", + "backbone.levels.0.blocks.1.dcn.mask.bias", + "backbone.levels.0.blocks.1.dcn.input_proj.bias", + "backbone.levels.0.blocks.1.dcn.output_proj.bias", + "backbone.levels.0.blocks.1.norm2.0.weight", + "backbone.levels.0.blocks.1.norm2.0.bias", + "backbone.levels.0.blocks.1.mlp.fc1.bias", + "backbone.levels.0.blocks.1.mlp.fc2.bias", + "backbone.levels.0.blocks.1.res_post_norm1.0.weight", + 
"backbone.levels.0.blocks.1.res_post_norm1.0.bias", + "backbone.levels.0.blocks.1.res_post_norm2.0.weight", + "backbone.levels.0.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.0 + }, + "layer_2_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.1.dcn.offset.weight", + "backbone.levels.0.blocks.1.dcn.mask.weight", + "backbone.levels.0.blocks.1.dcn.input_proj.weight", + "backbone.levels.0.blocks.1.dcn.output_proj.weight", + "backbone.levels.0.blocks.1.mlp.fc1.weight", + "backbone.levels.0.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.05 + }, + "layer_3_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.norm1.0.weight", + "backbone.levels.0.blocks.2.norm1.0.bias", + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.2.dcn.offset.bias", + "backbone.levels.0.blocks.2.dcn.mask.bias", + "backbone.levels.0.blocks.2.dcn.input_proj.bias", + "backbone.levels.0.blocks.2.dcn.output_proj.bias", + "backbone.levels.0.blocks.2.norm2.0.weight", + "backbone.levels.0.blocks.2.norm2.0.bias", + "backbone.levels.0.blocks.2.mlp.fc1.bias", + "backbone.levels.0.blocks.2.mlp.fc2.bias", + "backbone.levels.0.blocks.2.res_post_norm1.0.weight", + "backbone.levels.0.blocks.2.res_post_norm1.0.bias", + "backbone.levels.0.blocks.2.res_post_norm2.0.weight", + "backbone.levels.0.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.0 + }, + "layer_3_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.2.dcn.offset.weight", + "backbone.levels.0.blocks.2.dcn.mask.weight", + "backbone.levels.0.blocks.2.dcn.input_proj.weight", + "backbone.levels.0.blocks.2.dcn.output_proj.weight", + "backbone.levels.0.blocks.2.mlp.fc1.weight", + "backbone.levels.0.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.05 + }, + "layer_4_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.norm1.0.weight", + "backbone.levels.0.blocks.3.norm1.0.bias", + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.3.dcn.offset.bias", + "backbone.levels.0.blocks.3.dcn.mask.bias", + "backbone.levels.0.blocks.3.dcn.input_proj.bias", + "backbone.levels.0.blocks.3.dcn.output_proj.bias", + "backbone.levels.0.blocks.3.norm2.0.weight", + "backbone.levels.0.blocks.3.norm2.0.bias", + "backbone.levels.0.blocks.3.mlp.fc1.bias", + "backbone.levels.0.blocks.3.mlp.fc2.bias", + "backbone.levels.0.blocks.3.res_post_norm1.0.weight", + "backbone.levels.0.blocks.3.res_post_norm1.0.bias", + "backbone.levels.0.blocks.3.res_post_norm2.0.weight", + "backbone.levels.0.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.0 + }, + "layer_4_decay": { + "param_names": [ + 
"backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.3.dcn.offset.weight", + "backbone.levels.0.blocks.3.dcn.mask.weight", + "backbone.levels.0.blocks.3.dcn.input_proj.weight", + "backbone.levels.0.blocks.3.dcn.output_proj.weight", + "backbone.levels.0.blocks.3.mlp.fc1.weight", + "backbone.levels.0.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.05 + }, + "layer_5_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.norm1.0.weight", + "backbone.levels.0.blocks.4.norm1.0.bias", + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.4.dcn.offset.bias", + "backbone.levels.0.blocks.4.dcn.mask.bias", + "backbone.levels.0.blocks.4.dcn.input_proj.bias", + "backbone.levels.0.blocks.4.dcn.output_proj.bias", + "backbone.levels.0.blocks.4.norm2.0.weight", + "backbone.levels.0.blocks.4.norm2.0.bias", + "backbone.levels.0.blocks.4.mlp.fc1.bias", + "backbone.levels.0.blocks.4.mlp.fc2.bias", + "backbone.levels.0.blocks.4.res_post_norm1.0.weight", + "backbone.levels.0.blocks.4.res_post_norm1.0.bias", + "backbone.levels.0.blocks.4.res_post_norm2.0.weight", + "backbone.levels.0.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.0 + }, + "layer_5_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.4.dcn.offset.weight", + "backbone.levels.0.blocks.4.dcn.mask.weight", + "backbone.levels.0.blocks.4.dcn.input_proj.weight", + "backbone.levels.0.blocks.4.dcn.output_proj.weight", + "backbone.levels.0.blocks.4.mlp.fc1.weight", + "backbone.levels.0.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.05 + }, + "layer_6_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.norm1.0.weight", + "backbone.levels.0.blocks.5.norm1.0.bias", + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.5.dcn.offset.bias", + "backbone.levels.0.blocks.5.dcn.mask.bias", + "backbone.levels.0.blocks.5.dcn.input_proj.bias", + "backbone.levels.0.blocks.5.dcn.output_proj.bias", + "backbone.levels.0.blocks.5.norm2.0.weight", + "backbone.levels.0.blocks.5.norm2.0.bias", + "backbone.levels.0.blocks.5.mlp.fc1.bias", + "backbone.levels.0.blocks.5.mlp.fc2.bias", + "backbone.levels.0.blocks.5.res_post_norm1.0.weight", + "backbone.levels.0.blocks.5.res_post_norm1.0.bias", + "backbone.levels.0.blocks.5.res_post_norm2.0.weight", + "backbone.levels.0.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.0 + }, + "layer_6_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.5.dcn.offset.weight", + "backbone.levels.0.blocks.5.dcn.mask.weight", + "backbone.levels.0.blocks.5.dcn.input_proj.weight", + 
"backbone.levels.0.blocks.5.dcn.output_proj.weight", + "backbone.levels.0.blocks.5.mlp.fc1.weight", + "backbone.levels.0.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.05 + }, + "layer_7_no_decay": { + "param_names": [ + "backbone.levels.0.norm.0.weight", + "backbone.levels.0.norm.0.bias", + "backbone.levels.0.downsample.norm.1.weight", + "backbone.levels.0.downsample.norm.1.bias", + "backbone.levels.1.blocks.0.norm1.0.weight", + "backbone.levels.1.blocks.0.norm1.0.bias", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.0.dcn.offset.bias", + "backbone.levels.1.blocks.0.dcn.mask.bias", + "backbone.levels.1.blocks.0.dcn.input_proj.bias", + "backbone.levels.1.blocks.0.dcn.output_proj.bias", + "backbone.levels.1.blocks.0.norm2.0.weight", + "backbone.levels.1.blocks.0.norm2.0.bias", + "backbone.levels.1.blocks.0.mlp.fc1.bias", + "backbone.levels.1.blocks.0.mlp.fc2.bias", + "backbone.levels.1.blocks.0.res_post_norm1.0.weight", + "backbone.levels.1.blocks.0.res_post_norm1.0.bias", + "backbone.levels.1.blocks.0.res_post_norm2.0.weight", + "backbone.levels.1.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.0 + }, + "layer_7_decay": { + "param_names": [ + "backbone.levels.0.downsample.conv.weight", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.0.dcn.offset.weight", + "backbone.levels.1.blocks.0.dcn.mask.weight", + "backbone.levels.1.blocks.0.dcn.input_proj.weight", + "backbone.levels.1.blocks.0.dcn.output_proj.weight", + "backbone.levels.1.blocks.0.mlp.fc1.weight", + "backbone.levels.1.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.05 + }, + "layer_8_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.norm1.0.weight", + "backbone.levels.1.blocks.1.norm1.0.bias", + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.1.dcn.offset.bias", + "backbone.levels.1.blocks.1.dcn.mask.bias", + "backbone.levels.1.blocks.1.dcn.input_proj.bias", + "backbone.levels.1.blocks.1.dcn.output_proj.bias", + "backbone.levels.1.blocks.1.norm2.0.weight", + "backbone.levels.1.blocks.1.norm2.0.bias", + "backbone.levels.1.blocks.1.mlp.fc1.bias", + "backbone.levels.1.blocks.1.mlp.fc2.bias", + "backbone.levels.1.blocks.1.res_post_norm1.0.weight", + "backbone.levels.1.blocks.1.res_post_norm1.0.bias", + "backbone.levels.1.blocks.1.res_post_norm2.0.weight", + "backbone.levels.1.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.0 + }, + "layer_8_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.1.dcn.offset.weight", + "backbone.levels.1.blocks.1.dcn.mask.weight", + "backbone.levels.1.blocks.1.dcn.input_proj.weight", + "backbone.levels.1.blocks.1.dcn.output_proj.weight", + "backbone.levels.1.blocks.1.mlp.fc1.weight", 
+ "backbone.levels.1.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.05 + }, + "layer_9_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.norm1.0.weight", + "backbone.levels.1.blocks.2.norm1.0.bias", + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.2.dcn.offset.bias", + "backbone.levels.1.blocks.2.dcn.mask.bias", + "backbone.levels.1.blocks.2.dcn.input_proj.bias", + "backbone.levels.1.blocks.2.dcn.output_proj.bias", + "backbone.levels.1.blocks.2.norm2.0.weight", + "backbone.levels.1.blocks.2.norm2.0.bias", + "backbone.levels.1.blocks.2.mlp.fc1.bias", + "backbone.levels.1.blocks.2.mlp.fc2.bias", + "backbone.levels.1.blocks.2.res_post_norm1.0.weight", + "backbone.levels.1.blocks.2.res_post_norm1.0.bias", + "backbone.levels.1.blocks.2.res_post_norm2.0.weight", + "backbone.levels.1.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.0 + }, + "layer_9_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.2.dcn.offset.weight", + "backbone.levels.1.blocks.2.dcn.mask.weight", + "backbone.levels.1.blocks.2.dcn.input_proj.weight", + "backbone.levels.1.blocks.2.dcn.output_proj.weight", + "backbone.levels.1.blocks.2.mlp.fc1.weight", + "backbone.levels.1.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.05 + }, + "layer_10_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.norm1.0.weight", + "backbone.levels.1.blocks.3.norm1.0.bias", + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.3.dcn.offset.bias", + "backbone.levels.1.blocks.3.dcn.mask.bias", + "backbone.levels.1.blocks.3.dcn.input_proj.bias", + "backbone.levels.1.blocks.3.dcn.output_proj.bias", + "backbone.levels.1.blocks.3.norm2.0.weight", + "backbone.levels.1.blocks.3.norm2.0.bias", + "backbone.levels.1.blocks.3.mlp.fc1.bias", + "backbone.levels.1.blocks.3.mlp.fc2.bias", + "backbone.levels.1.blocks.3.res_post_norm1.0.weight", + "backbone.levels.1.blocks.3.res_post_norm1.0.bias", + "backbone.levels.1.blocks.3.res_post_norm2.0.weight", + "backbone.levels.1.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.0 + }, + "layer_10_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.3.dcn.offset.weight", + "backbone.levels.1.blocks.3.dcn.mask.weight", + "backbone.levels.1.blocks.3.dcn.input_proj.weight", + "backbone.levels.1.blocks.3.dcn.output_proj.weight", + "backbone.levels.1.blocks.3.mlp.fc1.weight", + "backbone.levels.1.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.05 + }, + "layer_11_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.norm1.0.weight", + "backbone.levels.1.blocks.4.norm1.0.bias", + 
"backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.4.dcn.offset.bias", + "backbone.levels.1.blocks.4.dcn.mask.bias", + "backbone.levels.1.blocks.4.dcn.input_proj.bias", + "backbone.levels.1.blocks.4.dcn.output_proj.bias", + "backbone.levels.1.blocks.4.norm2.0.weight", + "backbone.levels.1.blocks.4.norm2.0.bias", + "backbone.levels.1.blocks.4.mlp.fc1.bias", + "backbone.levels.1.blocks.4.mlp.fc2.bias", + "backbone.levels.1.blocks.4.res_post_norm1.0.weight", + "backbone.levels.1.blocks.4.res_post_norm1.0.bias", + "backbone.levels.1.blocks.4.res_post_norm2.0.weight", + "backbone.levels.1.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.0 + }, + "layer_11_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.4.dcn.offset.weight", + "backbone.levels.1.blocks.4.dcn.mask.weight", + "backbone.levels.1.blocks.4.dcn.input_proj.weight", + "backbone.levels.1.blocks.4.dcn.output_proj.weight", + "backbone.levels.1.blocks.4.mlp.fc1.weight", + "backbone.levels.1.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.05 + }, + "layer_12_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.norm1.0.weight", + "backbone.levels.1.blocks.5.norm1.0.bias", + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.5.dcn.offset.bias", + "backbone.levels.1.blocks.5.dcn.mask.bias", + "backbone.levels.1.blocks.5.dcn.input_proj.bias", + "backbone.levels.1.blocks.5.dcn.output_proj.bias", + "backbone.levels.1.blocks.5.norm2.0.weight", + "backbone.levels.1.blocks.5.norm2.0.bias", + "backbone.levels.1.blocks.5.mlp.fc1.bias", + "backbone.levels.1.blocks.5.mlp.fc2.bias", + "backbone.levels.1.blocks.5.res_post_norm1.0.weight", + "backbone.levels.1.blocks.5.res_post_norm1.0.bias", + "backbone.levels.1.blocks.5.res_post_norm2.0.weight", + "backbone.levels.1.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.0 + }, + "layer_12_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.5.dcn.offset.weight", + "backbone.levels.1.blocks.5.dcn.mask.weight", + "backbone.levels.1.blocks.5.dcn.input_proj.weight", + "backbone.levels.1.blocks.5.dcn.output_proj.weight", + "backbone.levels.1.blocks.5.mlp.fc1.weight", + "backbone.levels.1.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.05 + }, + "layer_13_no_decay": { + "param_names": [ + "backbone.levels.1.norm.0.weight", + "backbone.levels.1.norm.0.bias", + "backbone.levels.1.downsample.norm.1.weight", + "backbone.levels.1.downsample.norm.1.bias", + "backbone.levels.2.blocks.0.norm1.0.weight", + "backbone.levels.2.blocks.0.norm1.0.bias", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.bias", + 
"backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.0.dcn.offset.bias", + "backbone.levels.2.blocks.0.dcn.mask.bias", + "backbone.levels.2.blocks.0.dcn.input_proj.bias", + "backbone.levels.2.blocks.0.dcn.output_proj.bias", + "backbone.levels.2.blocks.0.norm2.0.weight", + "backbone.levels.2.blocks.0.norm2.0.bias", + "backbone.levels.2.blocks.0.mlp.fc1.bias", + "backbone.levels.2.blocks.0.mlp.fc2.bias", + "backbone.levels.2.blocks.0.res_post_norm1.0.weight", + "backbone.levels.2.blocks.0.res_post_norm1.0.bias", + "backbone.levels.2.blocks.0.res_post_norm2.0.weight", + "backbone.levels.2.blocks.0.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.0.0.weight", + "backbone.levels.2.post_norms.0.0.bias" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.0 + }, + "layer_13_decay": { + "param_names": [ + "backbone.levels.1.downsample.conv.weight", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.0.dcn.offset.weight", + "backbone.levels.2.blocks.0.dcn.mask.weight", + "backbone.levels.2.blocks.0.dcn.input_proj.weight", + "backbone.levels.2.blocks.0.dcn.output_proj.weight", + "backbone.levels.2.blocks.0.mlp.fc1.weight", + "backbone.levels.2.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.05 + }, + "layer_14_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.norm1.0.weight", + "backbone.levels.2.blocks.1.norm1.0.bias", + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.1.dcn.offset.bias", + "backbone.levels.2.blocks.1.dcn.mask.bias", + "backbone.levels.2.blocks.1.dcn.input_proj.bias", + "backbone.levels.2.blocks.1.dcn.output_proj.bias", + "backbone.levels.2.blocks.1.norm2.0.weight", + "backbone.levels.2.blocks.1.norm2.0.bias", + "backbone.levels.2.blocks.1.mlp.fc1.bias", + "backbone.levels.2.blocks.1.mlp.fc2.bias", + "backbone.levels.2.blocks.1.res_post_norm1.0.weight", + "backbone.levels.2.blocks.1.res_post_norm1.0.bias", + "backbone.levels.2.blocks.1.res_post_norm2.0.weight", + "backbone.levels.2.blocks.1.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.1.0.weight", + "backbone.levels.2.post_norms.1.0.bias" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.0 + }, + "layer_14_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.1.dcn.offset.weight", + "backbone.levels.2.blocks.1.dcn.mask.weight", + "backbone.levels.2.blocks.1.dcn.input_proj.weight", + "backbone.levels.2.blocks.1.dcn.output_proj.weight", + "backbone.levels.2.blocks.1.mlp.fc1.weight", + "backbone.levels.2.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.05 + }, + "layer_15_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.norm1.0.weight", + "backbone.levels.2.blocks.2.norm1.0.bias", + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight", + 
"backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.2.dcn.offset.bias", + "backbone.levels.2.blocks.2.dcn.mask.bias", + "backbone.levels.2.blocks.2.dcn.input_proj.bias", + "backbone.levels.2.blocks.2.dcn.output_proj.bias", + "backbone.levels.2.blocks.2.norm2.0.weight", + "backbone.levels.2.blocks.2.norm2.0.bias", + "backbone.levels.2.blocks.2.mlp.fc1.bias", + "backbone.levels.2.blocks.2.mlp.fc2.bias", + "backbone.levels.2.blocks.2.res_post_norm1.0.weight", + "backbone.levels.2.blocks.2.res_post_norm1.0.bias", + "backbone.levels.2.blocks.2.res_post_norm2.0.weight", + "backbone.levels.2.blocks.2.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.2.0.weight", + "backbone.levels.2.post_norms.2.0.bias" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.0 + }, + "layer_15_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.2.dcn.offset.weight", + "backbone.levels.2.blocks.2.dcn.mask.weight", + "backbone.levels.2.blocks.2.dcn.input_proj.weight", + "backbone.levels.2.blocks.2.dcn.output_proj.weight", + "backbone.levels.2.blocks.2.mlp.fc1.weight", + "backbone.levels.2.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.05 + }, + "layer_16_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.norm1.0.weight", + "backbone.levels.2.blocks.3.norm1.0.bias", + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.3.dcn.offset.bias", + "backbone.levels.2.blocks.3.dcn.mask.bias", + "backbone.levels.2.blocks.3.dcn.input_proj.bias", + "backbone.levels.2.blocks.3.dcn.output_proj.bias", + "backbone.levels.2.blocks.3.norm2.0.weight", + "backbone.levels.2.blocks.3.norm2.0.bias", + "backbone.levels.2.blocks.3.mlp.fc1.bias", + "backbone.levels.2.blocks.3.mlp.fc2.bias", + "backbone.levels.2.blocks.3.res_post_norm1.0.weight", + "backbone.levels.2.blocks.3.res_post_norm1.0.bias", + "backbone.levels.2.blocks.3.res_post_norm2.0.weight", + "backbone.levels.2.blocks.3.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.3.0.weight", + "backbone.levels.2.post_norms.3.0.bias" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.0 + }, + "layer_16_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.3.dcn.offset.weight", + "backbone.levels.2.blocks.3.dcn.mask.weight", + "backbone.levels.2.blocks.3.dcn.input_proj.weight", + "backbone.levels.2.blocks.3.dcn.output_proj.weight", + "backbone.levels.2.blocks.3.mlp.fc1.weight", + "backbone.levels.2.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.05 + }, + "layer_17_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.norm1.0.weight", + "backbone.levels.2.blocks.4.norm1.0.bias", + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.4.dcn.offset.bias", + 
"backbone.levels.2.blocks.4.dcn.mask.bias", + "backbone.levels.2.blocks.4.dcn.input_proj.bias", + "backbone.levels.2.blocks.4.dcn.output_proj.bias", + "backbone.levels.2.blocks.4.norm2.0.weight", + "backbone.levels.2.blocks.4.norm2.0.bias", + "backbone.levels.2.blocks.4.mlp.fc1.bias", + "backbone.levels.2.blocks.4.mlp.fc2.bias", + "backbone.levels.2.blocks.4.res_post_norm1.0.weight", + "backbone.levels.2.blocks.4.res_post_norm1.0.bias", + "backbone.levels.2.blocks.4.res_post_norm2.0.weight", + "backbone.levels.2.blocks.4.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.4.0.weight", + "backbone.levels.2.post_norms.4.0.bias" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.0 + }, + "layer_17_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.4.dcn.offset.weight", + "backbone.levels.2.blocks.4.dcn.mask.weight", + "backbone.levels.2.blocks.4.dcn.input_proj.weight", + "backbone.levels.2.blocks.4.dcn.output_proj.weight", + "backbone.levels.2.blocks.4.mlp.fc1.weight", + "backbone.levels.2.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.05 + }, + "layer_18_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.norm1.0.weight", + "backbone.levels.2.blocks.5.norm1.0.bias", + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.5.dcn.offset.bias", + "backbone.levels.2.blocks.5.dcn.mask.bias", + "backbone.levels.2.blocks.5.dcn.input_proj.bias", + "backbone.levels.2.blocks.5.dcn.output_proj.bias", + "backbone.levels.2.blocks.5.norm2.0.weight", + "backbone.levels.2.blocks.5.norm2.0.bias", + "backbone.levels.2.blocks.5.mlp.fc1.bias", + "backbone.levels.2.blocks.5.mlp.fc2.bias", + "backbone.levels.2.blocks.5.res_post_norm1.0.weight", + "backbone.levels.2.blocks.5.res_post_norm1.0.bias", + "backbone.levels.2.blocks.5.res_post_norm2.0.weight", + "backbone.levels.2.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.0 + }, + "layer_18_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.5.dcn.offset.weight", + "backbone.levels.2.blocks.5.dcn.mask.weight", + "backbone.levels.2.blocks.5.dcn.input_proj.weight", + "backbone.levels.2.blocks.5.dcn.output_proj.weight", + "backbone.levels.2.blocks.5.mlp.fc1.weight", + "backbone.levels.2.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.05 + }, + "layer_19_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.norm1.0.weight", + "backbone.levels.2.blocks.6.norm1.0.bias", + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.6.dcn.offset.bias", + "backbone.levels.2.blocks.6.dcn.mask.bias", + "backbone.levels.2.blocks.6.dcn.input_proj.bias", + "backbone.levels.2.blocks.6.dcn.output_proj.bias", + "backbone.levels.2.blocks.6.norm2.0.weight", + 
"backbone.levels.2.blocks.6.norm2.0.bias", + "backbone.levels.2.blocks.6.mlp.fc1.bias", + "backbone.levels.2.blocks.6.mlp.fc2.bias", + "backbone.levels.2.blocks.6.res_post_norm1.0.weight", + "backbone.levels.2.blocks.6.res_post_norm1.0.bias", + "backbone.levels.2.blocks.6.res_post_norm2.0.weight", + "backbone.levels.2.blocks.6.res_post_norm2.0.bias" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.0 + }, + "layer_19_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.6.dcn.offset.weight", + "backbone.levels.2.blocks.6.dcn.mask.weight", + "backbone.levels.2.blocks.6.dcn.input_proj.weight", + "backbone.levels.2.blocks.6.dcn.output_proj.weight", + "backbone.levels.2.blocks.6.mlp.fc1.weight", + "backbone.levels.2.blocks.6.mlp.fc2.weight" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.05 + }, + "layer_20_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.norm1.0.weight", + "backbone.levels.2.blocks.7.norm1.0.bias", + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.7.dcn.offset.bias", + "backbone.levels.2.blocks.7.dcn.mask.bias", + "backbone.levels.2.blocks.7.dcn.input_proj.bias", + "backbone.levels.2.blocks.7.dcn.output_proj.bias", + "backbone.levels.2.blocks.7.norm2.0.weight", + "backbone.levels.2.blocks.7.norm2.0.bias", + "backbone.levels.2.blocks.7.mlp.fc1.bias", + "backbone.levels.2.blocks.7.mlp.fc2.bias", + "backbone.levels.2.blocks.7.res_post_norm1.0.weight", + "backbone.levels.2.blocks.7.res_post_norm1.0.bias", + "backbone.levels.2.blocks.7.res_post_norm2.0.weight", + "backbone.levels.2.blocks.7.res_post_norm2.0.bias" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.0 + }, + "layer_20_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.7.dcn.offset.weight", + "backbone.levels.2.blocks.7.dcn.mask.weight", + "backbone.levels.2.blocks.7.dcn.input_proj.weight", + "backbone.levels.2.blocks.7.dcn.output_proj.weight", + "backbone.levels.2.blocks.7.mlp.fc1.weight", + "backbone.levels.2.blocks.7.mlp.fc2.weight" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.05 + }, + "layer_21_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.norm1.0.weight", + "backbone.levels.2.blocks.8.norm1.0.bias", + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.8.dcn.offset.bias", + "backbone.levels.2.blocks.8.dcn.mask.bias", + "backbone.levels.2.blocks.8.dcn.input_proj.bias", + "backbone.levels.2.blocks.8.dcn.output_proj.bias", + "backbone.levels.2.blocks.8.norm2.0.weight", + "backbone.levels.2.blocks.8.norm2.0.bias", + "backbone.levels.2.blocks.8.mlp.fc1.bias", + "backbone.levels.2.blocks.8.mlp.fc2.bias", + "backbone.levels.2.blocks.8.res_post_norm1.0.weight", + "backbone.levels.2.blocks.8.res_post_norm1.0.bias", + "backbone.levels.2.blocks.8.res_post_norm2.0.weight", + 
"backbone.levels.2.blocks.8.res_post_norm2.0.bias" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.0 + }, + "layer_21_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.8.dcn.offset.weight", + "backbone.levels.2.blocks.8.dcn.mask.weight", + "backbone.levels.2.blocks.8.dcn.input_proj.weight", + "backbone.levels.2.blocks.8.dcn.output_proj.weight", + "backbone.levels.2.blocks.8.mlp.fc1.weight", + "backbone.levels.2.blocks.8.mlp.fc2.weight" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.05 + }, + "layer_22_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.norm1.0.weight", + "backbone.levels.2.blocks.9.norm1.0.bias", + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.9.dcn.offset.bias", + "backbone.levels.2.blocks.9.dcn.mask.bias", + "backbone.levels.2.blocks.9.dcn.input_proj.bias", + "backbone.levels.2.blocks.9.dcn.output_proj.bias", + "backbone.levels.2.blocks.9.norm2.0.weight", + "backbone.levels.2.blocks.9.norm2.0.bias", + "backbone.levels.2.blocks.9.mlp.fc1.bias", + "backbone.levels.2.blocks.9.mlp.fc2.bias", + "backbone.levels.2.blocks.9.res_post_norm1.0.weight", + "backbone.levels.2.blocks.9.res_post_norm1.0.bias", + "backbone.levels.2.blocks.9.res_post_norm2.0.weight", + "backbone.levels.2.blocks.9.res_post_norm2.0.bias" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.0 + }, + "layer_22_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.9.dcn.offset.weight", + "backbone.levels.2.blocks.9.dcn.mask.weight", + "backbone.levels.2.blocks.9.dcn.input_proj.weight", + "backbone.levels.2.blocks.9.dcn.output_proj.weight", + "backbone.levels.2.blocks.9.mlp.fc1.weight", + "backbone.levels.2.blocks.9.mlp.fc2.weight" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.05 + }, + "layer_23_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.norm1.0.weight", + "backbone.levels.2.blocks.10.norm1.0.bias", + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.10.dcn.offset.bias", + "backbone.levels.2.blocks.10.dcn.mask.bias", + "backbone.levels.2.blocks.10.dcn.input_proj.bias", + "backbone.levels.2.blocks.10.dcn.output_proj.bias", + "backbone.levels.2.blocks.10.norm2.0.weight", + "backbone.levels.2.blocks.10.norm2.0.bias", + "backbone.levels.2.blocks.10.mlp.fc1.bias", + "backbone.levels.2.blocks.10.mlp.fc2.bias", + "backbone.levels.2.blocks.10.res_post_norm1.0.weight", + "backbone.levels.2.blocks.10.res_post_norm1.0.bias", + "backbone.levels.2.blocks.10.res_post_norm2.0.weight", + "backbone.levels.2.blocks.10.res_post_norm2.0.bias" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.0 + }, + "layer_23_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight", + 
"backbone.levels.2.blocks.10.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.10.dcn.offset.weight", + "backbone.levels.2.blocks.10.dcn.mask.weight", + "backbone.levels.2.blocks.10.dcn.input_proj.weight", + "backbone.levels.2.blocks.10.dcn.output_proj.weight", + "backbone.levels.2.blocks.10.mlp.fc1.weight", + "backbone.levels.2.blocks.10.mlp.fc2.weight" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.05 + }, + "layer_24_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.norm1.0.weight", + "backbone.levels.2.blocks.11.norm1.0.bias", + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.11.dcn.offset.bias", + "backbone.levels.2.blocks.11.dcn.mask.bias", + "backbone.levels.2.blocks.11.dcn.input_proj.bias", + "backbone.levels.2.blocks.11.dcn.output_proj.bias", + "backbone.levels.2.blocks.11.norm2.0.weight", + "backbone.levels.2.blocks.11.norm2.0.bias", + "backbone.levels.2.blocks.11.mlp.fc1.bias", + "backbone.levels.2.blocks.11.mlp.fc2.bias", + "backbone.levels.2.blocks.11.res_post_norm1.0.weight", + "backbone.levels.2.blocks.11.res_post_norm1.0.bias", + "backbone.levels.2.blocks.11.res_post_norm2.0.weight", + "backbone.levels.2.blocks.11.res_post_norm2.0.bias" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.0 + }, + "layer_24_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.11.dcn.offset.weight", + "backbone.levels.2.blocks.11.dcn.mask.weight", + "backbone.levels.2.blocks.11.dcn.input_proj.weight", + "backbone.levels.2.blocks.11.dcn.output_proj.weight", + "backbone.levels.2.blocks.11.mlp.fc1.weight", + "backbone.levels.2.blocks.11.mlp.fc2.weight" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.05 + }, + "layer_25_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.norm1.0.weight", + "backbone.levels.2.blocks.12.norm1.0.bias", + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.12.dcn.offset.bias", + "backbone.levels.2.blocks.12.dcn.mask.bias", + "backbone.levels.2.blocks.12.dcn.input_proj.bias", + "backbone.levels.2.blocks.12.dcn.output_proj.bias", + "backbone.levels.2.blocks.12.norm2.0.weight", + "backbone.levels.2.blocks.12.norm2.0.bias", + "backbone.levels.2.blocks.12.mlp.fc1.bias", + "backbone.levels.2.blocks.12.mlp.fc2.bias", + "backbone.levels.2.blocks.12.res_post_norm1.0.weight", + "backbone.levels.2.blocks.12.res_post_norm1.0.bias", + "backbone.levels.2.blocks.12.res_post_norm2.0.weight", + "backbone.levels.2.blocks.12.res_post_norm2.0.bias" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.0 + }, + "layer_25_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.12.dcn.offset.weight", + "backbone.levels.2.blocks.12.dcn.mask.weight", + "backbone.levels.2.blocks.12.dcn.input_proj.weight", + 
"backbone.levels.2.blocks.12.dcn.output_proj.weight", + "backbone.levels.2.blocks.12.mlp.fc1.weight", + "backbone.levels.2.blocks.12.mlp.fc2.weight" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.05 + }, + "layer_26_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.norm1.0.weight", + "backbone.levels.2.blocks.13.norm1.0.bias", + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.13.dcn.offset.bias", + "backbone.levels.2.blocks.13.dcn.mask.bias", + "backbone.levels.2.blocks.13.dcn.input_proj.bias", + "backbone.levels.2.blocks.13.dcn.output_proj.bias", + "backbone.levels.2.blocks.13.norm2.0.weight", + "backbone.levels.2.blocks.13.norm2.0.bias", + "backbone.levels.2.blocks.13.mlp.fc1.bias", + "backbone.levels.2.blocks.13.mlp.fc2.bias", + "backbone.levels.2.blocks.13.res_post_norm1.0.weight", + "backbone.levels.2.blocks.13.res_post_norm1.0.bias", + "backbone.levels.2.blocks.13.res_post_norm2.0.weight", + "backbone.levels.2.blocks.13.res_post_norm2.0.bias" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.0 + }, + "layer_26_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.13.dcn.offset.weight", + "backbone.levels.2.blocks.13.dcn.mask.weight", + "backbone.levels.2.blocks.13.dcn.input_proj.weight", + "backbone.levels.2.blocks.13.dcn.output_proj.weight", + "backbone.levels.2.blocks.13.mlp.fc1.weight", + "backbone.levels.2.blocks.13.mlp.fc2.weight" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.05 + }, + "layer_27_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.norm1.0.weight", + "backbone.levels.2.blocks.14.norm1.0.bias", + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.14.dcn.offset.bias", + "backbone.levels.2.blocks.14.dcn.mask.bias", + "backbone.levels.2.blocks.14.dcn.input_proj.bias", + "backbone.levels.2.blocks.14.dcn.output_proj.bias", + "backbone.levels.2.blocks.14.norm2.0.weight", + "backbone.levels.2.blocks.14.norm2.0.bias", + "backbone.levels.2.blocks.14.mlp.fc1.bias", + "backbone.levels.2.blocks.14.mlp.fc2.bias", + "backbone.levels.2.blocks.14.res_post_norm1.0.weight", + "backbone.levels.2.blocks.14.res_post_norm1.0.bias", + "backbone.levels.2.blocks.14.res_post_norm2.0.weight", + "backbone.levels.2.blocks.14.res_post_norm2.0.bias" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.0 + }, + "layer_27_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.14.dcn.offset.weight", + "backbone.levels.2.blocks.14.dcn.mask.weight", + "backbone.levels.2.blocks.14.dcn.input_proj.weight", + "backbone.levels.2.blocks.14.dcn.output_proj.weight", + "backbone.levels.2.blocks.14.mlp.fc1.weight", + "backbone.levels.2.blocks.14.mlp.fc2.weight" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.05 + }, + 
"layer_28_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.norm1.0.weight", + "backbone.levels.2.blocks.15.norm1.0.bias", + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.15.dcn.offset.bias", + "backbone.levels.2.blocks.15.dcn.mask.bias", + "backbone.levels.2.blocks.15.dcn.input_proj.bias", + "backbone.levels.2.blocks.15.dcn.output_proj.bias", + "backbone.levels.2.blocks.15.norm2.0.weight", + "backbone.levels.2.blocks.15.norm2.0.bias", + "backbone.levels.2.blocks.15.mlp.fc1.bias", + "backbone.levels.2.blocks.15.mlp.fc2.bias", + "backbone.levels.2.blocks.15.res_post_norm1.0.weight", + "backbone.levels.2.blocks.15.res_post_norm1.0.bias", + "backbone.levels.2.blocks.15.res_post_norm2.0.weight", + "backbone.levels.2.blocks.15.res_post_norm2.0.bias" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.0 + }, + "layer_28_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.15.dcn.offset.weight", + "backbone.levels.2.blocks.15.dcn.mask.weight", + "backbone.levels.2.blocks.15.dcn.input_proj.weight", + "backbone.levels.2.blocks.15.dcn.output_proj.weight", + "backbone.levels.2.blocks.15.mlp.fc1.weight", + "backbone.levels.2.blocks.15.mlp.fc2.weight" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.05 + }, + "layer_29_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.norm1.0.weight", + "backbone.levels.2.blocks.16.norm1.0.bias", + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.16.dcn.offset.bias", + "backbone.levels.2.blocks.16.dcn.mask.bias", + "backbone.levels.2.blocks.16.dcn.input_proj.bias", + "backbone.levels.2.blocks.16.dcn.output_proj.bias", + "backbone.levels.2.blocks.16.norm2.0.weight", + "backbone.levels.2.blocks.16.norm2.0.bias", + "backbone.levels.2.blocks.16.mlp.fc1.bias", + "backbone.levels.2.blocks.16.mlp.fc2.bias", + "backbone.levels.2.blocks.16.res_post_norm1.0.weight", + "backbone.levels.2.blocks.16.res_post_norm1.0.bias", + "backbone.levels.2.blocks.16.res_post_norm2.0.weight", + "backbone.levels.2.blocks.16.res_post_norm2.0.bias" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.0 + }, + "layer_29_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.16.dcn.offset.weight", + "backbone.levels.2.blocks.16.dcn.mask.weight", + "backbone.levels.2.blocks.16.dcn.input_proj.weight", + "backbone.levels.2.blocks.16.dcn.output_proj.weight", + "backbone.levels.2.blocks.16.mlp.fc1.weight", + "backbone.levels.2.blocks.16.mlp.fc2.weight" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.05 + }, + "layer_30_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.norm1.0.weight", + "backbone.levels.2.blocks.17.norm1.0.bias", + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.bias", + 
"backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.17.dcn.offset.bias", + "backbone.levels.2.blocks.17.dcn.mask.bias", + "backbone.levels.2.blocks.17.dcn.input_proj.bias", + "backbone.levels.2.blocks.17.dcn.output_proj.bias", + "backbone.levels.2.blocks.17.norm2.0.weight", + "backbone.levels.2.blocks.17.norm2.0.bias", + "backbone.levels.2.blocks.17.mlp.fc1.bias", + "backbone.levels.2.blocks.17.mlp.fc2.bias", + "backbone.levels.2.blocks.17.res_post_norm1.0.weight", + "backbone.levels.2.blocks.17.res_post_norm1.0.bias", + "backbone.levels.2.blocks.17.res_post_norm2.0.weight", + "backbone.levels.2.blocks.17.res_post_norm2.0.bias" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.0 + }, + "layer_30_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.17.dcn.offset.weight", + "backbone.levels.2.blocks.17.dcn.mask.weight", + "backbone.levels.2.blocks.17.dcn.input_proj.weight", + "backbone.levels.2.blocks.17.dcn.output_proj.weight", + "backbone.levels.2.blocks.17.mlp.fc1.weight", + "backbone.levels.2.blocks.17.mlp.fc2.weight" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.05 + }, + "layer_31_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.norm1.0.weight", + "backbone.levels.2.blocks.18.norm1.0.bias", + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.18.dcn.offset.bias", + "backbone.levels.2.blocks.18.dcn.mask.bias", + "backbone.levels.2.blocks.18.dcn.input_proj.bias", + "backbone.levels.2.blocks.18.dcn.output_proj.bias", + "backbone.levels.2.blocks.18.norm2.0.weight", + "backbone.levels.2.blocks.18.norm2.0.bias", + "backbone.levels.2.blocks.18.mlp.fc1.bias", + "backbone.levels.2.blocks.18.mlp.fc2.bias", + "backbone.levels.2.blocks.18.res_post_norm1.0.weight", + "backbone.levels.2.blocks.18.res_post_norm1.0.bias", + "backbone.levels.2.blocks.18.res_post_norm2.0.weight", + "backbone.levels.2.blocks.18.res_post_norm2.0.bias" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.0 + }, + "layer_31_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.18.dcn.offset.weight", + "backbone.levels.2.blocks.18.dcn.mask.weight", + "backbone.levels.2.blocks.18.dcn.input_proj.weight", + "backbone.levels.2.blocks.18.dcn.output_proj.weight", + "backbone.levels.2.blocks.18.mlp.fc1.weight", + "backbone.levels.2.blocks.18.mlp.fc2.weight" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.05 + }, + "layer_32_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.norm1.0.weight", + "backbone.levels.2.blocks.19.norm1.0.bias", + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.19.dcn.offset.bias", + "backbone.levels.2.blocks.19.dcn.mask.bias", + "backbone.levels.2.blocks.19.dcn.input_proj.bias", + 
"backbone.levels.2.blocks.19.dcn.output_proj.bias", + "backbone.levels.2.blocks.19.norm2.0.weight", + "backbone.levels.2.blocks.19.norm2.0.bias", + "backbone.levels.2.blocks.19.mlp.fc1.bias", + "backbone.levels.2.blocks.19.mlp.fc2.bias", + "backbone.levels.2.blocks.19.res_post_norm1.0.weight", + "backbone.levels.2.blocks.19.res_post_norm1.0.bias", + "backbone.levels.2.blocks.19.res_post_norm2.0.weight", + "backbone.levels.2.blocks.19.res_post_norm2.0.bias" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.0 + }, + "layer_32_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.19.dcn.offset.weight", + "backbone.levels.2.blocks.19.dcn.mask.weight", + "backbone.levels.2.blocks.19.dcn.input_proj.weight", + "backbone.levels.2.blocks.19.dcn.output_proj.weight", + "backbone.levels.2.blocks.19.mlp.fc1.weight", + "backbone.levels.2.blocks.19.mlp.fc2.weight" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.05 + }, + "layer_33_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.norm1.0.weight", + "backbone.levels.2.blocks.20.norm1.0.bias", + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.20.dcn.offset.bias", + "backbone.levels.2.blocks.20.dcn.mask.bias", + "backbone.levels.2.blocks.20.dcn.input_proj.bias", + "backbone.levels.2.blocks.20.dcn.output_proj.bias", + "backbone.levels.2.blocks.20.norm2.0.weight", + "backbone.levels.2.blocks.20.norm2.0.bias", + "backbone.levels.2.blocks.20.mlp.fc1.bias", + "backbone.levels.2.blocks.20.mlp.fc2.bias", + "backbone.levels.2.blocks.20.res_post_norm1.0.weight", + "backbone.levels.2.blocks.20.res_post_norm1.0.bias", + "backbone.levels.2.blocks.20.res_post_norm2.0.weight", + "backbone.levels.2.blocks.20.res_post_norm2.0.bias" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.0 + }, + "layer_33_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.20.dcn.offset.weight", + "backbone.levels.2.blocks.20.dcn.mask.weight", + "backbone.levels.2.blocks.20.dcn.input_proj.weight", + "backbone.levels.2.blocks.20.dcn.output_proj.weight", + "backbone.levels.2.blocks.20.mlp.fc1.weight", + "backbone.levels.2.blocks.20.mlp.fc2.weight" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.05 + }, + "layer_34_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.norm1.0.weight", + "backbone.levels.2.blocks.21.norm1.0.bias", + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.21.dcn.offset.bias", + "backbone.levels.2.blocks.21.dcn.mask.bias", + "backbone.levels.2.blocks.21.dcn.input_proj.bias", + "backbone.levels.2.blocks.21.dcn.output_proj.bias", + "backbone.levels.2.blocks.21.norm2.0.weight", + "backbone.levels.2.blocks.21.norm2.0.bias", + "backbone.levels.2.blocks.21.mlp.fc1.bias", + "backbone.levels.2.blocks.21.mlp.fc2.bias", + 
"backbone.levels.2.blocks.21.res_post_norm1.0.weight", + "backbone.levels.2.blocks.21.res_post_norm1.0.bias", + "backbone.levels.2.blocks.21.res_post_norm2.0.weight", + "backbone.levels.2.blocks.21.res_post_norm2.0.bias" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.0 + }, + "layer_34_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.21.dcn.offset.weight", + "backbone.levels.2.blocks.21.dcn.mask.weight", + "backbone.levels.2.blocks.21.dcn.input_proj.weight", + "backbone.levels.2.blocks.21.dcn.output_proj.weight", + "backbone.levels.2.blocks.21.mlp.fc1.weight", + "backbone.levels.2.blocks.21.mlp.fc2.weight" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.05 + }, + "layer_35_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.norm1.0.weight", + "backbone.levels.2.blocks.22.norm1.0.bias", + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.22.dcn.offset.bias", + "backbone.levels.2.blocks.22.dcn.mask.bias", + "backbone.levels.2.blocks.22.dcn.input_proj.bias", + "backbone.levels.2.blocks.22.dcn.output_proj.bias", + "backbone.levels.2.blocks.22.norm2.0.weight", + "backbone.levels.2.blocks.22.norm2.0.bias", + "backbone.levels.2.blocks.22.mlp.fc1.bias", + "backbone.levels.2.blocks.22.mlp.fc2.bias", + "backbone.levels.2.blocks.22.res_post_norm1.0.weight", + "backbone.levels.2.blocks.22.res_post_norm1.0.bias", + "backbone.levels.2.blocks.22.res_post_norm2.0.weight", + "backbone.levels.2.blocks.22.res_post_norm2.0.bias" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.0 + }, + "layer_35_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.22.dcn.offset.weight", + "backbone.levels.2.blocks.22.dcn.mask.weight", + "backbone.levels.2.blocks.22.dcn.input_proj.weight", + "backbone.levels.2.blocks.22.dcn.output_proj.weight", + "backbone.levels.2.blocks.22.mlp.fc1.weight", + "backbone.levels.2.blocks.22.mlp.fc2.weight" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.05 + }, + "layer_36_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.norm1.0.weight", + "backbone.levels.2.blocks.23.norm1.0.bias", + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.23.dcn.offset.bias", + "backbone.levels.2.blocks.23.dcn.mask.bias", + "backbone.levels.2.blocks.23.dcn.input_proj.bias", + "backbone.levels.2.blocks.23.dcn.output_proj.bias", + "backbone.levels.2.blocks.23.norm2.0.weight", + "backbone.levels.2.blocks.23.norm2.0.bias", + "backbone.levels.2.blocks.23.mlp.fc1.bias", + "backbone.levels.2.blocks.23.mlp.fc2.bias", + "backbone.levels.2.blocks.23.res_post_norm1.0.weight", + "backbone.levels.2.blocks.23.res_post_norm1.0.bias", + "backbone.levels.2.blocks.23.res_post_norm2.0.weight", + "backbone.levels.2.blocks.23.res_post_norm2.0.bias" + ], + "lr_scale": 0.46329123015975304, 
+ "lr": 4.632912301597531e-06, + "weight_decay": 0.0 + }, + "layer_36_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.23.dcn.offset.weight", + "backbone.levels.2.blocks.23.dcn.mask.weight", + "backbone.levels.2.blocks.23.dcn.input_proj.weight", + "backbone.levels.2.blocks.23.dcn.output_proj.weight", + "backbone.levels.2.blocks.23.mlp.fc1.weight", + "backbone.levels.2.blocks.23.mlp.fc2.weight" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.05 + }, + "layer_37_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.norm1.0.weight", + "backbone.levels.2.blocks.24.norm1.0.bias", + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.24.dcn.offset.bias", + "backbone.levels.2.blocks.24.dcn.mask.bias", + "backbone.levels.2.blocks.24.dcn.input_proj.bias", + "backbone.levels.2.blocks.24.dcn.output_proj.bias", + "backbone.levels.2.blocks.24.norm2.0.weight", + "backbone.levels.2.blocks.24.norm2.0.bias", + "backbone.levels.2.blocks.24.mlp.fc1.bias", + "backbone.levels.2.blocks.24.mlp.fc2.bias", + "backbone.levels.2.blocks.24.res_post_norm1.0.weight", + "backbone.levels.2.blocks.24.res_post_norm1.0.bias", + "backbone.levels.2.blocks.24.res_post_norm2.0.weight", + "backbone.levels.2.blocks.24.res_post_norm2.0.bias" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.0 + }, + "layer_37_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.24.dcn.offset.weight", + "backbone.levels.2.blocks.24.dcn.mask.weight", + "backbone.levels.2.blocks.24.dcn.input_proj.weight", + "backbone.levels.2.blocks.24.dcn.output_proj.weight", + "backbone.levels.2.blocks.24.mlp.fc1.weight", + "backbone.levels.2.blocks.24.mlp.fc2.weight" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.05 + }, + "layer_38_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.norm1.0.weight", + "backbone.levels.2.blocks.25.norm1.0.bias", + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.25.dcn.offset.bias", + "backbone.levels.2.blocks.25.dcn.mask.bias", + "backbone.levels.2.blocks.25.dcn.input_proj.bias", + "backbone.levels.2.blocks.25.dcn.output_proj.bias", + "backbone.levels.2.blocks.25.norm2.0.weight", + "backbone.levels.2.blocks.25.norm2.0.bias", + "backbone.levels.2.blocks.25.mlp.fc1.bias", + "backbone.levels.2.blocks.25.mlp.fc2.bias", + "backbone.levels.2.blocks.25.res_post_norm1.0.weight", + "backbone.levels.2.blocks.25.res_post_norm1.0.bias", + "backbone.levels.2.blocks.25.res_post_norm2.0.weight", + "backbone.levels.2.blocks.25.res_post_norm2.0.bias" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.0 + }, + "layer_38_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.weight", + 
"backbone.levels.2.blocks.25.dcn.offset.weight", + "backbone.levels.2.blocks.25.dcn.mask.weight", + "backbone.levels.2.blocks.25.dcn.input_proj.weight", + "backbone.levels.2.blocks.25.dcn.output_proj.weight", + "backbone.levels.2.blocks.25.mlp.fc1.weight", + "backbone.levels.2.blocks.25.mlp.fc2.weight" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.05 + }, + "layer_39_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.norm1.0.weight", + "backbone.levels.2.blocks.26.norm1.0.bias", + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.26.dcn.offset.bias", + "backbone.levels.2.blocks.26.dcn.mask.bias", + "backbone.levels.2.blocks.26.dcn.input_proj.bias", + "backbone.levels.2.blocks.26.dcn.output_proj.bias", + "backbone.levels.2.blocks.26.norm2.0.weight", + "backbone.levels.2.blocks.26.norm2.0.bias", + "backbone.levels.2.blocks.26.mlp.fc1.bias", + "backbone.levels.2.blocks.26.mlp.fc2.bias", + "backbone.levels.2.blocks.26.res_post_norm1.0.weight", + "backbone.levels.2.blocks.26.res_post_norm1.0.bias", + "backbone.levels.2.blocks.26.res_post_norm2.0.weight", + "backbone.levels.2.blocks.26.res_post_norm2.0.bias" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.0 + }, + "layer_39_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.26.dcn.offset.weight", + "backbone.levels.2.blocks.26.dcn.mask.weight", + "backbone.levels.2.blocks.26.dcn.input_proj.weight", + "backbone.levels.2.blocks.26.dcn.output_proj.weight", + "backbone.levels.2.blocks.26.mlp.fc1.weight", + "backbone.levels.2.blocks.26.mlp.fc2.weight" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.05 + }, + "layer_40_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.norm1.0.weight", + "backbone.levels.2.blocks.27.norm1.0.bias", + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.27.dcn.offset.bias", + "backbone.levels.2.blocks.27.dcn.mask.bias", + "backbone.levels.2.blocks.27.dcn.input_proj.bias", + "backbone.levels.2.blocks.27.dcn.output_proj.bias", + "backbone.levels.2.blocks.27.norm2.0.weight", + "backbone.levels.2.blocks.27.norm2.0.bias", + "backbone.levels.2.blocks.27.mlp.fc1.bias", + "backbone.levels.2.blocks.27.mlp.fc2.bias", + "backbone.levels.2.blocks.27.res_post_norm1.0.weight", + "backbone.levels.2.blocks.27.res_post_norm1.0.bias", + "backbone.levels.2.blocks.27.res_post_norm2.0.weight", + "backbone.levels.2.blocks.27.res_post_norm2.0.bias" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.0 + }, + "layer_40_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.27.dcn.offset.weight", + "backbone.levels.2.blocks.27.dcn.mask.weight", + "backbone.levels.2.blocks.27.dcn.input_proj.weight", + "backbone.levels.2.blocks.27.dcn.output_proj.weight", + "backbone.levels.2.blocks.27.mlp.fc1.weight", + 
"backbone.levels.2.blocks.27.mlp.fc2.weight" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.05 + }, + "layer_41_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.norm1.0.weight", + "backbone.levels.2.blocks.28.norm1.0.bias", + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.28.dcn.offset.bias", + "backbone.levels.2.blocks.28.dcn.mask.bias", + "backbone.levels.2.blocks.28.dcn.input_proj.bias", + "backbone.levels.2.blocks.28.dcn.output_proj.bias", + "backbone.levels.2.blocks.28.norm2.0.weight", + "backbone.levels.2.blocks.28.norm2.0.bias", + "backbone.levels.2.blocks.28.mlp.fc1.bias", + "backbone.levels.2.blocks.28.mlp.fc2.bias", + "backbone.levels.2.blocks.28.res_post_norm1.0.weight", + "backbone.levels.2.blocks.28.res_post_norm1.0.bias", + "backbone.levels.2.blocks.28.res_post_norm2.0.weight", + "backbone.levels.2.blocks.28.res_post_norm2.0.bias" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.0 + }, + "layer_41_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.28.dcn.offset.weight", + "backbone.levels.2.blocks.28.dcn.mask.weight", + "backbone.levels.2.blocks.28.dcn.input_proj.weight", + "backbone.levels.2.blocks.28.dcn.output_proj.weight", + "backbone.levels.2.blocks.28.mlp.fc1.weight", + "backbone.levels.2.blocks.28.mlp.fc2.weight" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.05 + }, + "layer_42_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.norm1.0.weight", + "backbone.levels.2.blocks.29.norm1.0.bias", + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.29.dcn.offset.bias", + "backbone.levels.2.blocks.29.dcn.mask.bias", + "backbone.levels.2.blocks.29.dcn.input_proj.bias", + "backbone.levels.2.blocks.29.dcn.output_proj.bias", + "backbone.levels.2.blocks.29.norm2.0.weight", + "backbone.levels.2.blocks.29.norm2.0.bias", + "backbone.levels.2.blocks.29.mlp.fc1.bias", + "backbone.levels.2.blocks.29.mlp.fc2.bias", + "backbone.levels.2.blocks.29.res_post_norm1.0.weight", + "backbone.levels.2.blocks.29.res_post_norm1.0.bias", + "backbone.levels.2.blocks.29.res_post_norm2.0.weight", + "backbone.levels.2.blocks.29.res_post_norm2.0.bias" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.0 + }, + "layer_42_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.29.dcn.offset.weight", + "backbone.levels.2.blocks.29.dcn.mask.weight", + "backbone.levels.2.blocks.29.dcn.input_proj.weight", + "backbone.levels.2.blocks.29.dcn.output_proj.weight", + "backbone.levels.2.blocks.29.mlp.fc1.weight", + "backbone.levels.2.blocks.29.mlp.fc2.weight" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.05 + }, + "layer_43_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.norm1.0.weight", + 
"backbone.levels.2.blocks.30.norm1.0.bias", + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.30.dcn.offset.bias", + "backbone.levels.2.blocks.30.dcn.mask.bias", + "backbone.levels.2.blocks.30.dcn.input_proj.bias", + "backbone.levels.2.blocks.30.dcn.output_proj.bias", + "backbone.levels.2.blocks.30.norm2.0.weight", + "backbone.levels.2.blocks.30.norm2.0.bias", + "backbone.levels.2.blocks.30.mlp.fc1.bias", + "backbone.levels.2.blocks.30.mlp.fc2.bias", + "backbone.levels.2.blocks.30.res_post_norm1.0.weight", + "backbone.levels.2.blocks.30.res_post_norm1.0.bias", + "backbone.levels.2.blocks.30.res_post_norm2.0.weight", + "backbone.levels.2.blocks.30.res_post_norm2.0.bias" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.0 + }, + "layer_43_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.30.dcn.offset.weight", + "backbone.levels.2.blocks.30.dcn.mask.weight", + "backbone.levels.2.blocks.30.dcn.input_proj.weight", + "backbone.levels.2.blocks.30.dcn.output_proj.weight", + "backbone.levels.2.blocks.30.mlp.fc1.weight", + "backbone.levels.2.blocks.30.mlp.fc2.weight" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.05 + }, + "layer_44_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.norm1.0.weight", + "backbone.levels.2.blocks.31.norm1.0.bias", + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.31.dcn.offset.bias", + "backbone.levels.2.blocks.31.dcn.mask.bias", + "backbone.levels.2.blocks.31.dcn.input_proj.bias", + "backbone.levels.2.blocks.31.dcn.output_proj.bias", + "backbone.levels.2.blocks.31.norm2.0.weight", + "backbone.levels.2.blocks.31.norm2.0.bias", + "backbone.levels.2.blocks.31.mlp.fc1.bias", + "backbone.levels.2.blocks.31.mlp.fc2.bias", + "backbone.levels.2.blocks.31.res_post_norm1.0.weight", + "backbone.levels.2.blocks.31.res_post_norm1.0.bias", + "backbone.levels.2.blocks.31.res_post_norm2.0.weight", + "backbone.levels.2.blocks.31.res_post_norm2.0.bias" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.0 + }, + "layer_44_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.31.dcn.offset.weight", + "backbone.levels.2.blocks.31.dcn.mask.weight", + "backbone.levels.2.blocks.31.dcn.input_proj.weight", + "backbone.levels.2.blocks.31.dcn.output_proj.weight", + "backbone.levels.2.blocks.31.mlp.fc1.weight", + "backbone.levels.2.blocks.31.mlp.fc2.weight" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.05 + }, + "layer_45_no_decay": { + "param_names": [ + "backbone.levels.2.norm.0.weight", + "backbone.levels.2.norm.0.bias", + "backbone.levels.2.downsample.norm.1.weight", + "backbone.levels.2.downsample.norm.1.bias", + "backbone.levels.3.blocks.0.norm1.0.weight", + "backbone.levels.3.blocks.0.norm1.0.bias", + 
"backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.0.dcn.offset.bias", + "backbone.levels.3.blocks.0.dcn.mask.bias", + "backbone.levels.3.blocks.0.dcn.input_proj.bias", + "backbone.levels.3.blocks.0.dcn.output_proj.bias", + "backbone.levels.3.blocks.0.norm2.0.weight", + "backbone.levels.3.blocks.0.norm2.0.bias", + "backbone.levels.3.blocks.0.mlp.fc1.bias", + "backbone.levels.3.blocks.0.mlp.fc2.bias", + "backbone.levels.3.blocks.0.res_post_norm1.0.weight", + "backbone.levels.3.blocks.0.res_post_norm1.0.bias", + "backbone.levels.3.blocks.0.res_post_norm2.0.weight", + "backbone.levels.3.blocks.0.res_post_norm2.0.bias", + "backbone.levels.3.norm.0.weight", + "backbone.levels.3.norm.0.bias" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.0 + }, + "layer_45_decay": { + "param_names": [ + "backbone.levels.2.downsample.conv.weight", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.0.dcn.offset.weight", + "backbone.levels.3.blocks.0.dcn.mask.weight", + "backbone.levels.3.blocks.0.dcn.input_proj.weight", + "backbone.levels.3.blocks.0.dcn.output_proj.weight", + "backbone.levels.3.blocks.0.mlp.fc1.weight", + "backbone.levels.3.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.05 + }, + "layer_46_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.norm1.0.weight", + "backbone.levels.3.blocks.1.norm1.0.bias", + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.1.dcn.offset.bias", + "backbone.levels.3.blocks.1.dcn.mask.bias", + "backbone.levels.3.blocks.1.dcn.input_proj.bias", + "backbone.levels.3.blocks.1.dcn.output_proj.bias", + "backbone.levels.3.blocks.1.norm2.0.weight", + "backbone.levels.3.blocks.1.norm2.0.bias", + "backbone.levels.3.blocks.1.mlp.fc1.bias", + "backbone.levels.3.blocks.1.mlp.fc2.bias", + "backbone.levels.3.blocks.1.res_post_norm1.0.weight", + "backbone.levels.3.blocks.1.res_post_norm1.0.bias", + "backbone.levels.3.blocks.1.res_post_norm2.0.weight", + "backbone.levels.3.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.0 + }, + "layer_46_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.1.dcn.offset.weight", + "backbone.levels.3.blocks.1.dcn.mask.weight", + "backbone.levels.3.blocks.1.dcn.input_proj.weight", + "backbone.levels.3.blocks.1.dcn.output_proj.weight", + "backbone.levels.3.blocks.1.mlp.fc1.weight", + "backbone.levels.3.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.05 + }, + "layer_47_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.norm1.0.weight", + "backbone.levels.3.blocks.2.norm1.0.bias", + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight", + 
"backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.2.dcn.offset.bias", + "backbone.levels.3.blocks.2.dcn.mask.bias", + "backbone.levels.3.blocks.2.dcn.input_proj.bias", + "backbone.levels.3.blocks.2.dcn.output_proj.bias", + "backbone.levels.3.blocks.2.norm2.0.weight", + "backbone.levels.3.blocks.2.norm2.0.bias", + "backbone.levels.3.blocks.2.mlp.fc1.bias", + "backbone.levels.3.blocks.2.mlp.fc2.bias", + "backbone.levels.3.blocks.2.res_post_norm1.0.weight", + "backbone.levels.3.blocks.2.res_post_norm1.0.bias", + "backbone.levels.3.blocks.2.res_post_norm2.0.weight", + "backbone.levels.3.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.0 + }, + "layer_47_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.2.dcn.offset.weight", + "backbone.levels.3.blocks.2.dcn.mask.weight", + "backbone.levels.3.blocks.2.dcn.input_proj.weight", + "backbone.levels.3.blocks.2.dcn.output_proj.weight", + "backbone.levels.3.blocks.2.mlp.fc1.weight", + "backbone.levels.3.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.05 + }, + "layer_48_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.norm1.0.weight", + "backbone.levels.3.blocks.3.norm1.0.bias", + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.3.dcn.offset.bias", + "backbone.levels.3.blocks.3.dcn.mask.bias", + "backbone.levels.3.blocks.3.dcn.input_proj.bias", + "backbone.levels.3.blocks.3.dcn.output_proj.bias", + "backbone.levels.3.blocks.3.norm2.0.weight", + "backbone.levels.3.blocks.3.norm2.0.bias", + "backbone.levels.3.blocks.3.mlp.fc1.bias", + "backbone.levels.3.blocks.3.mlp.fc2.bias", + "backbone.levels.3.blocks.3.res_post_norm1.0.weight", + "backbone.levels.3.blocks.3.res_post_norm1.0.bias", + "backbone.levels.3.blocks.3.res_post_norm2.0.weight", + "backbone.levels.3.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.0 + }, + "layer_48_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.3.dcn.offset.weight", + "backbone.levels.3.blocks.3.dcn.mask.weight", + "backbone.levels.3.blocks.3.dcn.input_proj.weight", + "backbone.levels.3.blocks.3.dcn.output_proj.weight", + "backbone.levels.3.blocks.3.mlp.fc1.weight", + "backbone.levels.3.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.05 + }, + "layer_49_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.norm1.0.weight", + "backbone.levels.3.blocks.4.norm1.0.bias", + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.4.dcn.offset.bias", + "backbone.levels.3.blocks.4.dcn.mask.bias", + "backbone.levels.3.blocks.4.dcn.input_proj.bias", + "backbone.levels.3.blocks.4.dcn.output_proj.bias", + "backbone.levels.3.blocks.4.norm2.0.weight", + 
"backbone.levels.3.blocks.4.norm2.0.bias", + "backbone.levels.3.blocks.4.mlp.fc1.bias", + "backbone.levels.3.blocks.4.mlp.fc2.bias", + "backbone.levels.3.blocks.4.res_post_norm1.0.weight", + "backbone.levels.3.blocks.4.res_post_norm1.0.bias", + "backbone.levels.3.blocks.4.res_post_norm2.0.weight", + "backbone.levels.3.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.0 + }, + "layer_49_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.4.dcn.offset.weight", + "backbone.levels.3.blocks.4.dcn.mask.weight", + "backbone.levels.3.blocks.4.dcn.input_proj.weight", + "backbone.levels.3.blocks.4.dcn.output_proj.weight", + "backbone.levels.3.blocks.4.mlp.fc1.weight", + "backbone.levels.3.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.05 + }, + "layer_50_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.norm1.0.weight", + "backbone.levels.3.blocks.5.norm1.0.bias", + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.5.dcn.offset.bias", + "backbone.levels.3.blocks.5.dcn.mask.bias", + "backbone.levels.3.blocks.5.dcn.input_proj.bias", + "backbone.levels.3.blocks.5.dcn.output_proj.bias", + "backbone.levels.3.blocks.5.norm2.0.weight", + "backbone.levels.3.blocks.5.norm2.0.bias", + "backbone.levels.3.blocks.5.mlp.fc1.bias", + "backbone.levels.3.blocks.5.mlp.fc2.bias", + "backbone.levels.3.blocks.5.res_post_norm1.0.weight", + "backbone.levels.3.blocks.5.res_post_norm1.0.bias", + "backbone.levels.3.blocks.5.res_post_norm2.0.weight", + "backbone.levels.3.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.0 + }, + "layer_50_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.5.dcn.offset.weight", + "backbone.levels.3.blocks.5.dcn.mask.weight", + "backbone.levels.3.blocks.5.dcn.input_proj.weight", + "backbone.levels.3.blocks.5.dcn.output_proj.weight", + "backbone.levels.3.blocks.5.mlp.fc1.weight", + "backbone.levels.3.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.05 + }, + "layer_51_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.weight", + "decode_head.pixel_decoder.input_convs.1.conv.weight", + "decode_head.pixel_decoder.input_convs.2.conv.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight", + 
"decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.level_encoding.weight", + "decode_head.pixel_decoder.lateral_convs.0.conv.weight", + "decode_head.pixel_decoder.output_convs.0.conv.weight", + "decode_head.pixel_decoder.mask_feature.weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight", + 
"decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.05 + }, + "layer_51_no_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.bias", + "decode_head.pixel_decoder.input_convs.0.gn.weight", + "decode_head.pixel_decoder.input_convs.0.gn.bias", + "decode_head.pixel_decoder.input_convs.1.conv.bias", + "decode_head.pixel_decoder.input_convs.1.gn.weight", + "decode_head.pixel_decoder.input_convs.1.gn.bias", + "decode_head.pixel_decoder.input_convs.2.conv.bias", + "decode_head.pixel_decoder.input_convs.2.gn.weight", + "decode_head.pixel_decoder.input_convs.2.gn.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias", + 
"decode_head.pixel_decoder.encoder.layers.0.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight", + 
"decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.bias", + "decode_head.pixel_decoder.lateral_convs.0.gn.weight", + "decode_head.pixel_decoder.lateral_convs.0.gn.bias", + "decode_head.pixel_decoder.output_convs.0.gn.weight", + "decode_head.pixel_decoder.output_convs.0.gn.bias", + "decode_head.pixel_decoder.mask_feature.bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.0.norms.0.weight", + "decode_head.transformer_decoder.layers.0.norms.0.bias", + "decode_head.transformer_decoder.layers.0.norms.1.weight", + "decode_head.transformer_decoder.layers.0.norms.1.bias", + "decode_head.transformer_decoder.layers.0.norms.2.weight", + "decode_head.transformer_decoder.layers.0.norms.2.bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.1.norms.0.weight", + "decode_head.transformer_decoder.layers.1.norms.0.bias", + "decode_head.transformer_decoder.layers.1.norms.1.weight", + "decode_head.transformer_decoder.layers.1.norms.1.bias", + "decode_head.transformer_decoder.layers.1.norms.2.weight", + "decode_head.transformer_decoder.layers.1.norms.2.bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.2.norms.0.weight", + "decode_head.transformer_decoder.layers.2.norms.0.bias", + "decode_head.transformer_decoder.layers.2.norms.1.weight", + "decode_head.transformer_decoder.layers.2.norms.1.bias", + "decode_head.transformer_decoder.layers.2.norms.2.weight", + "decode_head.transformer_decoder.layers.2.norms.2.bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias", + 
"decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.3.norms.0.weight", + "decode_head.transformer_decoder.layers.3.norms.0.bias", + "decode_head.transformer_decoder.layers.3.norms.1.weight", + "decode_head.transformer_decoder.layers.3.norms.1.bias", + "decode_head.transformer_decoder.layers.3.norms.2.weight", + "decode_head.transformer_decoder.layers.3.norms.2.bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.4.norms.0.weight", + "decode_head.transformer_decoder.layers.4.norms.0.bias", + "decode_head.transformer_decoder.layers.4.norms.1.weight", + "decode_head.transformer_decoder.layers.4.norms.1.bias", + "decode_head.transformer_decoder.layers.4.norms.2.weight", + "decode_head.transformer_decoder.layers.4.norms.2.bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.5.norms.0.weight", + "decode_head.transformer_decoder.layers.5.norms.0.bias", + "decode_head.transformer_decoder.layers.5.norms.1.weight", + "decode_head.transformer_decoder.layers.5.norms.1.bias", + "decode_head.transformer_decoder.layers.5.norms.2.weight", + "decode_head.transformer_decoder.layers.5.norms.2.bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.6.norms.0.weight", + "decode_head.transformer_decoder.layers.6.norms.0.bias", + "decode_head.transformer_decoder.layers.6.norms.1.weight", + "decode_head.transformer_decoder.layers.6.norms.1.bias", + "decode_head.transformer_decoder.layers.6.norms.2.weight", + "decode_head.transformer_decoder.layers.6.norms.2.bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias", + 
"decode_head.transformer_decoder.layers.7.norms.0.weight", + "decode_head.transformer_decoder.layers.7.norms.0.bias", + "decode_head.transformer_decoder.layers.7.norms.1.weight", + "decode_head.transformer_decoder.layers.7.norms.1.bias", + "decode_head.transformer_decoder.layers.7.norms.2.weight", + "decode_head.transformer_decoder.layers.7.norms.2.bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.8.norms.0.weight", + "decode_head.transformer_decoder.layers.8.norms.0.bias", + "decode_head.transformer_decoder.layers.8.norms.1.weight", + "decode_head.transformer_decoder.layers.8.norms.1.bias", + "decode_head.transformer_decoder.layers.8.norms.2.weight", + "decode_head.transformer_decoder.layers.8.norms.2.bias", + "decode_head.transformer_decoder.post_norm.weight", + "decode_head.transformer_decoder.post_norm.bias" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.0 + } +} +2025-05-28 02:23:15,310 - mmseg - INFO - Loaded 1 images +2025-05-28 02:23:15,311 - mmseg - INFO - load checkpoint from http path: https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth +2025-05-28 02:23:21,280 - mmseg - INFO - Start running, host: yiming@pasteur2.stanford.edu, work_dir: /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 +2025-05-28 02:23:21,280 - mmseg - INFO - Hooks will be executed in the following order: +before_run: +(VERY_HIGH ) PolyLrUpdaterHook +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_epoch: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_iter: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook + -------------------- +after_train_iter: +(ABOVE_NORMAL) OptimizerHook +(NORMAL ) CheckpointHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_train_epoch: +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_epoch: +(LOW ) IterTimerHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_epoch: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_run: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +2025-05-28 02:23:21,281 - mmseg - INFO - workflow: [('train', 1)], max: 80000 iters +2025-05-28 02:23:21,281 - mmseg - INFO - Checkpoints will be saved to /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 by HardDiskBackend. 
diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022236.log.json b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022236.log.json new file mode 100644 index 0000000000000000000000000000000000000000..f45311e6adf23ccee2e845c14d50ad94f44a2343 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_022236.log.json @@ -0,0 +1 @@ +{"env_info": "sys.platform: linux\nPython: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ]\nCUDA available: True\nGPU 0: NVIDIA TITAN RTX\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 12.2, V12.2.91\nGCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\nPyTorch: 1.11.0+cu113\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n\nTorchVision: 0.12.0+cu113\nOpenCV: 4.11.0\nMMCV: 1.5.0\nMMCV Compiler: GCC 7.3\nMMCV CUDA Compiler: 11.3\nMMSegmentation: 0.27.0+12dc934", "seed": 1501318398, "exp_name": "mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py", "mmseg_version": "0.27.0+12dc934", "config": "num_things_classes = 100\nnum_stuff_classes = 50\nnum_classes = 19\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoderMask2Former',\n pretrained=None,\n backbone=dict(\n type='InternImage',\n core_op='DCNv3',\n channels=320,\n depths=[6, 6, 32, 6],\n groups=[10, 20, 40, 80],\n mlp_ratio=4.0,\n drop_path_rate=0.5,\n norm_layer='LN',\n layer_scale=None,\n offset_scale=1.0,\n post_norm=False,\n dw_kernel_size=5,\n res_post_norm=True,\n level2_post_norm=True,\n 
level2_post_norm_block_ids=[5, 11, 17, 23, 29],\n center_feature_scale=True,\n with_cp=False,\n out_indices=(0, 1, 2, 3),\n init_cfg=None),\n decode_head=dict(\n type='Mask2FormerHead',\n in_channels=[320, 640, 1280, 2560],\n feat_channels=256,\n out_channels=256,\n in_index=[0, 1, 2, 3],\n num_things_classes=100,\n num_stuff_classes=50,\n num_queries=100,\n num_transformer_feat_level=3,\n pixel_decoder=dict(\n type='MSDeformAttnPixelDecoder',\n num_outs=3,\n norm_cfg=dict(type='GN', num_groups=32),\n act_cfg=dict(type='ReLU'),\n encoder=dict(\n type='DetrTransformerEncoder',\n num_layers=6,\n transformerlayers=dict(\n type='BaseTransformerLayer',\n attn_cfgs=dict(\n type='MultiScaleDeformableAttention',\n embed_dims=256,\n num_heads=8,\n num_levels=3,\n num_points=4,\n im2col_step=64,\n dropout=0.0,\n batch_first=False,\n norm_cfg=None,\n init_cfg=None),\n ffn_cfgs=dict(\n type='FFN',\n embed_dims=256,\n feedforward_channels=1024,\n num_fcs=2,\n ffn_drop=0.0,\n act_cfg=dict(type='ReLU', inplace=True),\n with_cp=False),\n operation_order=('self_attn', 'norm', 'ffn', 'norm')),\n init_cfg=None),\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n init_cfg=None),\n enforce_decoder_input_project=False,\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n transformer_decoder=dict(\n type='DetrTransformerDecoder',\n return_intermediate=True,\n num_layers=9,\n transformerlayers=dict(\n type='DetrTransformerDecoderLayer',\n attn_cfgs=dict(\n type='MultiheadAttention',\n embed_dims=256,\n num_heads=8,\n attn_drop=0.0,\n proj_drop=0.0,\n dropout_layer=None,\n batch_first=False),\n ffn_cfgs=dict(\n embed_dims=256,\n feedforward_channels=2048,\n num_fcs=2,\n act_cfg=dict(type='ReLU', inplace=True),\n ffn_drop=0.0,\n dropout_layer=None,\n add_identity=True,\n with_cp=False),\n feedforward_channels=2048,\n operation_order=('cross_attn', 'norm', 'self_attn', 'norm',\n 'ffn', 'norm')),\n init_cfg=None),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=2.0,\n reduction='mean',\n class_weight=[\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1\n ]),\n loss_mask=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=5.0),\n loss_dice=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n reduction='mean',\n naive_dice=True,\n eps=1.0,\n loss_weight=5.0),\n num_classes=19,\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512))),\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n 
semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512)),\n init_cfg=None)\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncrop_size = (1024, 1024)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(\n type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(\n type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n ],\n split='splits/fold_1_train_split.txt'),\n val=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ],\n split='splits/fold_1_val_split.txt'),\n test=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/val/',\n ann_dir='gtFine/val/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', 
keys=['img'])\n ])\n ]))\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n dict(type='TensorboardLoggerHook')\n ])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth'\nresume_from = None\nworkflow = [('train', 1)]\ncudnn_benchmark = True\noptimizer = dict(\n type='AdamW',\n lr=1e-05,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n constructor='CustomLayerDecayOptimizerConstructor',\n paramwise_cfg=dict(\n num_layers=50,\n layer_decay_rate=0.95,\n depths=[6, 6, 32, 6],\n offset_lr_scale=1.0))\noptimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))\nlr_config = dict(\n policy='poly',\n warmup='linear',\n warmup_iters=1500,\n warmup_ratio=1e-06,\n power=1.0,\n min_lr=0.0,\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=80000)\ncheckpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1)\nevaluation = dict(\n interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU')\nwork_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1'\ngpu_ids = range(0, 1)\nauto_resume = False\ndevice = 'cuda'\nseed = 1501318398\n", "CLASSES": ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"], "PALETTE": [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]], "hook_msgs": {}} diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_023516.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_023516.log new file mode 100644 index 0000000000000000000000000000000000000000..c5044772c70cff618a5b82d02e105710c9640796 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_023516.log @@ -0,0 +1,7658 @@ +2025-05-28 02:35:16,176 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 02:35:16,177 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 02:35:16,219 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. 
OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 02:35:16,220 - mmseg - INFO - Distributed training: True +2025-05-28 02:35:16,654 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + 
with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + 
img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 02:35:16,655 - mmseg - INFO - Set random seed to 470681994, deterministic: False +2025-05-28 02:35:16,655 - mmseg - INFO - using core type: DCNv3 +2025-05-28 02:35:16,656 - mmseg - INFO - using activation layer: GELU +2025-05-28 02:35:16,656 - mmseg - INFO - using main norm layer: LN +2025-05-28 02:35:16,656 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 02:35:16,656 - mmseg - INFO - 
level2_post_norm: True +2025-05-28 02:35:16,656 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 02:35:16,656 - mmseg - INFO - res_post_norm: True +2025-05-28 02:35:16,656 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 02:35:37,788 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
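[Editor's aside, not part of the log] The offset/mask projection shapes in the listing above and below follow directly from the backbone config dumped earlier in this log (`groups=[10, 20, 40, 80]`, channels doubling from 320 per level). The sketch below shows that arithmetic; the 3x3 sampling grid is an assumption based on DCNv3's default `kernel_size` (the `dw_kernel_size=5` in the config only affects the separate depthwise conv, i.e. the `[C, 1, 5, 5]` weights).

```python
# Sketch: how the DCNv3 offset/mask projection shapes in this parameter listing
# follow from the backbone config. Assumes DCNv3's default 3x3 sampling grid.

channels = [320, 640, 1280, 2560]   # per-level embed dims (channels=320, doubled per level)
groups = [10, 20, 40, 80]           # from the backbone config
kernel_size = 3                     # assumed DCNv3 default sampling grid

for c, g in zip(channels, groups):
    offset_out = g * kernel_size * kernel_size * 2   # an (x, y) offset per sampling point
    mask_out = g * kernel_size * kernel_size         # one modulation scalar per point
    print(f"dim {c}: offset.weight [{offset_out}, {c}], mask.weight [{mask_out}, {c}]")

# Matches the listing: dim 320 -> offset [180, 320], mask [90, 320];
# dim 640 -> offset [360, 640], mask [180, 640]; dim 1280 -> [720, 1280] / [360, 1280].
```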
+backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - 
torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
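[Editor's aside, not part of the log] The `lr_config` and `runner` entries in the dumped config above (poly policy, power=1.0, min_lr=0.0, linear warmup over 1,500 iters from warmup_ratio=1e-6, max_iters=80,000, base lr=1e-5) imply the schedule sketched below, assuming MMCV's standard poly and linear-warmup formulas.

```python
# Sketch: effective learning rate implied by lr_config/runner above,
# assuming MMCV's usual poly decay with linear warmup.

def lr_at(iter_idx: int,
          base_lr: float = 1e-5,      # optimizer.lr
          max_iters: int = 80_000,    # runner.max_iters
          power: float = 1.0,
          min_lr: float = 0.0,
          warmup_iters: int = 1_500,
          warmup_ratio: float = 1e-6) -> float:
    # Poly decay: (base_lr - min_lr) * (1 - iter / max_iters) ** power + min_lr
    regular = (base_lr - min_lr) * (1 - iter_idx / max_iters) ** power + min_lr
    if iter_idx >= warmup_iters:
        return regular
    # Linear warmup: scale the regular LR from warmup_ratio up to 1
    k = (1 - iter_idx / warmup_iters) * (1 - warmup_ratio)
    return regular * (1 - k)

print(lr_at(0))        # ~1e-11  (base_lr * warmup_ratio)
print(lr_at(1_500))    # ~9.8e-6 (warmup done, poly decay under way)
print(lr_at(80_000))   # 0.0     (min_lr)
```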
+backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - torch.Size([320]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + 
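[Editor's aside, not part of the log] The optimizer in the config above uses `CustomLayerDecayOptimizerConstructor` with `num_layers=50` (the sum of `depths=[6, 6, 32, 6]`) and `layer_decay_rate=0.95`. The exact block-to-layer-id mapping is defined in the InternImage repo; the sketch below only illustrates the geometric scaling such a constructor applies, assuming a BEiT-style scheme in which earlier backbone blocks get smaller multipliers and the decode head keeps the full base LR.

```python
# Sketch: intent of the layer-wise LR decay in paramwise_cfg
# (num_layers=50 = sum(depths), layer_decay_rate=0.95), assuming a BEiT-style
# layer-id scheme; the real mapping lives in CustomLayerDecayOptimizerConstructor.

num_layers = 50
decay_rate = 0.95
base_lr = 1e-5

def lr_scale(layer_id: int) -> float:
    # layer_id 0 ~ patch embedding, num_layers + 1 ~ decode head
    return decay_rate ** (num_layers + 1 - layer_id)

print(base_lr * lr_scale(0))               # earliest weights, heavily decayed (~7.3e-7)
print(base_lr * lr_scale(num_layers))      # last backbone block (~9.5e-6)
print(base_lr * lr_scale(num_layers + 1))  # decode head, full base LR (1e-5)
```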
+backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and 
after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.bias - torch.Size([640]): 
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.blocks.3.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc1.weight - 
torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.1.blocks.5.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.2.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.3.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.output_proj.weight - torch.Size([1280, 
1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in 
InternImage + +backbone.levels.2.blocks.5.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.weight - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.8.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc1.bias - torch.Size([5120]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 
5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
Parameter-initialization log for EncoderDecoderMask2Former (InternImage backbone + Mask2FormerHead), summarized. For every parameter the log records its name, its torch.Size, and one of two messages: "Initialized by user-defined `init_weights` in <module>" or "The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former". The portion reproduced here starts partway through backbone.levels.2.blocks.27 and ends partway through decode_head.transformer_decoder.layers.2; it follows a fixed pattern per module.

Backbone (InternImage), levels.2.blocks.27-31 (channels 1280, MLP hidden 5120, dcn.offset 720, dcn.mask 360) and levels.3.blocks.0-5 (channels 2560, MLP hidden 10240, dcn.offset 1440, dcn.mask 720), plus levels.2.norm, levels.2.post_norms.0-4, levels.2.downsample and levels.3.norm:

- Initialized by user-defined `init_weights` in InternImage: dcn.offset.weight, dcn.mask.weight, dcn.input_proj.weight, dcn.output_proj.weight, mlp.fc1.weight and mlp.fc2.weight of every listed block.
- The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former: the corresponding biases, the norm1/norm2 and res_post_norm1/res_post_norm2 parameters, the dcn.dw_conv and dcn.center_feature_scale_proj parameters, levels.2.norm, levels.2.post_norms.0-4, levels.2.downsample (conv.weight 2560x1280x3x3, norm.1) and levels.3.norm.

Decode head (Mask2FormerHead):

- Initialized by user-defined `init_weights` in Mask2FormerHead: pixel_decoder.input_convs.{0,1,2}.conv.weight (1x1 convs, 256x2560 / 256x1280 / 256x640), pixel_decoder.encoder.layers.0-5 attentions.0.value_proj.weight and output_proj.weight (256x256) plus ffns.0 layer weights (1024x256 and 256x1024), pixel_decoder.level_encoding.weight (3x256), pixel_decoder.lateral_convs.0.conv.weight (256x320x1x1), pixel_decoder.output_convs.0.conv.weight (256x256x3x3), pixel_decoder.mask_feature.weight and bias, and transformer_decoder.layers.0-2 attentions.{0,1}.attn.in_proj_weight (768x256), attn.out_proj.weight (256x256) and ffns.0 layer weights (2048x256 and 256x2048).
- The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former: all remaining biases (conv, attention and FFN biases other than mask_feature.bias), the gn and norms parameters throughout the pixel decoder and transformer decoder, and the encoder attentions.0.sampling_offsets and attention_weights parameters.
+decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - 
torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - 
torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former +2025-05-28 02:35:49,301 - mmseg - INFO - Loaded 1 images +2025-05-28 02:35:49,302 - mmseg - INFO - Built training dataset from config: {'type': 'CityscapesDataset', 'data_root': 
'/pasteur/u/yiming/homework4/cityscapes', 'img_dir': 'leftImg8bit/', 'ann_dir': 'gtFine/', 'pipeline': [{'type': 'LoadImageFromFile'}, {'type': 'LoadAnnotations'}, {'type': 'Resize', 'img_scale': (2048, 1024), 'ratio_range': (0.5, 2.0)}, {'type': 'RandomCrop', 'crop_size': (1024, 1024), 'cat_max_ratio': 0.75}, {'type': 'RandomFlip', 'prob': 0.5}, {'type': 'PhotoMetricDistortion'}, {'type': 'Normalize', 'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375], 'to_rgb': True}, {'type': 'Pad', 'size': (1024, 1024), 'pad_val': 0, 'seg_pad_val': 255}, {'type': 'ToMask'}, {'type': 'DefaultFormatBundle'}, {'type': 'Collect', 'keys': ['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']}], 'split': 'splits/fold_1_train_split.txt'} +2025-05-28 02:35:49,302 - mmseg - INFO - Number of samples in training dataset: 1 +2025-05-28 02:35:55,140 - mmseg - INFO - {'num_layers': 50, 'layer_decay_rate': 0.95, 'depths': [6, 6, 32, 6], 'offset_lr_scale': 1.0} +2025-05-28 02:35:55,140 - mmseg - INFO - Build CustomLayerDecayOptimizerConstructor 0.950000 - 52 +2025-05-28 02:35:55,150 - mmseg - INFO - Param groups = { + "layer_0_decay": { + "param_names": [ + "backbone.patch_embed.conv1.weight", + "backbone.patch_embed.conv2.weight", + "decode_head.query_embed.weight", + "decode_head.query_feat.weight", + "decode_head.level_embed.weight", + "decode_head.cls_embed.weight", + "decode_head.mask_embed.0.weight", + "decode_head.mask_embed.2.weight", + "decode_head.mask_embed.4.weight" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.05 + }, + "layer_0_no_decay": { + "param_names": [ + "backbone.patch_embed.conv1.bias", + "backbone.patch_embed.norm1.1.weight", + "backbone.patch_embed.norm1.1.bias", + "backbone.patch_embed.conv2.bias", + "backbone.patch_embed.norm2.1.weight", + "backbone.patch_embed.norm2.1.bias", + "decode_head.cls_embed.bias", + "decode_head.mask_embed.0.bias", + "decode_head.mask_embed.2.bias", + "decode_head.mask_embed.4.bias" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.0 + }, + "layer_1_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.norm1.0.weight", + "backbone.levels.0.blocks.0.norm1.0.bias", + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.0.dcn.offset.bias", + "backbone.levels.0.blocks.0.dcn.mask.bias", + "backbone.levels.0.blocks.0.dcn.input_proj.bias", + "backbone.levels.0.blocks.0.dcn.output_proj.bias", + "backbone.levels.0.blocks.0.norm2.0.weight", + "backbone.levels.0.blocks.0.norm2.0.bias", + "backbone.levels.0.blocks.0.mlp.fc1.bias", + "backbone.levels.0.blocks.0.mlp.fc2.bias", + "backbone.levels.0.blocks.0.res_post_norm1.0.weight", + "backbone.levels.0.blocks.0.res_post_norm1.0.bias", + "backbone.levels.0.blocks.0.res_post_norm2.0.weight", + "backbone.levels.0.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.0 + }, + "layer_1_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.0.dcn.offset.weight", + "backbone.levels.0.blocks.0.dcn.mask.weight", + "backbone.levels.0.blocks.0.dcn.input_proj.weight", + "backbone.levels.0.blocks.0.dcn.output_proj.weight", + 
"backbone.levels.0.blocks.0.mlp.fc1.weight", + "backbone.levels.0.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.05 + }, + "layer_2_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.norm1.0.weight", + "backbone.levels.0.blocks.1.norm1.0.bias", + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.1.dcn.offset.bias", + "backbone.levels.0.blocks.1.dcn.mask.bias", + "backbone.levels.0.blocks.1.dcn.input_proj.bias", + "backbone.levels.0.blocks.1.dcn.output_proj.bias", + "backbone.levels.0.blocks.1.norm2.0.weight", + "backbone.levels.0.blocks.1.norm2.0.bias", + "backbone.levels.0.blocks.1.mlp.fc1.bias", + "backbone.levels.0.blocks.1.mlp.fc2.bias", + "backbone.levels.0.blocks.1.res_post_norm1.0.weight", + "backbone.levels.0.blocks.1.res_post_norm1.0.bias", + "backbone.levels.0.blocks.1.res_post_norm2.0.weight", + "backbone.levels.0.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.0 + }, + "layer_2_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.1.dcn.offset.weight", + "backbone.levels.0.blocks.1.dcn.mask.weight", + "backbone.levels.0.blocks.1.dcn.input_proj.weight", + "backbone.levels.0.blocks.1.dcn.output_proj.weight", + "backbone.levels.0.blocks.1.mlp.fc1.weight", + "backbone.levels.0.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.05 + }, + "layer_3_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.norm1.0.weight", + "backbone.levels.0.blocks.2.norm1.0.bias", + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.2.dcn.offset.bias", + "backbone.levels.0.blocks.2.dcn.mask.bias", + "backbone.levels.0.blocks.2.dcn.input_proj.bias", + "backbone.levels.0.blocks.2.dcn.output_proj.bias", + "backbone.levels.0.blocks.2.norm2.0.weight", + "backbone.levels.0.blocks.2.norm2.0.bias", + "backbone.levels.0.blocks.2.mlp.fc1.bias", + "backbone.levels.0.blocks.2.mlp.fc2.bias", + "backbone.levels.0.blocks.2.res_post_norm1.0.weight", + "backbone.levels.0.blocks.2.res_post_norm1.0.bias", + "backbone.levels.0.blocks.2.res_post_norm2.0.weight", + "backbone.levels.0.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.0 + }, + "layer_3_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.2.dcn.offset.weight", + "backbone.levels.0.blocks.2.dcn.mask.weight", + "backbone.levels.0.blocks.2.dcn.input_proj.weight", + "backbone.levels.0.blocks.2.dcn.output_proj.weight", + "backbone.levels.0.blocks.2.mlp.fc1.weight", + "backbone.levels.0.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.05 + }, + "layer_4_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.norm1.0.weight", + 
"backbone.levels.0.blocks.3.norm1.0.bias", + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.3.dcn.offset.bias", + "backbone.levels.0.blocks.3.dcn.mask.bias", + "backbone.levels.0.blocks.3.dcn.input_proj.bias", + "backbone.levels.0.blocks.3.dcn.output_proj.bias", + "backbone.levels.0.blocks.3.norm2.0.weight", + "backbone.levels.0.blocks.3.norm2.0.bias", + "backbone.levels.0.blocks.3.mlp.fc1.bias", + "backbone.levels.0.blocks.3.mlp.fc2.bias", + "backbone.levels.0.blocks.3.res_post_norm1.0.weight", + "backbone.levels.0.blocks.3.res_post_norm1.0.bias", + "backbone.levels.0.blocks.3.res_post_norm2.0.weight", + "backbone.levels.0.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.0 + }, + "layer_4_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.3.dcn.offset.weight", + "backbone.levels.0.blocks.3.dcn.mask.weight", + "backbone.levels.0.blocks.3.dcn.input_proj.weight", + "backbone.levels.0.blocks.3.dcn.output_proj.weight", + "backbone.levels.0.blocks.3.mlp.fc1.weight", + "backbone.levels.0.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.05 + }, + "layer_5_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.norm1.0.weight", + "backbone.levels.0.blocks.4.norm1.0.bias", + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.4.dcn.offset.bias", + "backbone.levels.0.blocks.4.dcn.mask.bias", + "backbone.levels.0.blocks.4.dcn.input_proj.bias", + "backbone.levels.0.blocks.4.dcn.output_proj.bias", + "backbone.levels.0.blocks.4.norm2.0.weight", + "backbone.levels.0.blocks.4.norm2.0.bias", + "backbone.levels.0.blocks.4.mlp.fc1.bias", + "backbone.levels.0.blocks.4.mlp.fc2.bias", + "backbone.levels.0.blocks.4.res_post_norm1.0.weight", + "backbone.levels.0.blocks.4.res_post_norm1.0.bias", + "backbone.levels.0.blocks.4.res_post_norm2.0.weight", + "backbone.levels.0.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.0 + }, + "layer_5_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.4.dcn.offset.weight", + "backbone.levels.0.blocks.4.dcn.mask.weight", + "backbone.levels.0.blocks.4.dcn.input_proj.weight", + "backbone.levels.0.blocks.4.dcn.output_proj.weight", + "backbone.levels.0.blocks.4.mlp.fc1.weight", + "backbone.levels.0.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.05 + }, + "layer_6_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.norm1.0.weight", + "backbone.levels.0.blocks.5.norm1.0.bias", + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias", + 
"backbone.levels.0.blocks.5.dcn.offset.bias", + "backbone.levels.0.blocks.5.dcn.mask.bias", + "backbone.levels.0.blocks.5.dcn.input_proj.bias", + "backbone.levels.0.blocks.5.dcn.output_proj.bias", + "backbone.levels.0.blocks.5.norm2.0.weight", + "backbone.levels.0.blocks.5.norm2.0.bias", + "backbone.levels.0.blocks.5.mlp.fc1.bias", + "backbone.levels.0.blocks.5.mlp.fc2.bias", + "backbone.levels.0.blocks.5.res_post_norm1.0.weight", + "backbone.levels.0.blocks.5.res_post_norm1.0.bias", + "backbone.levels.0.blocks.5.res_post_norm2.0.weight", + "backbone.levels.0.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.0 + }, + "layer_6_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.5.dcn.offset.weight", + "backbone.levels.0.blocks.5.dcn.mask.weight", + "backbone.levels.0.blocks.5.dcn.input_proj.weight", + "backbone.levels.0.blocks.5.dcn.output_proj.weight", + "backbone.levels.0.blocks.5.mlp.fc1.weight", + "backbone.levels.0.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.05 + }, + "layer_7_no_decay": { + "param_names": [ + "backbone.levels.0.norm.0.weight", + "backbone.levels.0.norm.0.bias", + "backbone.levels.0.downsample.norm.1.weight", + "backbone.levels.0.downsample.norm.1.bias", + "backbone.levels.1.blocks.0.norm1.0.weight", + "backbone.levels.1.blocks.0.norm1.0.bias", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.0.dcn.offset.bias", + "backbone.levels.1.blocks.0.dcn.mask.bias", + "backbone.levels.1.blocks.0.dcn.input_proj.bias", + "backbone.levels.1.blocks.0.dcn.output_proj.bias", + "backbone.levels.1.blocks.0.norm2.0.weight", + "backbone.levels.1.blocks.0.norm2.0.bias", + "backbone.levels.1.blocks.0.mlp.fc1.bias", + "backbone.levels.1.blocks.0.mlp.fc2.bias", + "backbone.levels.1.blocks.0.res_post_norm1.0.weight", + "backbone.levels.1.blocks.0.res_post_norm1.0.bias", + "backbone.levels.1.blocks.0.res_post_norm2.0.weight", + "backbone.levels.1.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.0 + }, + "layer_7_decay": { + "param_names": [ + "backbone.levels.0.downsample.conv.weight", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.0.dcn.offset.weight", + "backbone.levels.1.blocks.0.dcn.mask.weight", + "backbone.levels.1.blocks.0.dcn.input_proj.weight", + "backbone.levels.1.blocks.0.dcn.output_proj.weight", + "backbone.levels.1.blocks.0.mlp.fc1.weight", + "backbone.levels.1.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.05 + }, + "layer_8_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.norm1.0.weight", + "backbone.levels.1.blocks.1.norm1.0.bias", + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.1.dcn.offset.bias", + "backbone.levels.1.blocks.1.dcn.mask.bias", + 
"backbone.levels.1.blocks.1.dcn.input_proj.bias", + "backbone.levels.1.blocks.1.dcn.output_proj.bias", + "backbone.levels.1.blocks.1.norm2.0.weight", + "backbone.levels.1.blocks.1.norm2.0.bias", + "backbone.levels.1.blocks.1.mlp.fc1.bias", + "backbone.levels.1.blocks.1.mlp.fc2.bias", + "backbone.levels.1.blocks.1.res_post_norm1.0.weight", + "backbone.levels.1.blocks.1.res_post_norm1.0.bias", + "backbone.levels.1.blocks.1.res_post_norm2.0.weight", + "backbone.levels.1.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.0 + }, + "layer_8_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.1.dcn.offset.weight", + "backbone.levels.1.blocks.1.dcn.mask.weight", + "backbone.levels.1.blocks.1.dcn.input_proj.weight", + "backbone.levels.1.blocks.1.dcn.output_proj.weight", + "backbone.levels.1.blocks.1.mlp.fc1.weight", + "backbone.levels.1.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.05 + }, + "layer_9_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.norm1.0.weight", + "backbone.levels.1.blocks.2.norm1.0.bias", + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.2.dcn.offset.bias", + "backbone.levels.1.blocks.2.dcn.mask.bias", + "backbone.levels.1.blocks.2.dcn.input_proj.bias", + "backbone.levels.1.blocks.2.dcn.output_proj.bias", + "backbone.levels.1.blocks.2.norm2.0.weight", + "backbone.levels.1.blocks.2.norm2.0.bias", + "backbone.levels.1.blocks.2.mlp.fc1.bias", + "backbone.levels.1.blocks.2.mlp.fc2.bias", + "backbone.levels.1.blocks.2.res_post_norm1.0.weight", + "backbone.levels.1.blocks.2.res_post_norm1.0.bias", + "backbone.levels.1.blocks.2.res_post_norm2.0.weight", + "backbone.levels.1.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.0 + }, + "layer_9_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.2.dcn.offset.weight", + "backbone.levels.1.blocks.2.dcn.mask.weight", + "backbone.levels.1.blocks.2.dcn.input_proj.weight", + "backbone.levels.1.blocks.2.dcn.output_proj.weight", + "backbone.levels.1.blocks.2.mlp.fc1.weight", + "backbone.levels.1.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.05 + }, + "layer_10_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.norm1.0.weight", + "backbone.levels.1.blocks.3.norm1.0.bias", + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.3.dcn.offset.bias", + "backbone.levels.1.blocks.3.dcn.mask.bias", + "backbone.levels.1.blocks.3.dcn.input_proj.bias", + "backbone.levels.1.blocks.3.dcn.output_proj.bias", + "backbone.levels.1.blocks.3.norm2.0.weight", + "backbone.levels.1.blocks.3.norm2.0.bias", + "backbone.levels.1.blocks.3.mlp.fc1.bias", + "backbone.levels.1.blocks.3.mlp.fc2.bias", + 
"backbone.levels.1.blocks.3.res_post_norm1.0.weight", + "backbone.levels.1.blocks.3.res_post_norm1.0.bias", + "backbone.levels.1.blocks.3.res_post_norm2.0.weight", + "backbone.levels.1.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.0 + }, + "layer_10_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.3.dcn.offset.weight", + "backbone.levels.1.blocks.3.dcn.mask.weight", + "backbone.levels.1.blocks.3.dcn.input_proj.weight", + "backbone.levels.1.blocks.3.dcn.output_proj.weight", + "backbone.levels.1.blocks.3.mlp.fc1.weight", + "backbone.levels.1.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.05 + }, + "layer_11_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.norm1.0.weight", + "backbone.levels.1.blocks.4.norm1.0.bias", + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.4.dcn.offset.bias", + "backbone.levels.1.blocks.4.dcn.mask.bias", + "backbone.levels.1.blocks.4.dcn.input_proj.bias", + "backbone.levels.1.blocks.4.dcn.output_proj.bias", + "backbone.levels.1.blocks.4.norm2.0.weight", + "backbone.levels.1.blocks.4.norm2.0.bias", + "backbone.levels.1.blocks.4.mlp.fc1.bias", + "backbone.levels.1.blocks.4.mlp.fc2.bias", + "backbone.levels.1.blocks.4.res_post_norm1.0.weight", + "backbone.levels.1.blocks.4.res_post_norm1.0.bias", + "backbone.levels.1.blocks.4.res_post_norm2.0.weight", + "backbone.levels.1.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.0 + }, + "layer_11_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.4.dcn.offset.weight", + "backbone.levels.1.blocks.4.dcn.mask.weight", + "backbone.levels.1.blocks.4.dcn.input_proj.weight", + "backbone.levels.1.blocks.4.dcn.output_proj.weight", + "backbone.levels.1.blocks.4.mlp.fc1.weight", + "backbone.levels.1.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.05 + }, + "layer_12_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.norm1.0.weight", + "backbone.levels.1.blocks.5.norm1.0.bias", + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.5.dcn.offset.bias", + "backbone.levels.1.blocks.5.dcn.mask.bias", + "backbone.levels.1.blocks.5.dcn.input_proj.bias", + "backbone.levels.1.blocks.5.dcn.output_proj.bias", + "backbone.levels.1.blocks.5.norm2.0.weight", + "backbone.levels.1.blocks.5.norm2.0.bias", + "backbone.levels.1.blocks.5.mlp.fc1.bias", + "backbone.levels.1.blocks.5.mlp.fc2.bias", + "backbone.levels.1.blocks.5.res_post_norm1.0.weight", + "backbone.levels.1.blocks.5.res_post_norm1.0.bias", + "backbone.levels.1.blocks.5.res_post_norm2.0.weight", + "backbone.levels.1.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 
0.0 + }, + "layer_12_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.5.dcn.offset.weight", + "backbone.levels.1.blocks.5.dcn.mask.weight", + "backbone.levels.1.blocks.5.dcn.input_proj.weight", + "backbone.levels.1.blocks.5.dcn.output_proj.weight", + "backbone.levels.1.blocks.5.mlp.fc1.weight", + "backbone.levels.1.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.05 + }, + "layer_13_no_decay": { + "param_names": [ + "backbone.levels.1.norm.0.weight", + "backbone.levels.1.norm.0.bias", + "backbone.levels.1.downsample.norm.1.weight", + "backbone.levels.1.downsample.norm.1.bias", + "backbone.levels.2.blocks.0.norm1.0.weight", + "backbone.levels.2.blocks.0.norm1.0.bias", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.0.dcn.offset.bias", + "backbone.levels.2.blocks.0.dcn.mask.bias", + "backbone.levels.2.blocks.0.dcn.input_proj.bias", + "backbone.levels.2.blocks.0.dcn.output_proj.bias", + "backbone.levels.2.blocks.0.norm2.0.weight", + "backbone.levels.2.blocks.0.norm2.0.bias", + "backbone.levels.2.blocks.0.mlp.fc1.bias", + "backbone.levels.2.blocks.0.mlp.fc2.bias", + "backbone.levels.2.blocks.0.res_post_norm1.0.weight", + "backbone.levels.2.blocks.0.res_post_norm1.0.bias", + "backbone.levels.2.blocks.0.res_post_norm2.0.weight", + "backbone.levels.2.blocks.0.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.0.0.weight", + "backbone.levels.2.post_norms.0.0.bias" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.0 + }, + "layer_13_decay": { + "param_names": [ + "backbone.levels.1.downsample.conv.weight", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.0.dcn.offset.weight", + "backbone.levels.2.blocks.0.dcn.mask.weight", + "backbone.levels.2.blocks.0.dcn.input_proj.weight", + "backbone.levels.2.blocks.0.dcn.output_proj.weight", + "backbone.levels.2.blocks.0.mlp.fc1.weight", + "backbone.levels.2.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.05 + }, + "layer_14_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.norm1.0.weight", + "backbone.levels.2.blocks.1.norm1.0.bias", + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.1.dcn.offset.bias", + "backbone.levels.2.blocks.1.dcn.mask.bias", + "backbone.levels.2.blocks.1.dcn.input_proj.bias", + "backbone.levels.2.blocks.1.dcn.output_proj.bias", + "backbone.levels.2.blocks.1.norm2.0.weight", + "backbone.levels.2.blocks.1.norm2.0.bias", + "backbone.levels.2.blocks.1.mlp.fc1.bias", + "backbone.levels.2.blocks.1.mlp.fc2.bias", + "backbone.levels.2.blocks.1.res_post_norm1.0.weight", + "backbone.levels.2.blocks.1.res_post_norm1.0.bias", + "backbone.levels.2.blocks.1.res_post_norm2.0.weight", + "backbone.levels.2.blocks.1.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.1.0.weight", + "backbone.levels.2.post_norms.1.0.bias" + ], + 
"lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.0 + }, + "layer_14_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.1.dcn.offset.weight", + "backbone.levels.2.blocks.1.dcn.mask.weight", + "backbone.levels.2.blocks.1.dcn.input_proj.weight", + "backbone.levels.2.blocks.1.dcn.output_proj.weight", + "backbone.levels.2.blocks.1.mlp.fc1.weight", + "backbone.levels.2.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.05 + }, + "layer_15_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.norm1.0.weight", + "backbone.levels.2.blocks.2.norm1.0.bias", + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.2.dcn.offset.bias", + "backbone.levels.2.blocks.2.dcn.mask.bias", + "backbone.levels.2.blocks.2.dcn.input_proj.bias", + "backbone.levels.2.blocks.2.dcn.output_proj.bias", + "backbone.levels.2.blocks.2.norm2.0.weight", + "backbone.levels.2.blocks.2.norm2.0.bias", + "backbone.levels.2.blocks.2.mlp.fc1.bias", + "backbone.levels.2.blocks.2.mlp.fc2.bias", + "backbone.levels.2.blocks.2.res_post_norm1.0.weight", + "backbone.levels.2.blocks.2.res_post_norm1.0.bias", + "backbone.levels.2.blocks.2.res_post_norm2.0.weight", + "backbone.levels.2.blocks.2.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.2.0.weight", + "backbone.levels.2.post_norms.2.0.bias" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.0 + }, + "layer_15_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.2.dcn.offset.weight", + "backbone.levels.2.blocks.2.dcn.mask.weight", + "backbone.levels.2.blocks.2.dcn.input_proj.weight", + "backbone.levels.2.blocks.2.dcn.output_proj.weight", + "backbone.levels.2.blocks.2.mlp.fc1.weight", + "backbone.levels.2.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.05 + }, + "layer_16_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.norm1.0.weight", + "backbone.levels.2.blocks.3.norm1.0.bias", + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.3.dcn.offset.bias", + "backbone.levels.2.blocks.3.dcn.mask.bias", + "backbone.levels.2.blocks.3.dcn.input_proj.bias", + "backbone.levels.2.blocks.3.dcn.output_proj.bias", + "backbone.levels.2.blocks.3.norm2.0.weight", + "backbone.levels.2.blocks.3.norm2.0.bias", + "backbone.levels.2.blocks.3.mlp.fc1.bias", + "backbone.levels.2.blocks.3.mlp.fc2.bias", + "backbone.levels.2.blocks.3.res_post_norm1.0.weight", + "backbone.levels.2.blocks.3.res_post_norm1.0.bias", + "backbone.levels.2.blocks.3.res_post_norm2.0.weight", + "backbone.levels.2.blocks.3.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.3.0.weight", + "backbone.levels.2.post_norms.3.0.bias" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.0 + }, + "layer_16_decay": { + "param_names": [ + 
"backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.3.dcn.offset.weight", + "backbone.levels.2.blocks.3.dcn.mask.weight", + "backbone.levels.2.blocks.3.dcn.input_proj.weight", + "backbone.levels.2.blocks.3.dcn.output_proj.weight", + "backbone.levels.2.blocks.3.mlp.fc1.weight", + "backbone.levels.2.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.05 + }, + "layer_17_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.norm1.0.weight", + "backbone.levels.2.blocks.4.norm1.0.bias", + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.4.dcn.offset.bias", + "backbone.levels.2.blocks.4.dcn.mask.bias", + "backbone.levels.2.blocks.4.dcn.input_proj.bias", + "backbone.levels.2.blocks.4.dcn.output_proj.bias", + "backbone.levels.2.blocks.4.norm2.0.weight", + "backbone.levels.2.blocks.4.norm2.0.bias", + "backbone.levels.2.blocks.4.mlp.fc1.bias", + "backbone.levels.2.blocks.4.mlp.fc2.bias", + "backbone.levels.2.blocks.4.res_post_norm1.0.weight", + "backbone.levels.2.blocks.4.res_post_norm1.0.bias", + "backbone.levels.2.blocks.4.res_post_norm2.0.weight", + "backbone.levels.2.blocks.4.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.4.0.weight", + "backbone.levels.2.post_norms.4.0.bias" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.0 + }, + "layer_17_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.4.dcn.offset.weight", + "backbone.levels.2.blocks.4.dcn.mask.weight", + "backbone.levels.2.blocks.4.dcn.input_proj.weight", + "backbone.levels.2.blocks.4.dcn.output_proj.weight", + "backbone.levels.2.blocks.4.mlp.fc1.weight", + "backbone.levels.2.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.05 + }, + "layer_18_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.norm1.0.weight", + "backbone.levels.2.blocks.5.norm1.0.bias", + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.5.dcn.offset.bias", + "backbone.levels.2.blocks.5.dcn.mask.bias", + "backbone.levels.2.blocks.5.dcn.input_proj.bias", + "backbone.levels.2.blocks.5.dcn.output_proj.bias", + "backbone.levels.2.blocks.5.norm2.0.weight", + "backbone.levels.2.blocks.5.norm2.0.bias", + "backbone.levels.2.blocks.5.mlp.fc1.bias", + "backbone.levels.2.blocks.5.mlp.fc2.bias", + "backbone.levels.2.blocks.5.res_post_norm1.0.weight", + "backbone.levels.2.blocks.5.res_post_norm1.0.bias", + "backbone.levels.2.blocks.5.res_post_norm2.0.weight", + "backbone.levels.2.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.0 + }, + "layer_18_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.5.dcn.offset.weight", + "backbone.levels.2.blocks.5.dcn.mask.weight", + 
"backbone.levels.2.blocks.5.dcn.input_proj.weight", + "backbone.levels.2.blocks.5.dcn.output_proj.weight", + "backbone.levels.2.blocks.5.mlp.fc1.weight", + "backbone.levels.2.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.05 + }, + "layer_19_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.norm1.0.weight", + "backbone.levels.2.blocks.6.norm1.0.bias", + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.6.dcn.offset.bias", + "backbone.levels.2.blocks.6.dcn.mask.bias", + "backbone.levels.2.blocks.6.dcn.input_proj.bias", + "backbone.levels.2.blocks.6.dcn.output_proj.bias", + "backbone.levels.2.blocks.6.norm2.0.weight", + "backbone.levels.2.blocks.6.norm2.0.bias", + "backbone.levels.2.blocks.6.mlp.fc1.bias", + "backbone.levels.2.blocks.6.mlp.fc2.bias", + "backbone.levels.2.blocks.6.res_post_norm1.0.weight", + "backbone.levels.2.blocks.6.res_post_norm1.0.bias", + "backbone.levels.2.blocks.6.res_post_norm2.0.weight", + "backbone.levels.2.blocks.6.res_post_norm2.0.bias" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.0 + }, + "layer_19_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.6.dcn.offset.weight", + "backbone.levels.2.blocks.6.dcn.mask.weight", + "backbone.levels.2.blocks.6.dcn.input_proj.weight", + "backbone.levels.2.blocks.6.dcn.output_proj.weight", + "backbone.levels.2.blocks.6.mlp.fc1.weight", + "backbone.levels.2.blocks.6.mlp.fc2.weight" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.05 + }, + "layer_20_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.norm1.0.weight", + "backbone.levels.2.blocks.7.norm1.0.bias", + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.7.dcn.offset.bias", + "backbone.levels.2.blocks.7.dcn.mask.bias", + "backbone.levels.2.blocks.7.dcn.input_proj.bias", + "backbone.levels.2.blocks.7.dcn.output_proj.bias", + "backbone.levels.2.blocks.7.norm2.0.weight", + "backbone.levels.2.blocks.7.norm2.0.bias", + "backbone.levels.2.blocks.7.mlp.fc1.bias", + "backbone.levels.2.blocks.7.mlp.fc2.bias", + "backbone.levels.2.blocks.7.res_post_norm1.0.weight", + "backbone.levels.2.blocks.7.res_post_norm1.0.bias", + "backbone.levels.2.blocks.7.res_post_norm2.0.weight", + "backbone.levels.2.blocks.7.res_post_norm2.0.bias" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.0 + }, + "layer_20_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.7.dcn.offset.weight", + "backbone.levels.2.blocks.7.dcn.mask.weight", + "backbone.levels.2.blocks.7.dcn.input_proj.weight", + "backbone.levels.2.blocks.7.dcn.output_proj.weight", + "backbone.levels.2.blocks.7.mlp.fc1.weight", + "backbone.levels.2.blocks.7.mlp.fc2.weight" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.05 + }, + 
"layer_21_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.norm1.0.weight", + "backbone.levels.2.blocks.8.norm1.0.bias", + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.8.dcn.offset.bias", + "backbone.levels.2.blocks.8.dcn.mask.bias", + "backbone.levels.2.blocks.8.dcn.input_proj.bias", + "backbone.levels.2.blocks.8.dcn.output_proj.bias", + "backbone.levels.2.blocks.8.norm2.0.weight", + "backbone.levels.2.blocks.8.norm2.0.bias", + "backbone.levels.2.blocks.8.mlp.fc1.bias", + "backbone.levels.2.blocks.8.mlp.fc2.bias", + "backbone.levels.2.blocks.8.res_post_norm1.0.weight", + "backbone.levels.2.blocks.8.res_post_norm1.0.bias", + "backbone.levels.2.blocks.8.res_post_norm2.0.weight", + "backbone.levels.2.blocks.8.res_post_norm2.0.bias" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.0 + }, + "layer_21_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.8.dcn.offset.weight", + "backbone.levels.2.blocks.8.dcn.mask.weight", + "backbone.levels.2.blocks.8.dcn.input_proj.weight", + "backbone.levels.2.blocks.8.dcn.output_proj.weight", + "backbone.levels.2.blocks.8.mlp.fc1.weight", + "backbone.levels.2.blocks.8.mlp.fc2.weight" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.05 + }, + "layer_22_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.norm1.0.weight", + "backbone.levels.2.blocks.9.norm1.0.bias", + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.9.dcn.offset.bias", + "backbone.levels.2.blocks.9.dcn.mask.bias", + "backbone.levels.2.blocks.9.dcn.input_proj.bias", + "backbone.levels.2.blocks.9.dcn.output_proj.bias", + "backbone.levels.2.blocks.9.norm2.0.weight", + "backbone.levels.2.blocks.9.norm2.0.bias", + "backbone.levels.2.blocks.9.mlp.fc1.bias", + "backbone.levels.2.blocks.9.mlp.fc2.bias", + "backbone.levels.2.blocks.9.res_post_norm1.0.weight", + "backbone.levels.2.blocks.9.res_post_norm1.0.bias", + "backbone.levels.2.blocks.9.res_post_norm2.0.weight", + "backbone.levels.2.blocks.9.res_post_norm2.0.bias" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.0 + }, + "layer_22_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.9.dcn.offset.weight", + "backbone.levels.2.blocks.9.dcn.mask.weight", + "backbone.levels.2.blocks.9.dcn.input_proj.weight", + "backbone.levels.2.blocks.9.dcn.output_proj.weight", + "backbone.levels.2.blocks.9.mlp.fc1.weight", + "backbone.levels.2.blocks.9.mlp.fc2.weight" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.05 + }, + "layer_23_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.norm1.0.weight", + "backbone.levels.2.blocks.10.norm1.0.bias", + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.bias", + 
"backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.10.dcn.offset.bias", + "backbone.levels.2.blocks.10.dcn.mask.bias", + "backbone.levels.2.blocks.10.dcn.input_proj.bias", + "backbone.levels.2.blocks.10.dcn.output_proj.bias", + "backbone.levels.2.blocks.10.norm2.0.weight", + "backbone.levels.2.blocks.10.norm2.0.bias", + "backbone.levels.2.blocks.10.mlp.fc1.bias", + "backbone.levels.2.blocks.10.mlp.fc2.bias", + "backbone.levels.2.blocks.10.res_post_norm1.0.weight", + "backbone.levels.2.blocks.10.res_post_norm1.0.bias", + "backbone.levels.2.blocks.10.res_post_norm2.0.weight", + "backbone.levels.2.blocks.10.res_post_norm2.0.bias" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.0 + }, + "layer_23_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.10.dcn.offset.weight", + "backbone.levels.2.blocks.10.dcn.mask.weight", + "backbone.levels.2.blocks.10.dcn.input_proj.weight", + "backbone.levels.2.blocks.10.dcn.output_proj.weight", + "backbone.levels.2.blocks.10.mlp.fc1.weight", + "backbone.levels.2.blocks.10.mlp.fc2.weight" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.05 + }, + "layer_24_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.norm1.0.weight", + "backbone.levels.2.blocks.11.norm1.0.bias", + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.11.dcn.offset.bias", + "backbone.levels.2.blocks.11.dcn.mask.bias", + "backbone.levels.2.blocks.11.dcn.input_proj.bias", + "backbone.levels.2.blocks.11.dcn.output_proj.bias", + "backbone.levels.2.blocks.11.norm2.0.weight", + "backbone.levels.2.blocks.11.norm2.0.bias", + "backbone.levels.2.blocks.11.mlp.fc1.bias", + "backbone.levels.2.blocks.11.mlp.fc2.bias", + "backbone.levels.2.blocks.11.res_post_norm1.0.weight", + "backbone.levels.2.blocks.11.res_post_norm1.0.bias", + "backbone.levels.2.blocks.11.res_post_norm2.0.weight", + "backbone.levels.2.blocks.11.res_post_norm2.0.bias" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.0 + }, + "layer_24_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.11.dcn.offset.weight", + "backbone.levels.2.blocks.11.dcn.mask.weight", + "backbone.levels.2.blocks.11.dcn.input_proj.weight", + "backbone.levels.2.blocks.11.dcn.output_proj.weight", + "backbone.levels.2.blocks.11.mlp.fc1.weight", + "backbone.levels.2.blocks.11.mlp.fc2.weight" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.05 + }, + "layer_25_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.norm1.0.weight", + "backbone.levels.2.blocks.12.norm1.0.bias", + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.12.dcn.offset.bias", + "backbone.levels.2.blocks.12.dcn.mask.bias", + "backbone.levels.2.blocks.12.dcn.input_proj.bias", + 
"backbone.levels.2.blocks.12.dcn.output_proj.bias", + "backbone.levels.2.blocks.12.norm2.0.weight", + "backbone.levels.2.blocks.12.norm2.0.bias", + "backbone.levels.2.blocks.12.mlp.fc1.bias", + "backbone.levels.2.blocks.12.mlp.fc2.bias", + "backbone.levels.2.blocks.12.res_post_norm1.0.weight", + "backbone.levels.2.blocks.12.res_post_norm1.0.bias", + "backbone.levels.2.blocks.12.res_post_norm2.0.weight", + "backbone.levels.2.blocks.12.res_post_norm2.0.bias" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.0 + }, + "layer_25_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.12.dcn.offset.weight", + "backbone.levels.2.blocks.12.dcn.mask.weight", + "backbone.levels.2.blocks.12.dcn.input_proj.weight", + "backbone.levels.2.blocks.12.dcn.output_proj.weight", + "backbone.levels.2.blocks.12.mlp.fc1.weight", + "backbone.levels.2.blocks.12.mlp.fc2.weight" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.05 + }, + "layer_26_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.norm1.0.weight", + "backbone.levels.2.blocks.13.norm1.0.bias", + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.13.dcn.offset.bias", + "backbone.levels.2.blocks.13.dcn.mask.bias", + "backbone.levels.2.blocks.13.dcn.input_proj.bias", + "backbone.levels.2.blocks.13.dcn.output_proj.bias", + "backbone.levels.2.blocks.13.norm2.0.weight", + "backbone.levels.2.blocks.13.norm2.0.bias", + "backbone.levels.2.blocks.13.mlp.fc1.bias", + "backbone.levels.2.blocks.13.mlp.fc2.bias", + "backbone.levels.2.blocks.13.res_post_norm1.0.weight", + "backbone.levels.2.blocks.13.res_post_norm1.0.bias", + "backbone.levels.2.blocks.13.res_post_norm2.0.weight", + "backbone.levels.2.blocks.13.res_post_norm2.0.bias" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.0 + }, + "layer_26_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.13.dcn.offset.weight", + "backbone.levels.2.blocks.13.dcn.mask.weight", + "backbone.levels.2.blocks.13.dcn.input_proj.weight", + "backbone.levels.2.blocks.13.dcn.output_proj.weight", + "backbone.levels.2.blocks.13.mlp.fc1.weight", + "backbone.levels.2.blocks.13.mlp.fc2.weight" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.05 + }, + "layer_27_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.norm1.0.weight", + "backbone.levels.2.blocks.14.norm1.0.bias", + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.14.dcn.offset.bias", + "backbone.levels.2.blocks.14.dcn.mask.bias", + "backbone.levels.2.blocks.14.dcn.input_proj.bias", + "backbone.levels.2.blocks.14.dcn.output_proj.bias", + "backbone.levels.2.blocks.14.norm2.0.weight", + "backbone.levels.2.blocks.14.norm2.0.bias", + "backbone.levels.2.blocks.14.mlp.fc1.bias", + "backbone.levels.2.blocks.14.mlp.fc2.bias", + 
"backbone.levels.2.blocks.14.res_post_norm1.0.weight", + "backbone.levels.2.blocks.14.res_post_norm1.0.bias", + "backbone.levels.2.blocks.14.res_post_norm2.0.weight", + "backbone.levels.2.blocks.14.res_post_norm2.0.bias" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.0 + }, + "layer_27_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.14.dcn.offset.weight", + "backbone.levels.2.blocks.14.dcn.mask.weight", + "backbone.levels.2.blocks.14.dcn.input_proj.weight", + "backbone.levels.2.blocks.14.dcn.output_proj.weight", + "backbone.levels.2.blocks.14.mlp.fc1.weight", + "backbone.levels.2.blocks.14.mlp.fc2.weight" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.05 + }, + "layer_28_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.norm1.0.weight", + "backbone.levels.2.blocks.15.norm1.0.bias", + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.15.dcn.offset.bias", + "backbone.levels.2.blocks.15.dcn.mask.bias", + "backbone.levels.2.blocks.15.dcn.input_proj.bias", + "backbone.levels.2.blocks.15.dcn.output_proj.bias", + "backbone.levels.2.blocks.15.norm2.0.weight", + "backbone.levels.2.blocks.15.norm2.0.bias", + "backbone.levels.2.blocks.15.mlp.fc1.bias", + "backbone.levels.2.blocks.15.mlp.fc2.bias", + "backbone.levels.2.blocks.15.res_post_norm1.0.weight", + "backbone.levels.2.blocks.15.res_post_norm1.0.bias", + "backbone.levels.2.blocks.15.res_post_norm2.0.weight", + "backbone.levels.2.blocks.15.res_post_norm2.0.bias" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.0 + }, + "layer_28_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.15.dcn.offset.weight", + "backbone.levels.2.blocks.15.dcn.mask.weight", + "backbone.levels.2.blocks.15.dcn.input_proj.weight", + "backbone.levels.2.blocks.15.dcn.output_proj.weight", + "backbone.levels.2.blocks.15.mlp.fc1.weight", + "backbone.levels.2.blocks.15.mlp.fc2.weight" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.05 + }, + "layer_29_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.norm1.0.weight", + "backbone.levels.2.blocks.16.norm1.0.bias", + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.16.dcn.offset.bias", + "backbone.levels.2.blocks.16.dcn.mask.bias", + "backbone.levels.2.blocks.16.dcn.input_proj.bias", + "backbone.levels.2.blocks.16.dcn.output_proj.bias", + "backbone.levels.2.blocks.16.norm2.0.weight", + "backbone.levels.2.blocks.16.norm2.0.bias", + "backbone.levels.2.blocks.16.mlp.fc1.bias", + "backbone.levels.2.blocks.16.mlp.fc2.bias", + "backbone.levels.2.blocks.16.res_post_norm1.0.weight", + "backbone.levels.2.blocks.16.res_post_norm1.0.bias", + "backbone.levels.2.blocks.16.res_post_norm2.0.weight", + "backbone.levels.2.blocks.16.res_post_norm2.0.bias" + ], + "lr_scale": 0.323533544973709, + 
"lr": 3.2353354497370905e-06, + "weight_decay": 0.0 + }, + "layer_29_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.16.dcn.offset.weight", + "backbone.levels.2.blocks.16.dcn.mask.weight", + "backbone.levels.2.blocks.16.dcn.input_proj.weight", + "backbone.levels.2.blocks.16.dcn.output_proj.weight", + "backbone.levels.2.blocks.16.mlp.fc1.weight", + "backbone.levels.2.blocks.16.mlp.fc2.weight" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.05 + }, + "layer_30_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.norm1.0.weight", + "backbone.levels.2.blocks.17.norm1.0.bias", + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.17.dcn.offset.bias", + "backbone.levels.2.blocks.17.dcn.mask.bias", + "backbone.levels.2.blocks.17.dcn.input_proj.bias", + "backbone.levels.2.blocks.17.dcn.output_proj.bias", + "backbone.levels.2.blocks.17.norm2.0.weight", + "backbone.levels.2.blocks.17.norm2.0.bias", + "backbone.levels.2.blocks.17.mlp.fc1.bias", + "backbone.levels.2.blocks.17.mlp.fc2.bias", + "backbone.levels.2.blocks.17.res_post_norm1.0.weight", + "backbone.levels.2.blocks.17.res_post_norm1.0.bias", + "backbone.levels.2.blocks.17.res_post_norm2.0.weight", + "backbone.levels.2.blocks.17.res_post_norm2.0.bias" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.0 + }, + "layer_30_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.17.dcn.offset.weight", + "backbone.levels.2.blocks.17.dcn.mask.weight", + "backbone.levels.2.blocks.17.dcn.input_proj.weight", + "backbone.levels.2.blocks.17.dcn.output_proj.weight", + "backbone.levels.2.blocks.17.mlp.fc1.weight", + "backbone.levels.2.blocks.17.mlp.fc2.weight" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.05 + }, + "layer_31_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.norm1.0.weight", + "backbone.levels.2.blocks.18.norm1.0.bias", + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.18.dcn.offset.bias", + "backbone.levels.2.blocks.18.dcn.mask.bias", + "backbone.levels.2.blocks.18.dcn.input_proj.bias", + "backbone.levels.2.blocks.18.dcn.output_proj.bias", + "backbone.levels.2.blocks.18.norm2.0.weight", + "backbone.levels.2.blocks.18.norm2.0.bias", + "backbone.levels.2.blocks.18.mlp.fc1.bias", + "backbone.levels.2.blocks.18.mlp.fc2.bias", + "backbone.levels.2.blocks.18.res_post_norm1.0.weight", + "backbone.levels.2.blocks.18.res_post_norm1.0.bias", + "backbone.levels.2.blocks.18.res_post_norm2.0.weight", + "backbone.levels.2.blocks.18.res_post_norm2.0.bias" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.0 + }, + "layer_31_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.weight", + 
"backbone.levels.2.blocks.18.dcn.offset.weight", + "backbone.levels.2.blocks.18.dcn.mask.weight", + "backbone.levels.2.blocks.18.dcn.input_proj.weight", + "backbone.levels.2.blocks.18.dcn.output_proj.weight", + "backbone.levels.2.blocks.18.mlp.fc1.weight", + "backbone.levels.2.blocks.18.mlp.fc2.weight" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.05 + }, + "layer_32_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.norm1.0.weight", + "backbone.levels.2.blocks.19.norm1.0.bias", + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.19.dcn.offset.bias", + "backbone.levels.2.blocks.19.dcn.mask.bias", + "backbone.levels.2.blocks.19.dcn.input_proj.bias", + "backbone.levels.2.blocks.19.dcn.output_proj.bias", + "backbone.levels.2.blocks.19.norm2.0.weight", + "backbone.levels.2.blocks.19.norm2.0.bias", + "backbone.levels.2.blocks.19.mlp.fc1.bias", + "backbone.levels.2.blocks.19.mlp.fc2.bias", + "backbone.levels.2.blocks.19.res_post_norm1.0.weight", + "backbone.levels.2.blocks.19.res_post_norm1.0.bias", + "backbone.levels.2.blocks.19.res_post_norm2.0.weight", + "backbone.levels.2.blocks.19.res_post_norm2.0.bias" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.0 + }, + "layer_32_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.19.dcn.offset.weight", + "backbone.levels.2.blocks.19.dcn.mask.weight", + "backbone.levels.2.blocks.19.dcn.input_proj.weight", + "backbone.levels.2.blocks.19.dcn.output_proj.weight", + "backbone.levels.2.blocks.19.mlp.fc1.weight", + "backbone.levels.2.blocks.19.mlp.fc2.weight" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.05 + }, + "layer_33_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.norm1.0.weight", + "backbone.levels.2.blocks.20.norm1.0.bias", + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.20.dcn.offset.bias", + "backbone.levels.2.blocks.20.dcn.mask.bias", + "backbone.levels.2.blocks.20.dcn.input_proj.bias", + "backbone.levels.2.blocks.20.dcn.output_proj.bias", + "backbone.levels.2.blocks.20.norm2.0.weight", + "backbone.levels.2.blocks.20.norm2.0.bias", + "backbone.levels.2.blocks.20.mlp.fc1.bias", + "backbone.levels.2.blocks.20.mlp.fc2.bias", + "backbone.levels.2.blocks.20.res_post_norm1.0.weight", + "backbone.levels.2.blocks.20.res_post_norm1.0.bias", + "backbone.levels.2.blocks.20.res_post_norm2.0.weight", + "backbone.levels.2.blocks.20.res_post_norm2.0.bias" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.0 + }, + "layer_33_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.20.dcn.offset.weight", + "backbone.levels.2.blocks.20.dcn.mask.weight", + "backbone.levels.2.blocks.20.dcn.input_proj.weight", + "backbone.levels.2.blocks.20.dcn.output_proj.weight", + "backbone.levels.2.blocks.20.mlp.fc1.weight", + 
"backbone.levels.2.blocks.20.mlp.fc2.weight" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.05 + }, + "layer_34_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.norm1.0.weight", + "backbone.levels.2.blocks.21.norm1.0.bias", + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.21.dcn.offset.bias", + "backbone.levels.2.blocks.21.dcn.mask.bias", + "backbone.levels.2.blocks.21.dcn.input_proj.bias", + "backbone.levels.2.blocks.21.dcn.output_proj.bias", + "backbone.levels.2.blocks.21.norm2.0.weight", + "backbone.levels.2.blocks.21.norm2.0.bias", + "backbone.levels.2.blocks.21.mlp.fc1.bias", + "backbone.levels.2.blocks.21.mlp.fc2.bias", + "backbone.levels.2.blocks.21.res_post_norm1.0.weight", + "backbone.levels.2.blocks.21.res_post_norm1.0.bias", + "backbone.levels.2.blocks.21.res_post_norm2.0.weight", + "backbone.levels.2.blocks.21.res_post_norm2.0.bias" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.0 + }, + "layer_34_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.21.dcn.offset.weight", + "backbone.levels.2.blocks.21.dcn.mask.weight", + "backbone.levels.2.blocks.21.dcn.input_proj.weight", + "backbone.levels.2.blocks.21.dcn.output_proj.weight", + "backbone.levels.2.blocks.21.mlp.fc1.weight", + "backbone.levels.2.blocks.21.mlp.fc2.weight" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.05 + }, + "layer_35_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.norm1.0.weight", + "backbone.levels.2.blocks.22.norm1.0.bias", + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.22.dcn.offset.bias", + "backbone.levels.2.blocks.22.dcn.mask.bias", + "backbone.levels.2.blocks.22.dcn.input_proj.bias", + "backbone.levels.2.blocks.22.dcn.output_proj.bias", + "backbone.levels.2.blocks.22.norm2.0.weight", + "backbone.levels.2.blocks.22.norm2.0.bias", + "backbone.levels.2.blocks.22.mlp.fc1.bias", + "backbone.levels.2.blocks.22.mlp.fc2.bias", + "backbone.levels.2.blocks.22.res_post_norm1.0.weight", + "backbone.levels.2.blocks.22.res_post_norm1.0.bias", + "backbone.levels.2.blocks.22.res_post_norm2.0.weight", + "backbone.levels.2.blocks.22.res_post_norm2.0.bias" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.0 + }, + "layer_35_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.22.dcn.offset.weight", + "backbone.levels.2.blocks.22.dcn.mask.weight", + "backbone.levels.2.blocks.22.dcn.input_proj.weight", + "backbone.levels.2.blocks.22.dcn.output_proj.weight", + "backbone.levels.2.blocks.22.mlp.fc1.weight", + "backbone.levels.2.blocks.22.mlp.fc2.weight" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.05 + }, + "layer_36_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.norm1.0.weight", + 
"backbone.levels.2.blocks.23.norm1.0.bias", + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.23.dcn.offset.bias", + "backbone.levels.2.blocks.23.dcn.mask.bias", + "backbone.levels.2.blocks.23.dcn.input_proj.bias", + "backbone.levels.2.blocks.23.dcn.output_proj.bias", + "backbone.levels.2.blocks.23.norm2.0.weight", + "backbone.levels.2.blocks.23.norm2.0.bias", + "backbone.levels.2.blocks.23.mlp.fc1.bias", + "backbone.levels.2.blocks.23.mlp.fc2.bias", + "backbone.levels.2.blocks.23.res_post_norm1.0.weight", + "backbone.levels.2.blocks.23.res_post_norm1.0.bias", + "backbone.levels.2.blocks.23.res_post_norm2.0.weight", + "backbone.levels.2.blocks.23.res_post_norm2.0.bias" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.0 + }, + "layer_36_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.23.dcn.offset.weight", + "backbone.levels.2.blocks.23.dcn.mask.weight", + "backbone.levels.2.blocks.23.dcn.input_proj.weight", + "backbone.levels.2.blocks.23.dcn.output_proj.weight", + "backbone.levels.2.blocks.23.mlp.fc1.weight", + "backbone.levels.2.blocks.23.mlp.fc2.weight" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.05 + }, + "layer_37_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.norm1.0.weight", + "backbone.levels.2.blocks.24.norm1.0.bias", + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.24.dcn.offset.bias", + "backbone.levels.2.blocks.24.dcn.mask.bias", + "backbone.levels.2.blocks.24.dcn.input_proj.bias", + "backbone.levels.2.blocks.24.dcn.output_proj.bias", + "backbone.levels.2.blocks.24.norm2.0.weight", + "backbone.levels.2.blocks.24.norm2.0.bias", + "backbone.levels.2.blocks.24.mlp.fc1.bias", + "backbone.levels.2.blocks.24.mlp.fc2.bias", + "backbone.levels.2.blocks.24.res_post_norm1.0.weight", + "backbone.levels.2.blocks.24.res_post_norm1.0.bias", + "backbone.levels.2.blocks.24.res_post_norm2.0.weight", + "backbone.levels.2.blocks.24.res_post_norm2.0.bias" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.0 + }, + "layer_37_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.24.dcn.offset.weight", + "backbone.levels.2.blocks.24.dcn.mask.weight", + "backbone.levels.2.blocks.24.dcn.input_proj.weight", + "backbone.levels.2.blocks.24.dcn.output_proj.weight", + "backbone.levels.2.blocks.24.mlp.fc1.weight", + "backbone.levels.2.blocks.24.mlp.fc2.weight" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.05 + }, + "layer_38_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.norm1.0.weight", + "backbone.levels.2.blocks.25.norm1.0.bias", + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight", + 
"backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.25.dcn.offset.bias", + "backbone.levels.2.blocks.25.dcn.mask.bias", + "backbone.levels.2.blocks.25.dcn.input_proj.bias", + "backbone.levels.2.blocks.25.dcn.output_proj.bias", + "backbone.levels.2.blocks.25.norm2.0.weight", + "backbone.levels.2.blocks.25.norm2.0.bias", + "backbone.levels.2.blocks.25.mlp.fc1.bias", + "backbone.levels.2.blocks.25.mlp.fc2.bias", + "backbone.levels.2.blocks.25.res_post_norm1.0.weight", + "backbone.levels.2.blocks.25.res_post_norm1.0.bias", + "backbone.levels.2.blocks.25.res_post_norm2.0.weight", + "backbone.levels.2.blocks.25.res_post_norm2.0.bias" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.0 + }, + "layer_38_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.25.dcn.offset.weight", + "backbone.levels.2.blocks.25.dcn.mask.weight", + "backbone.levels.2.blocks.25.dcn.input_proj.weight", + "backbone.levels.2.blocks.25.dcn.output_proj.weight", + "backbone.levels.2.blocks.25.mlp.fc1.weight", + "backbone.levels.2.blocks.25.mlp.fc2.weight" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.05 + }, + "layer_39_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.norm1.0.weight", + "backbone.levels.2.blocks.26.norm1.0.bias", + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.26.dcn.offset.bias", + "backbone.levels.2.blocks.26.dcn.mask.bias", + "backbone.levels.2.blocks.26.dcn.input_proj.bias", + "backbone.levels.2.blocks.26.dcn.output_proj.bias", + "backbone.levels.2.blocks.26.norm2.0.weight", + "backbone.levels.2.blocks.26.norm2.0.bias", + "backbone.levels.2.blocks.26.mlp.fc1.bias", + "backbone.levels.2.blocks.26.mlp.fc2.bias", + "backbone.levels.2.blocks.26.res_post_norm1.0.weight", + "backbone.levels.2.blocks.26.res_post_norm1.0.bias", + "backbone.levels.2.blocks.26.res_post_norm2.0.weight", + "backbone.levels.2.blocks.26.res_post_norm2.0.bias" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.0 + }, + "layer_39_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.26.dcn.offset.weight", + "backbone.levels.2.blocks.26.dcn.mask.weight", + "backbone.levels.2.blocks.26.dcn.input_proj.weight", + "backbone.levels.2.blocks.26.dcn.output_proj.weight", + "backbone.levels.2.blocks.26.mlp.fc1.weight", + "backbone.levels.2.blocks.26.mlp.fc2.weight" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.05 + }, + "layer_40_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.norm1.0.weight", + "backbone.levels.2.blocks.27.norm1.0.bias", + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.27.dcn.offset.bias", + "backbone.levels.2.blocks.27.dcn.mask.bias", + "backbone.levels.2.blocks.27.dcn.input_proj.bias", + "backbone.levels.2.blocks.27.dcn.output_proj.bias", + 
"backbone.levels.2.blocks.27.norm2.0.weight", + "backbone.levels.2.blocks.27.norm2.0.bias", + "backbone.levels.2.blocks.27.mlp.fc1.bias", + "backbone.levels.2.blocks.27.mlp.fc2.bias", + "backbone.levels.2.blocks.27.res_post_norm1.0.weight", + "backbone.levels.2.blocks.27.res_post_norm1.0.bias", + "backbone.levels.2.blocks.27.res_post_norm2.0.weight", + "backbone.levels.2.blocks.27.res_post_norm2.0.bias" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.0 + }, + "layer_40_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.27.dcn.offset.weight", + "backbone.levels.2.blocks.27.dcn.mask.weight", + "backbone.levels.2.blocks.27.dcn.input_proj.weight", + "backbone.levels.2.blocks.27.dcn.output_proj.weight", + "backbone.levels.2.blocks.27.mlp.fc1.weight", + "backbone.levels.2.blocks.27.mlp.fc2.weight" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.05 + }, + "layer_41_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.norm1.0.weight", + "backbone.levels.2.blocks.28.norm1.0.bias", + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.28.dcn.offset.bias", + "backbone.levels.2.blocks.28.dcn.mask.bias", + "backbone.levels.2.blocks.28.dcn.input_proj.bias", + "backbone.levels.2.blocks.28.dcn.output_proj.bias", + "backbone.levels.2.blocks.28.norm2.0.weight", + "backbone.levels.2.blocks.28.norm2.0.bias", + "backbone.levels.2.blocks.28.mlp.fc1.bias", + "backbone.levels.2.blocks.28.mlp.fc2.bias", + "backbone.levels.2.blocks.28.res_post_norm1.0.weight", + "backbone.levels.2.blocks.28.res_post_norm1.0.bias", + "backbone.levels.2.blocks.28.res_post_norm2.0.weight", + "backbone.levels.2.blocks.28.res_post_norm2.0.bias" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.0 + }, + "layer_41_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.28.dcn.offset.weight", + "backbone.levels.2.blocks.28.dcn.mask.weight", + "backbone.levels.2.blocks.28.dcn.input_proj.weight", + "backbone.levels.2.blocks.28.dcn.output_proj.weight", + "backbone.levels.2.blocks.28.mlp.fc1.weight", + "backbone.levels.2.blocks.28.mlp.fc2.weight" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.05 + }, + "layer_42_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.norm1.0.weight", + "backbone.levels.2.blocks.29.norm1.0.bias", + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.29.dcn.offset.bias", + "backbone.levels.2.blocks.29.dcn.mask.bias", + "backbone.levels.2.blocks.29.dcn.input_proj.bias", + "backbone.levels.2.blocks.29.dcn.output_proj.bias", + "backbone.levels.2.blocks.29.norm2.0.weight", + "backbone.levels.2.blocks.29.norm2.0.bias", + "backbone.levels.2.blocks.29.mlp.fc1.bias", + "backbone.levels.2.blocks.29.mlp.fc2.bias", + "backbone.levels.2.blocks.29.res_post_norm1.0.weight", + 
"backbone.levels.2.blocks.29.res_post_norm1.0.bias", + "backbone.levels.2.blocks.29.res_post_norm2.0.weight", + "backbone.levels.2.blocks.29.res_post_norm2.0.bias" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.0 + }, + "layer_42_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.29.dcn.offset.weight", + "backbone.levels.2.blocks.29.dcn.mask.weight", + "backbone.levels.2.blocks.29.dcn.input_proj.weight", + "backbone.levels.2.blocks.29.dcn.output_proj.weight", + "backbone.levels.2.blocks.29.mlp.fc1.weight", + "backbone.levels.2.blocks.29.mlp.fc2.weight" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.05 + }, + "layer_43_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.norm1.0.weight", + "backbone.levels.2.blocks.30.norm1.0.bias", + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.30.dcn.offset.bias", + "backbone.levels.2.blocks.30.dcn.mask.bias", + "backbone.levels.2.blocks.30.dcn.input_proj.bias", + "backbone.levels.2.blocks.30.dcn.output_proj.bias", + "backbone.levels.2.blocks.30.norm2.0.weight", + "backbone.levels.2.blocks.30.norm2.0.bias", + "backbone.levels.2.blocks.30.mlp.fc1.bias", + "backbone.levels.2.blocks.30.mlp.fc2.bias", + "backbone.levels.2.blocks.30.res_post_norm1.0.weight", + "backbone.levels.2.blocks.30.res_post_norm1.0.bias", + "backbone.levels.2.blocks.30.res_post_norm2.0.weight", + "backbone.levels.2.blocks.30.res_post_norm2.0.bias" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.0 + }, + "layer_43_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.30.dcn.offset.weight", + "backbone.levels.2.blocks.30.dcn.mask.weight", + "backbone.levels.2.blocks.30.dcn.input_proj.weight", + "backbone.levels.2.blocks.30.dcn.output_proj.weight", + "backbone.levels.2.blocks.30.mlp.fc1.weight", + "backbone.levels.2.blocks.30.mlp.fc2.weight" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.05 + }, + "layer_44_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.norm1.0.weight", + "backbone.levels.2.blocks.31.norm1.0.bias", + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.31.dcn.offset.bias", + "backbone.levels.2.blocks.31.dcn.mask.bias", + "backbone.levels.2.blocks.31.dcn.input_proj.bias", + "backbone.levels.2.blocks.31.dcn.output_proj.bias", + "backbone.levels.2.blocks.31.norm2.0.weight", + "backbone.levels.2.blocks.31.norm2.0.bias", + "backbone.levels.2.blocks.31.mlp.fc1.bias", + "backbone.levels.2.blocks.31.mlp.fc2.bias", + "backbone.levels.2.blocks.31.res_post_norm1.0.weight", + "backbone.levels.2.blocks.31.res_post_norm1.0.bias", + "backbone.levels.2.blocks.31.res_post_norm2.0.weight", + "backbone.levels.2.blocks.31.res_post_norm2.0.bias" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.0 + }, + 
"layer_44_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.31.dcn.offset.weight", + "backbone.levels.2.blocks.31.dcn.mask.weight", + "backbone.levels.2.blocks.31.dcn.input_proj.weight", + "backbone.levels.2.blocks.31.dcn.output_proj.weight", + "backbone.levels.2.blocks.31.mlp.fc1.weight", + "backbone.levels.2.blocks.31.mlp.fc2.weight" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.05 + }, + "layer_45_no_decay": { + "param_names": [ + "backbone.levels.2.norm.0.weight", + "backbone.levels.2.norm.0.bias", + "backbone.levels.2.downsample.norm.1.weight", + "backbone.levels.2.downsample.norm.1.bias", + "backbone.levels.3.blocks.0.norm1.0.weight", + "backbone.levels.3.blocks.0.norm1.0.bias", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.0.dcn.offset.bias", + "backbone.levels.3.blocks.0.dcn.mask.bias", + "backbone.levels.3.blocks.0.dcn.input_proj.bias", + "backbone.levels.3.blocks.0.dcn.output_proj.bias", + "backbone.levels.3.blocks.0.norm2.0.weight", + "backbone.levels.3.blocks.0.norm2.0.bias", + "backbone.levels.3.blocks.0.mlp.fc1.bias", + "backbone.levels.3.blocks.0.mlp.fc2.bias", + "backbone.levels.3.blocks.0.res_post_norm1.0.weight", + "backbone.levels.3.blocks.0.res_post_norm1.0.bias", + "backbone.levels.3.blocks.0.res_post_norm2.0.weight", + "backbone.levels.3.blocks.0.res_post_norm2.0.bias", + "backbone.levels.3.norm.0.weight", + "backbone.levels.3.norm.0.bias" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.0 + }, + "layer_45_decay": { + "param_names": [ + "backbone.levels.2.downsample.conv.weight", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.0.dcn.offset.weight", + "backbone.levels.3.blocks.0.dcn.mask.weight", + "backbone.levels.3.blocks.0.dcn.input_proj.weight", + "backbone.levels.3.blocks.0.dcn.output_proj.weight", + "backbone.levels.3.blocks.0.mlp.fc1.weight", + "backbone.levels.3.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.05 + }, + "layer_46_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.norm1.0.weight", + "backbone.levels.3.blocks.1.norm1.0.bias", + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.1.dcn.offset.bias", + "backbone.levels.3.blocks.1.dcn.mask.bias", + "backbone.levels.3.blocks.1.dcn.input_proj.bias", + "backbone.levels.3.blocks.1.dcn.output_proj.bias", + "backbone.levels.3.blocks.1.norm2.0.weight", + "backbone.levels.3.blocks.1.norm2.0.bias", + "backbone.levels.3.blocks.1.mlp.fc1.bias", + "backbone.levels.3.blocks.1.mlp.fc2.bias", + "backbone.levels.3.blocks.1.res_post_norm1.0.weight", + "backbone.levels.3.blocks.1.res_post_norm1.0.bias", + "backbone.levels.3.blocks.1.res_post_norm2.0.weight", + "backbone.levels.3.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.0 + }, + "layer_46_decay": { + 
"param_names": [ + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.1.dcn.offset.weight", + "backbone.levels.3.blocks.1.dcn.mask.weight", + "backbone.levels.3.blocks.1.dcn.input_proj.weight", + "backbone.levels.3.blocks.1.dcn.output_proj.weight", + "backbone.levels.3.blocks.1.mlp.fc1.weight", + "backbone.levels.3.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.05 + }, + "layer_47_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.norm1.0.weight", + "backbone.levels.3.blocks.2.norm1.0.bias", + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.2.dcn.offset.bias", + "backbone.levels.3.blocks.2.dcn.mask.bias", + "backbone.levels.3.blocks.2.dcn.input_proj.bias", + "backbone.levels.3.blocks.2.dcn.output_proj.bias", + "backbone.levels.3.blocks.2.norm2.0.weight", + "backbone.levels.3.blocks.2.norm2.0.bias", + "backbone.levels.3.blocks.2.mlp.fc1.bias", + "backbone.levels.3.blocks.2.mlp.fc2.bias", + "backbone.levels.3.blocks.2.res_post_norm1.0.weight", + "backbone.levels.3.blocks.2.res_post_norm1.0.bias", + "backbone.levels.3.blocks.2.res_post_norm2.0.weight", + "backbone.levels.3.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.0 + }, + "layer_47_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.2.dcn.offset.weight", + "backbone.levels.3.blocks.2.dcn.mask.weight", + "backbone.levels.3.blocks.2.dcn.input_proj.weight", + "backbone.levels.3.blocks.2.dcn.output_proj.weight", + "backbone.levels.3.blocks.2.mlp.fc1.weight", + "backbone.levels.3.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.05 + }, + "layer_48_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.norm1.0.weight", + "backbone.levels.3.blocks.3.norm1.0.bias", + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.3.dcn.offset.bias", + "backbone.levels.3.blocks.3.dcn.mask.bias", + "backbone.levels.3.blocks.3.dcn.input_proj.bias", + "backbone.levels.3.blocks.3.dcn.output_proj.bias", + "backbone.levels.3.blocks.3.norm2.0.weight", + "backbone.levels.3.blocks.3.norm2.0.bias", + "backbone.levels.3.blocks.3.mlp.fc1.bias", + "backbone.levels.3.blocks.3.mlp.fc2.bias", + "backbone.levels.3.blocks.3.res_post_norm1.0.weight", + "backbone.levels.3.blocks.3.res_post_norm1.0.bias", + "backbone.levels.3.blocks.3.res_post_norm2.0.weight", + "backbone.levels.3.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.0 + }, + "layer_48_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.3.dcn.offset.weight", + "backbone.levels.3.blocks.3.dcn.mask.weight", + "backbone.levels.3.blocks.3.dcn.input_proj.weight", + 
"backbone.levels.3.blocks.3.dcn.output_proj.weight", + "backbone.levels.3.blocks.3.mlp.fc1.weight", + "backbone.levels.3.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.05 + }, + "layer_49_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.norm1.0.weight", + "backbone.levels.3.blocks.4.norm1.0.bias", + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.4.dcn.offset.bias", + "backbone.levels.3.blocks.4.dcn.mask.bias", + "backbone.levels.3.blocks.4.dcn.input_proj.bias", + "backbone.levels.3.blocks.4.dcn.output_proj.bias", + "backbone.levels.3.blocks.4.norm2.0.weight", + "backbone.levels.3.blocks.4.norm2.0.bias", + "backbone.levels.3.blocks.4.mlp.fc1.bias", + "backbone.levels.3.blocks.4.mlp.fc2.bias", + "backbone.levels.3.blocks.4.res_post_norm1.0.weight", + "backbone.levels.3.blocks.4.res_post_norm1.0.bias", + "backbone.levels.3.blocks.4.res_post_norm2.0.weight", + "backbone.levels.3.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.0 + }, + "layer_49_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.4.dcn.offset.weight", + "backbone.levels.3.blocks.4.dcn.mask.weight", + "backbone.levels.3.blocks.4.dcn.input_proj.weight", + "backbone.levels.3.blocks.4.dcn.output_proj.weight", + "backbone.levels.3.blocks.4.mlp.fc1.weight", + "backbone.levels.3.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.05 + }, + "layer_50_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.norm1.0.weight", + "backbone.levels.3.blocks.5.norm1.0.bias", + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.5.dcn.offset.bias", + "backbone.levels.3.blocks.5.dcn.mask.bias", + "backbone.levels.3.blocks.5.dcn.input_proj.bias", + "backbone.levels.3.blocks.5.dcn.output_proj.bias", + "backbone.levels.3.blocks.5.norm2.0.weight", + "backbone.levels.3.blocks.5.norm2.0.bias", + "backbone.levels.3.blocks.5.mlp.fc1.bias", + "backbone.levels.3.blocks.5.mlp.fc2.bias", + "backbone.levels.3.blocks.5.res_post_norm1.0.weight", + "backbone.levels.3.blocks.5.res_post_norm1.0.bias", + "backbone.levels.3.blocks.5.res_post_norm2.0.weight", + "backbone.levels.3.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.0 + }, + "layer_50_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.5.dcn.offset.weight", + "backbone.levels.3.blocks.5.dcn.mask.weight", + "backbone.levels.3.blocks.5.dcn.input_proj.weight", + "backbone.levels.3.blocks.5.dcn.output_proj.weight", + "backbone.levels.3.blocks.5.mlp.fc1.weight", + "backbone.levels.3.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.05 + }, + "layer_51_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.weight", + "decode_head.pixel_decoder.input_convs.1.conv.weight", + 
"decode_head.pixel_decoder.input_convs.2.conv.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.level_encoding.weight", + "decode_head.pixel_decoder.lateral_convs.0.conv.weight", + "decode_head.pixel_decoder.output_convs.0.conv.weight", + "decode_head.pixel_decoder.mask_feature.weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight", + 
"decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.05 + }, + "layer_51_no_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.bias", + "decode_head.pixel_decoder.input_convs.0.gn.weight", + "decode_head.pixel_decoder.input_convs.0.gn.bias", + "decode_head.pixel_decoder.input_convs.1.conv.bias", + 
"decode_head.pixel_decoder.input_convs.1.gn.weight", + "decode_head.pixel_decoder.input_convs.1.gn.bias", + "decode_head.pixel_decoder.input_convs.2.conv.bias", + "decode_head.pixel_decoder.input_convs.2.gn.weight", + "decode_head.pixel_decoder.input_convs.2.gn.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight", + 
"decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.bias", + "decode_head.pixel_decoder.lateral_convs.0.gn.weight", + "decode_head.pixel_decoder.lateral_convs.0.gn.bias", + "decode_head.pixel_decoder.output_convs.0.gn.weight", + "decode_head.pixel_decoder.output_convs.0.gn.bias", + "decode_head.pixel_decoder.mask_feature.bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.0.norms.0.weight", + "decode_head.transformer_decoder.layers.0.norms.0.bias", + "decode_head.transformer_decoder.layers.0.norms.1.weight", + "decode_head.transformer_decoder.layers.0.norms.1.bias", + "decode_head.transformer_decoder.layers.0.norms.2.weight", + "decode_head.transformer_decoder.layers.0.norms.2.bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.1.norms.0.weight", + "decode_head.transformer_decoder.layers.1.norms.0.bias", + "decode_head.transformer_decoder.layers.1.norms.1.weight", + "decode_head.transformer_decoder.layers.1.norms.1.bias", + "decode_head.transformer_decoder.layers.1.norms.2.weight", + "decode_head.transformer_decoder.layers.1.norms.2.bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias", + 
"decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.2.norms.0.weight", + "decode_head.transformer_decoder.layers.2.norms.0.bias", + "decode_head.transformer_decoder.layers.2.norms.1.weight", + "decode_head.transformer_decoder.layers.2.norms.1.bias", + "decode_head.transformer_decoder.layers.2.norms.2.weight", + "decode_head.transformer_decoder.layers.2.norms.2.bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.3.norms.0.weight", + "decode_head.transformer_decoder.layers.3.norms.0.bias", + "decode_head.transformer_decoder.layers.3.norms.1.weight", + "decode_head.transformer_decoder.layers.3.norms.1.bias", + "decode_head.transformer_decoder.layers.3.norms.2.weight", + "decode_head.transformer_decoder.layers.3.norms.2.bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.4.norms.0.weight", + "decode_head.transformer_decoder.layers.4.norms.0.bias", + "decode_head.transformer_decoder.layers.4.norms.1.weight", + "decode_head.transformer_decoder.layers.4.norms.1.bias", + "decode_head.transformer_decoder.layers.4.norms.2.weight", + "decode_head.transformer_decoder.layers.4.norms.2.bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.5.norms.0.weight", + "decode_head.transformer_decoder.layers.5.norms.0.bias", + "decode_head.transformer_decoder.layers.5.norms.1.weight", + "decode_head.transformer_decoder.layers.5.norms.1.bias", + "decode_head.transformer_decoder.layers.5.norms.2.weight", + "decode_head.transformer_decoder.layers.5.norms.2.bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias", + 
"decode_head.transformer_decoder.layers.6.norms.0.weight", + "decode_head.transformer_decoder.layers.6.norms.0.bias", + "decode_head.transformer_decoder.layers.6.norms.1.weight", + "decode_head.transformer_decoder.layers.6.norms.1.bias", + "decode_head.transformer_decoder.layers.6.norms.2.weight", + "decode_head.transformer_decoder.layers.6.norms.2.bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.7.norms.0.weight", + "decode_head.transformer_decoder.layers.7.norms.0.bias", + "decode_head.transformer_decoder.layers.7.norms.1.weight", + "decode_head.transformer_decoder.layers.7.norms.1.bias", + "decode_head.transformer_decoder.layers.7.norms.2.weight", + "decode_head.transformer_decoder.layers.7.norms.2.bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.8.norms.0.weight", + "decode_head.transformer_decoder.layers.8.norms.0.bias", + "decode_head.transformer_decoder.layers.8.norms.1.weight", + "decode_head.transformer_decoder.layers.8.norms.1.bias", + "decode_head.transformer_decoder.layers.8.norms.2.weight", + "decode_head.transformer_decoder.layers.8.norms.2.bias", + "decode_head.transformer_decoder.post_norm.weight", + "decode_head.transformer_decoder.post_norm.bias" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.0 + } +} +2025-05-28 02:35:55,171 - mmseg - INFO - Loaded 1 images +2025-05-28 02:35:55,172 - mmseg - INFO - load checkpoint from http path: https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth +2025-05-28 02:36:01,026 - mmseg - INFO - Start running, host: yiming@pasteur2.stanford.edu, work_dir: /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 +2025-05-28 02:36:01,027 - mmseg - INFO - Hooks will be executed in the following order: +before_run: +(VERY_HIGH ) PolyLrUpdaterHook +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_epoch: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_iter: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook + -------------------- +after_train_iter: +(ABOVE_NORMAL) OptimizerHook +(NORMAL ) CheckpointHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_train_epoch: +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_epoch: +(LOW ) IterTimerHook 
+(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_epoch: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_run: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +2025-05-28 02:36:01,027 - mmseg - INFO - workflow: [('train', 1)], max: 80000 iters +2025-05-28 02:36:01,027 - mmseg - INFO - Checkpoints will be saved to /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 by HardDiskBackend. diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_023516.log.json b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_023516.log.json new file mode 100644 index 0000000000000000000000000000000000000000..480c45d2b9476fc4be244fedb68bfc62b3d0bd20 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_023516.log.json @@ -0,0 +1 @@ +{"env_info": "sys.platform: linux\nPython: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ]\nCUDA available: True\nGPU 0: NVIDIA TITAN RTX\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 12.2, V12.2.91\nGCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\nPyTorch: 1.11.0+cu113\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n\nTorchVision: 0.12.0+cu113\nOpenCV: 4.11.0\nMMCV: 1.5.0\nMMCV Compiler: GCC 7.3\nMMCV CUDA Compiler: 
11.3\nMMSegmentation: 0.27.0+12dc934", "seed": 470681994, "exp_name": "mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py", "mmseg_version": "0.27.0+12dc934", "config": "num_things_classes = 100\nnum_stuff_classes = 50\nnum_classes = 19\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoderMask2Former',\n pretrained=None,\n backbone=dict(\n type='InternImage',\n core_op='DCNv3',\n channels=320,\n depths=[6, 6, 32, 6],\n groups=[10, 20, 40, 80],\n mlp_ratio=4.0,\n drop_path_rate=0.5,\n norm_layer='LN',\n layer_scale=None,\n offset_scale=1.0,\n post_norm=False,\n dw_kernel_size=5,\n res_post_norm=True,\n level2_post_norm=True,\n level2_post_norm_block_ids=[5, 11, 17, 23, 29],\n center_feature_scale=True,\n with_cp=False,\n out_indices=(0, 1, 2, 3),\n init_cfg=None),\n decode_head=dict(\n type='Mask2FormerHead',\n in_channels=[320, 640, 1280, 2560],\n feat_channels=256,\n out_channels=256,\n in_index=[0, 1, 2, 3],\n num_things_classes=100,\n num_stuff_classes=50,\n num_queries=100,\n num_transformer_feat_level=3,\n pixel_decoder=dict(\n type='MSDeformAttnPixelDecoder',\n num_outs=3,\n norm_cfg=dict(type='GN', num_groups=32),\n act_cfg=dict(type='ReLU'),\n encoder=dict(\n type='DetrTransformerEncoder',\n num_layers=6,\n transformerlayers=dict(\n type='BaseTransformerLayer',\n attn_cfgs=dict(\n type='MultiScaleDeformableAttention',\n embed_dims=256,\n num_heads=8,\n num_levels=3,\n num_points=4,\n im2col_step=64,\n dropout=0.0,\n batch_first=False,\n norm_cfg=None,\n init_cfg=None),\n ffn_cfgs=dict(\n type='FFN',\n embed_dims=256,\n feedforward_channels=1024,\n num_fcs=2,\n ffn_drop=0.0,\n act_cfg=dict(type='ReLU', inplace=True),\n with_cp=False),\n operation_order=('self_attn', 'norm', 'ffn', 'norm')),\n init_cfg=None),\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n init_cfg=None),\n enforce_decoder_input_project=False,\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n transformer_decoder=dict(\n type='DetrTransformerDecoder',\n return_intermediate=True,\n num_layers=9,\n transformerlayers=dict(\n type='DetrTransformerDecoderLayer',\n attn_cfgs=dict(\n type='MultiheadAttention',\n embed_dims=256,\n num_heads=8,\n attn_drop=0.0,\n proj_drop=0.0,\n dropout_layer=None,\n batch_first=False),\n ffn_cfgs=dict(\n embed_dims=256,\n feedforward_channels=2048,\n num_fcs=2,\n act_cfg=dict(type='ReLU', inplace=True),\n ffn_drop=0.0,\n dropout_layer=None,\n add_identity=True,\n with_cp=False),\n feedforward_channels=2048,\n operation_order=('cross_attn', 'norm', 'self_attn', 'norm',\n 'ffn', 'norm')),\n init_cfg=None),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=2.0,\n reduction='mean',\n class_weight=[\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1\n ]),\n loss_mask=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=5.0),\n loss_dice=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n reduction='mean',\n naive_dice=True,\n eps=1.0,\n loss_weight=5.0),\n num_classes=19,\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n 
sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512))),\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512)),\n init_cfg=None)\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncrop_size = (1024, 1024)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(\n type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(\n type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n ],\n split='splits/fold_1_train_split.txt'),\n val=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', 
keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ],\n split='splits/fold_1_val_split.txt'),\n test=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/val/',\n ann_dir='gtFine/val/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n dict(type='TensorboardLoggerHook')\n ])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth'\nresume_from = None\nworkflow = [('train', 1)]\ncudnn_benchmark = True\noptimizer = dict(\n type='AdamW',\n lr=1e-05,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n constructor='CustomLayerDecayOptimizerConstructor',\n paramwise_cfg=dict(\n num_layers=50,\n layer_decay_rate=0.95,\n depths=[6, 6, 32, 6],\n offset_lr_scale=1.0))\noptimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))\nlr_config = dict(\n policy='poly',\n warmup='linear',\n warmup_iters=1500,\n warmup_ratio=1e-06,\n power=1.0,\n min_lr=0.0,\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=80000)\ncheckpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1)\nevaluation = dict(\n interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU')\nwork_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1'\ngpu_ids = range(0, 1)\nauto_resume = False\ndevice = 'cuda'\nseed = 470681994\n", "CLASSES": ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"], "PALETTE": [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]], "hook_msgs": {}} diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_024045.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_024045.log new file mode 100644 index 0000000000000000000000000000000000000000..bd2339c2972d094c4b3dee988c7bc346d328e87a --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_024045.log @@ -0,0 +1,5242 @@ +2025-05-28 02:40:45,226 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 02:40:45,227 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 02:40:45,265 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) 
Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 02:40:45,265 - mmseg - INFO - Distributed training: True +2025-05-28 02:40:46,149 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + 
batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + 
dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split= + '/pasteur/u/yiming/homework4/segmentation/splits/fold_1_train_split.txt' + ), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split= + '/pasteur/u/yiming/homework4/segmentation/splits/fold_1_val_split.txt' + ), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 02:40:46,150 - mmseg - INFO - Set random seed to 251396623, deterministic: False +2025-05-28 02:40:46,150 
- mmseg - INFO - using core type: DCNv3 +2025-05-28 02:40:46,151 - mmseg - INFO - using activation layer: GELU +2025-05-28 02:40:46,151 - mmseg - INFO - using main norm layer: LN +2025-05-28 02:40:46,151 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 02:40:46,151 - mmseg - INFO - level2_post_norm: True +2025-05-28 02:40:46,151 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 02:40:46,151 - mmseg - INFO - res_post_norm: True +2025-05-28 02:40:46,151 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 02:41:07,432 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value 
is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.blocks.3.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.weight - torch.Size([640]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.1.blocks.5.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 
5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc2.weight - 
torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.weight - torch.Size([1280]): +The value is the same before and after 
calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.4.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.5.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight - 
torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.7.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.10.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.11.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.14.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.output_proj.weight - 
torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([360, 1280]): 
+Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias - 
torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.20.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined 
`init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.28.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.29.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - 
torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.bias - torch.Size([2560]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.input_proj.weight - 
torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.conv.weight - torch.Size([256, 2560, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.0.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.conv.weight - torch.Size([256, 1280, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.1.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.input_convs.1.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.conv.weight - torch.Size([256, 640, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.2.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.level_encoding.weight - torch.Size([3, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.conv.weight - torch.Size([256, 320, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.lateral_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.conv.weight - torch.Size([256, 256, 3, 3]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.output_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.mask_feature.weight - torch.Size([256, 256, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.mask_feature.bias - torch.Size([256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - 
torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - 
torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_024940.log 
b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_024940.log new file mode 100644 index 0000000000000000000000000000000000000000..d5596e2d969cce56acfc897d70980677b36f490c --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_024940.log @@ -0,0 +1,7658 @@ +2025-05-28 02:49:40,165 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 02:49:40,166 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 02:49:40,209 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 02:49:40,210 - mmseg - INFO - Distributed training: True +2025-05-28 02:49:40,636 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + 
drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + 
dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + 
type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 02:49:40,637 - mmseg - INFO - Set random seed to 1860035215, deterministic: False +2025-05-28 02:49:40,637 - mmseg - INFO - using core type: DCNv3 +2025-05-28 02:49:40,638 - mmseg - INFO - using activation layer: GELU +2025-05-28 02:49:40,638 - mmseg - INFO - using main norm layer: LN +2025-05-28 02:49:40,638 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 02:49:40,638 - mmseg - INFO - level2_post_norm: True +2025-05-28 02:49:40,638 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 02:49:40,638 - mmseg - INFO - res_post_norm: True +2025-05-28 02:49:40,638 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 02:50:05,264 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 
5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined 
`init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and 
after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.output_proj.weight - 
torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in 
InternImage + +backbone.levels.1.blocks.4.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias - 
torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.1.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.output_proj.weight - torch.Size([1280, 
1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in 
InternImage + +backbone.levels.2.blocks.3.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.weight - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.6.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc1.bias - torch.Size([5120]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.9.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.10.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.28.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.0.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([2560, 10240]): 
+Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias 
- torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1440]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.conv.weight - torch.Size([256, 2560, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
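Across the backbone entries above, the split is consistent: plain Linear weights (mlp.fc1/fc2, dcn.offset, dcn.mask, dcn.input_proj, dcn.output_proj) are reported as initialized by the user-defined `init_weights` in InternImage, while biases, norm parameters, and the depthwise-conv weights keep their values. A minimal sketch that would produce this split is shown below, assuming a truncated-normal init applied only to Linear weights (a common ViT-style convention); `backbone` is a placeholder here, and this is not the actual InternImage implementation.

    import torch.nn as nn

    def _init_linear_weights(m):
        # Re-initialize only nn.Linear weights; biases, norm layers and
        # depthwise convolutions keep their constructor defaults,
        # matching the "value is the same" entries in the log.
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=0.02)

    backbone.apply(_init_linear_weights)  # hypothetical backbone module
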
+decode_head.pixel_decoder.input_convs.0.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.conv.weight - torch.Size([256, 1280, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.1.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.conv.weight - torch.Size([256, 640, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.2.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.bias - torch.Size([256]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight - 
torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.level_encoding.weight - torch.Size([3, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.conv.weight - torch.Size([256, 320, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.lateral_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.conv.weight - torch.Size([256, 256, 3, 3]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.output_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.mask_feature.weight - torch.Size([256, 256, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.mask_feature.bias - torch.Size([256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight - 
torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias - 
torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - 
torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + 
+decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - 
torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former +2025-05-28 02:50:14,036 - mmseg - INFO - Loaded 2316 images +2025-05-28 02:50:14,036 - mmseg - INFO - Built training dataset from config: {'type': 'CityscapesDataset', 'data_root': '/pasteur/u/yiming/homework4/cityscapes', 'img_dir': 'leftImg8bit/', 'ann_dir': 'gtFine/', 'pipeline': [{'type': 'LoadImageFromFile'}, {'type': 'LoadAnnotations'}, {'type': 'Resize', 'img_scale': (2048, 1024), 'ratio_range': (0.5, 2.0)}, {'type': 'RandomCrop', 'crop_size': (1024, 1024), 'cat_max_ratio': 0.75}, {'type': 'RandomFlip', 'prob': 0.5}, {'type': 'PhotoMetricDistortion'}, {'type': 'Normalize', 'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375], 'to_rgb': True}, {'type': 'Pad', 'size': (1024, 1024), 'pad_val': 0, 'seg_pad_val': 255}, {'type': 'ToMask'}, {'type': 'DefaultFormatBundle'}, {'type': 'Collect', 'keys': ['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']}], 'split': 'splits/fold_1_train_split.txt'} +2025-05-28 02:50:14,037 - mmseg - INFO - Number of samples in training dataset: 2316 +2025-05-28 02:50:19,908 - mmseg - INFO - {'num_layers': 50, 'layer_decay_rate': 0.95, 'depths': [6, 6, 32, 6], 'offset_lr_scale': 1.0} +2025-05-28 02:50:19,908 - mmseg - INFO - Build CustomLayerDecayOptimizerConstructor 0.950000 - 52 +2025-05-28 02:50:19,918 - mmseg - INFO - Param groups = { + "layer_0_decay": { + "param_names": [ + "backbone.patch_embed.conv1.weight", + "backbone.patch_embed.conv2.weight", + "decode_head.query_embed.weight", + "decode_head.query_feat.weight", + "decode_head.level_embed.weight", + "decode_head.cls_embed.weight", + "decode_head.mask_embed.0.weight", + "decode_head.mask_embed.2.weight", + "decode_head.mask_embed.4.weight" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.05 + }, + "layer_0_no_decay": { + "param_names": [ + "backbone.patch_embed.conv1.bias", + "backbone.patch_embed.norm1.1.weight", + "backbone.patch_embed.norm1.1.bias", + "backbone.patch_embed.conv2.bias", + "backbone.patch_embed.norm2.1.weight", + "backbone.patch_embed.norm2.1.bias", + "decode_head.cls_embed.bias", + "decode_head.mask_embed.0.bias", + "decode_head.mask_embed.2.bias", + "decode_head.mask_embed.4.bias" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.0 + }, + "layer_1_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.norm1.0.weight", + "backbone.levels.0.blocks.0.norm1.0.bias", + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight", + 
"backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.0.dcn.offset.bias", + "backbone.levels.0.blocks.0.dcn.mask.bias", + "backbone.levels.0.blocks.0.dcn.input_proj.bias", + "backbone.levels.0.blocks.0.dcn.output_proj.bias", + "backbone.levels.0.blocks.0.norm2.0.weight", + "backbone.levels.0.blocks.0.norm2.0.bias", + "backbone.levels.0.blocks.0.mlp.fc1.bias", + "backbone.levels.0.blocks.0.mlp.fc2.bias", + "backbone.levels.0.blocks.0.res_post_norm1.0.weight", + "backbone.levels.0.blocks.0.res_post_norm1.0.bias", + "backbone.levels.0.blocks.0.res_post_norm2.0.weight", + "backbone.levels.0.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.0 + }, + "layer_1_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.0.dcn.offset.weight", + "backbone.levels.0.blocks.0.dcn.mask.weight", + "backbone.levels.0.blocks.0.dcn.input_proj.weight", + "backbone.levels.0.blocks.0.dcn.output_proj.weight", + "backbone.levels.0.blocks.0.mlp.fc1.weight", + "backbone.levels.0.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.05 + }, + "layer_2_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.norm1.0.weight", + "backbone.levels.0.blocks.1.norm1.0.bias", + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.1.dcn.offset.bias", + "backbone.levels.0.blocks.1.dcn.mask.bias", + "backbone.levels.0.blocks.1.dcn.input_proj.bias", + "backbone.levels.0.blocks.1.dcn.output_proj.bias", + "backbone.levels.0.blocks.1.norm2.0.weight", + "backbone.levels.0.blocks.1.norm2.0.bias", + "backbone.levels.0.blocks.1.mlp.fc1.bias", + "backbone.levels.0.blocks.1.mlp.fc2.bias", + "backbone.levels.0.blocks.1.res_post_norm1.0.weight", + "backbone.levels.0.blocks.1.res_post_norm1.0.bias", + "backbone.levels.0.blocks.1.res_post_norm2.0.weight", + "backbone.levels.0.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.0 + }, + "layer_2_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.1.dcn.offset.weight", + "backbone.levels.0.blocks.1.dcn.mask.weight", + "backbone.levels.0.blocks.1.dcn.input_proj.weight", + "backbone.levels.0.blocks.1.dcn.output_proj.weight", + "backbone.levels.0.blocks.1.mlp.fc1.weight", + "backbone.levels.0.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.05 + }, + "layer_3_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.norm1.0.weight", + "backbone.levels.0.blocks.2.norm1.0.bias", + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.2.dcn.offset.bias", + "backbone.levels.0.blocks.2.dcn.mask.bias", + "backbone.levels.0.blocks.2.dcn.input_proj.bias", + "backbone.levels.0.blocks.2.dcn.output_proj.bias", + "backbone.levels.0.blocks.2.norm2.0.weight", + 
"backbone.levels.0.blocks.2.norm2.0.bias", + "backbone.levels.0.blocks.2.mlp.fc1.bias", + "backbone.levels.0.blocks.2.mlp.fc2.bias", + "backbone.levels.0.blocks.2.res_post_norm1.0.weight", + "backbone.levels.0.blocks.2.res_post_norm1.0.bias", + "backbone.levels.0.blocks.2.res_post_norm2.0.weight", + "backbone.levels.0.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.0 + }, + "layer_3_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.2.dcn.offset.weight", + "backbone.levels.0.blocks.2.dcn.mask.weight", + "backbone.levels.0.blocks.2.dcn.input_proj.weight", + "backbone.levels.0.blocks.2.dcn.output_proj.weight", + "backbone.levels.0.blocks.2.mlp.fc1.weight", + "backbone.levels.0.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.05 + }, + "layer_4_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.norm1.0.weight", + "backbone.levels.0.blocks.3.norm1.0.bias", + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.3.dcn.offset.bias", + "backbone.levels.0.blocks.3.dcn.mask.bias", + "backbone.levels.0.blocks.3.dcn.input_proj.bias", + "backbone.levels.0.blocks.3.dcn.output_proj.bias", + "backbone.levels.0.blocks.3.norm2.0.weight", + "backbone.levels.0.blocks.3.norm2.0.bias", + "backbone.levels.0.blocks.3.mlp.fc1.bias", + "backbone.levels.0.blocks.3.mlp.fc2.bias", + "backbone.levels.0.blocks.3.res_post_norm1.0.weight", + "backbone.levels.0.blocks.3.res_post_norm1.0.bias", + "backbone.levels.0.blocks.3.res_post_norm2.0.weight", + "backbone.levels.0.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.0 + }, + "layer_4_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.3.dcn.offset.weight", + "backbone.levels.0.blocks.3.dcn.mask.weight", + "backbone.levels.0.blocks.3.dcn.input_proj.weight", + "backbone.levels.0.blocks.3.dcn.output_proj.weight", + "backbone.levels.0.blocks.3.mlp.fc1.weight", + "backbone.levels.0.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.05 + }, + "layer_5_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.norm1.0.weight", + "backbone.levels.0.blocks.4.norm1.0.bias", + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.4.dcn.offset.bias", + "backbone.levels.0.blocks.4.dcn.mask.bias", + "backbone.levels.0.blocks.4.dcn.input_proj.bias", + "backbone.levels.0.blocks.4.dcn.output_proj.bias", + "backbone.levels.0.blocks.4.norm2.0.weight", + "backbone.levels.0.blocks.4.norm2.0.bias", + "backbone.levels.0.blocks.4.mlp.fc1.bias", + "backbone.levels.0.blocks.4.mlp.fc2.bias", + "backbone.levels.0.blocks.4.res_post_norm1.0.weight", + "backbone.levels.0.blocks.4.res_post_norm1.0.bias", + "backbone.levels.0.blocks.4.res_post_norm2.0.weight", + 
"backbone.levels.0.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.0 + }, + "layer_5_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.4.dcn.offset.weight", + "backbone.levels.0.blocks.4.dcn.mask.weight", + "backbone.levels.0.blocks.4.dcn.input_proj.weight", + "backbone.levels.0.blocks.4.dcn.output_proj.weight", + "backbone.levels.0.blocks.4.mlp.fc1.weight", + "backbone.levels.0.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.05 + }, + "layer_6_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.norm1.0.weight", + "backbone.levels.0.blocks.5.norm1.0.bias", + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.5.dcn.offset.bias", + "backbone.levels.0.blocks.5.dcn.mask.bias", + "backbone.levels.0.blocks.5.dcn.input_proj.bias", + "backbone.levels.0.blocks.5.dcn.output_proj.bias", + "backbone.levels.0.blocks.5.norm2.0.weight", + "backbone.levels.0.blocks.5.norm2.0.bias", + "backbone.levels.0.blocks.5.mlp.fc1.bias", + "backbone.levels.0.blocks.5.mlp.fc2.bias", + "backbone.levels.0.blocks.5.res_post_norm1.0.weight", + "backbone.levels.0.blocks.5.res_post_norm1.0.bias", + "backbone.levels.0.blocks.5.res_post_norm2.0.weight", + "backbone.levels.0.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.0 + }, + "layer_6_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.5.dcn.offset.weight", + "backbone.levels.0.blocks.5.dcn.mask.weight", + "backbone.levels.0.blocks.5.dcn.input_proj.weight", + "backbone.levels.0.blocks.5.dcn.output_proj.weight", + "backbone.levels.0.blocks.5.mlp.fc1.weight", + "backbone.levels.0.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.05 + }, + "layer_7_no_decay": { + "param_names": [ + "backbone.levels.0.norm.0.weight", + "backbone.levels.0.norm.0.bias", + "backbone.levels.0.downsample.norm.1.weight", + "backbone.levels.0.downsample.norm.1.bias", + "backbone.levels.1.blocks.0.norm1.0.weight", + "backbone.levels.1.blocks.0.norm1.0.bias", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.0.dcn.offset.bias", + "backbone.levels.1.blocks.0.dcn.mask.bias", + "backbone.levels.1.blocks.0.dcn.input_proj.bias", + "backbone.levels.1.blocks.0.dcn.output_proj.bias", + "backbone.levels.1.blocks.0.norm2.0.weight", + "backbone.levels.1.blocks.0.norm2.0.bias", + "backbone.levels.1.blocks.0.mlp.fc1.bias", + "backbone.levels.1.blocks.0.mlp.fc2.bias", + "backbone.levels.1.blocks.0.res_post_norm1.0.weight", + "backbone.levels.1.blocks.0.res_post_norm1.0.bias", + "backbone.levels.1.blocks.0.res_post_norm2.0.weight", + "backbone.levels.1.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.0 + 
}, + "layer_7_decay": { + "param_names": [ + "backbone.levels.0.downsample.conv.weight", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.0.dcn.offset.weight", + "backbone.levels.1.blocks.0.dcn.mask.weight", + "backbone.levels.1.blocks.0.dcn.input_proj.weight", + "backbone.levels.1.blocks.0.dcn.output_proj.weight", + "backbone.levels.1.blocks.0.mlp.fc1.weight", + "backbone.levels.1.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.05 + }, + "layer_8_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.norm1.0.weight", + "backbone.levels.1.blocks.1.norm1.0.bias", + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.1.dcn.offset.bias", + "backbone.levels.1.blocks.1.dcn.mask.bias", + "backbone.levels.1.blocks.1.dcn.input_proj.bias", + "backbone.levels.1.blocks.1.dcn.output_proj.bias", + "backbone.levels.1.blocks.1.norm2.0.weight", + "backbone.levels.1.blocks.1.norm2.0.bias", + "backbone.levels.1.blocks.1.mlp.fc1.bias", + "backbone.levels.1.blocks.1.mlp.fc2.bias", + "backbone.levels.1.blocks.1.res_post_norm1.0.weight", + "backbone.levels.1.blocks.1.res_post_norm1.0.bias", + "backbone.levels.1.blocks.1.res_post_norm2.0.weight", + "backbone.levels.1.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.0 + }, + "layer_8_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.1.dcn.offset.weight", + "backbone.levels.1.blocks.1.dcn.mask.weight", + "backbone.levels.1.blocks.1.dcn.input_proj.weight", + "backbone.levels.1.blocks.1.dcn.output_proj.weight", + "backbone.levels.1.blocks.1.mlp.fc1.weight", + "backbone.levels.1.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.05 + }, + "layer_9_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.norm1.0.weight", + "backbone.levels.1.blocks.2.norm1.0.bias", + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.2.dcn.offset.bias", + "backbone.levels.1.blocks.2.dcn.mask.bias", + "backbone.levels.1.blocks.2.dcn.input_proj.bias", + "backbone.levels.1.blocks.2.dcn.output_proj.bias", + "backbone.levels.1.blocks.2.norm2.0.weight", + "backbone.levels.1.blocks.2.norm2.0.bias", + "backbone.levels.1.blocks.2.mlp.fc1.bias", + "backbone.levels.1.blocks.2.mlp.fc2.bias", + "backbone.levels.1.blocks.2.res_post_norm1.0.weight", + "backbone.levels.1.blocks.2.res_post_norm1.0.bias", + "backbone.levels.1.blocks.2.res_post_norm2.0.weight", + "backbone.levels.1.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.0 + }, + "layer_9_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.2.dcn.offset.weight", + "backbone.levels.1.blocks.2.dcn.mask.weight", + 
"backbone.levels.1.blocks.2.dcn.input_proj.weight", + "backbone.levels.1.blocks.2.dcn.output_proj.weight", + "backbone.levels.1.blocks.2.mlp.fc1.weight", + "backbone.levels.1.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.05 + }, + "layer_10_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.norm1.0.weight", + "backbone.levels.1.blocks.3.norm1.0.bias", + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.3.dcn.offset.bias", + "backbone.levels.1.blocks.3.dcn.mask.bias", + "backbone.levels.1.blocks.3.dcn.input_proj.bias", + "backbone.levels.1.blocks.3.dcn.output_proj.bias", + "backbone.levels.1.blocks.3.norm2.0.weight", + "backbone.levels.1.blocks.3.norm2.0.bias", + "backbone.levels.1.blocks.3.mlp.fc1.bias", + "backbone.levels.1.blocks.3.mlp.fc2.bias", + "backbone.levels.1.blocks.3.res_post_norm1.0.weight", + "backbone.levels.1.blocks.3.res_post_norm1.0.bias", + "backbone.levels.1.blocks.3.res_post_norm2.0.weight", + "backbone.levels.1.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.0 + }, + "layer_10_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.3.dcn.offset.weight", + "backbone.levels.1.blocks.3.dcn.mask.weight", + "backbone.levels.1.blocks.3.dcn.input_proj.weight", + "backbone.levels.1.blocks.3.dcn.output_proj.weight", + "backbone.levels.1.blocks.3.mlp.fc1.weight", + "backbone.levels.1.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.05 + }, + "layer_11_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.norm1.0.weight", + "backbone.levels.1.blocks.4.norm1.0.bias", + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.4.dcn.offset.bias", + "backbone.levels.1.blocks.4.dcn.mask.bias", + "backbone.levels.1.blocks.4.dcn.input_proj.bias", + "backbone.levels.1.blocks.4.dcn.output_proj.bias", + "backbone.levels.1.blocks.4.norm2.0.weight", + "backbone.levels.1.blocks.4.norm2.0.bias", + "backbone.levels.1.blocks.4.mlp.fc1.bias", + "backbone.levels.1.blocks.4.mlp.fc2.bias", + "backbone.levels.1.blocks.4.res_post_norm1.0.weight", + "backbone.levels.1.blocks.4.res_post_norm1.0.bias", + "backbone.levels.1.blocks.4.res_post_norm2.0.weight", + "backbone.levels.1.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.0 + }, + "layer_11_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.4.dcn.offset.weight", + "backbone.levels.1.blocks.4.dcn.mask.weight", + "backbone.levels.1.blocks.4.dcn.input_proj.weight", + "backbone.levels.1.blocks.4.dcn.output_proj.weight", + "backbone.levels.1.blocks.4.mlp.fc1.weight", + "backbone.levels.1.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.05 + }, + 
"layer_12_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.norm1.0.weight", + "backbone.levels.1.blocks.5.norm1.0.bias", + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.5.dcn.offset.bias", + "backbone.levels.1.blocks.5.dcn.mask.bias", + "backbone.levels.1.blocks.5.dcn.input_proj.bias", + "backbone.levels.1.blocks.5.dcn.output_proj.bias", + "backbone.levels.1.blocks.5.norm2.0.weight", + "backbone.levels.1.blocks.5.norm2.0.bias", + "backbone.levels.1.blocks.5.mlp.fc1.bias", + "backbone.levels.1.blocks.5.mlp.fc2.bias", + "backbone.levels.1.blocks.5.res_post_norm1.0.weight", + "backbone.levels.1.blocks.5.res_post_norm1.0.bias", + "backbone.levels.1.blocks.5.res_post_norm2.0.weight", + "backbone.levels.1.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.0 + }, + "layer_12_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.5.dcn.offset.weight", + "backbone.levels.1.blocks.5.dcn.mask.weight", + "backbone.levels.1.blocks.5.dcn.input_proj.weight", + "backbone.levels.1.blocks.5.dcn.output_proj.weight", + "backbone.levels.1.blocks.5.mlp.fc1.weight", + "backbone.levels.1.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.05 + }, + "layer_13_no_decay": { + "param_names": [ + "backbone.levels.1.norm.0.weight", + "backbone.levels.1.norm.0.bias", + "backbone.levels.1.downsample.norm.1.weight", + "backbone.levels.1.downsample.norm.1.bias", + "backbone.levels.2.blocks.0.norm1.0.weight", + "backbone.levels.2.blocks.0.norm1.0.bias", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.0.dcn.offset.bias", + "backbone.levels.2.blocks.0.dcn.mask.bias", + "backbone.levels.2.blocks.0.dcn.input_proj.bias", + "backbone.levels.2.blocks.0.dcn.output_proj.bias", + "backbone.levels.2.blocks.0.norm2.0.weight", + "backbone.levels.2.blocks.0.norm2.0.bias", + "backbone.levels.2.blocks.0.mlp.fc1.bias", + "backbone.levels.2.blocks.0.mlp.fc2.bias", + "backbone.levels.2.blocks.0.res_post_norm1.0.weight", + "backbone.levels.2.blocks.0.res_post_norm1.0.bias", + "backbone.levels.2.blocks.0.res_post_norm2.0.weight", + "backbone.levels.2.blocks.0.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.0.0.weight", + "backbone.levels.2.post_norms.0.0.bias" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.0 + }, + "layer_13_decay": { + "param_names": [ + "backbone.levels.1.downsample.conv.weight", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.0.dcn.offset.weight", + "backbone.levels.2.blocks.0.dcn.mask.weight", + "backbone.levels.2.blocks.0.dcn.input_proj.weight", + "backbone.levels.2.blocks.0.dcn.output_proj.weight", + "backbone.levels.2.blocks.0.mlp.fc1.weight", + "backbone.levels.2.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.05 + }, + 
"layer_14_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.norm1.0.weight", + "backbone.levels.2.blocks.1.norm1.0.bias", + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.1.dcn.offset.bias", + "backbone.levels.2.blocks.1.dcn.mask.bias", + "backbone.levels.2.blocks.1.dcn.input_proj.bias", + "backbone.levels.2.blocks.1.dcn.output_proj.bias", + "backbone.levels.2.blocks.1.norm2.0.weight", + "backbone.levels.2.blocks.1.norm2.0.bias", + "backbone.levels.2.blocks.1.mlp.fc1.bias", + "backbone.levels.2.blocks.1.mlp.fc2.bias", + "backbone.levels.2.blocks.1.res_post_norm1.0.weight", + "backbone.levels.2.blocks.1.res_post_norm1.0.bias", + "backbone.levels.2.blocks.1.res_post_norm2.0.weight", + "backbone.levels.2.blocks.1.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.1.0.weight", + "backbone.levels.2.post_norms.1.0.bias" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.0 + }, + "layer_14_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.1.dcn.offset.weight", + "backbone.levels.2.blocks.1.dcn.mask.weight", + "backbone.levels.2.blocks.1.dcn.input_proj.weight", + "backbone.levels.2.blocks.1.dcn.output_proj.weight", + "backbone.levels.2.blocks.1.mlp.fc1.weight", + "backbone.levels.2.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.05 + }, + "layer_15_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.norm1.0.weight", + "backbone.levels.2.blocks.2.norm1.0.bias", + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.2.dcn.offset.bias", + "backbone.levels.2.blocks.2.dcn.mask.bias", + "backbone.levels.2.blocks.2.dcn.input_proj.bias", + "backbone.levels.2.blocks.2.dcn.output_proj.bias", + "backbone.levels.2.blocks.2.norm2.0.weight", + "backbone.levels.2.blocks.2.norm2.0.bias", + "backbone.levels.2.blocks.2.mlp.fc1.bias", + "backbone.levels.2.blocks.2.mlp.fc2.bias", + "backbone.levels.2.blocks.2.res_post_norm1.0.weight", + "backbone.levels.2.blocks.2.res_post_norm1.0.bias", + "backbone.levels.2.blocks.2.res_post_norm2.0.weight", + "backbone.levels.2.blocks.2.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.2.0.weight", + "backbone.levels.2.post_norms.2.0.bias" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.0 + }, + "layer_15_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.2.dcn.offset.weight", + "backbone.levels.2.blocks.2.dcn.mask.weight", + "backbone.levels.2.blocks.2.dcn.input_proj.weight", + "backbone.levels.2.blocks.2.dcn.output_proj.weight", + "backbone.levels.2.blocks.2.mlp.fc1.weight", + "backbone.levels.2.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.05 + }, + "layer_16_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.norm1.0.weight", + "backbone.levels.2.blocks.3.norm1.0.bias", 
+ "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.3.dcn.offset.bias", + "backbone.levels.2.blocks.3.dcn.mask.bias", + "backbone.levels.2.blocks.3.dcn.input_proj.bias", + "backbone.levels.2.blocks.3.dcn.output_proj.bias", + "backbone.levels.2.blocks.3.norm2.0.weight", + "backbone.levels.2.blocks.3.norm2.0.bias", + "backbone.levels.2.blocks.3.mlp.fc1.bias", + "backbone.levels.2.blocks.3.mlp.fc2.bias", + "backbone.levels.2.blocks.3.res_post_norm1.0.weight", + "backbone.levels.2.blocks.3.res_post_norm1.0.bias", + "backbone.levels.2.blocks.3.res_post_norm2.0.weight", + "backbone.levels.2.blocks.3.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.3.0.weight", + "backbone.levels.2.post_norms.3.0.bias" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.0 + }, + "layer_16_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.3.dcn.offset.weight", + "backbone.levels.2.blocks.3.dcn.mask.weight", + "backbone.levels.2.blocks.3.dcn.input_proj.weight", + "backbone.levels.2.blocks.3.dcn.output_proj.weight", + "backbone.levels.2.blocks.3.mlp.fc1.weight", + "backbone.levels.2.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.05 + }, + "layer_17_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.norm1.0.weight", + "backbone.levels.2.blocks.4.norm1.0.bias", + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.4.dcn.offset.bias", + "backbone.levels.2.blocks.4.dcn.mask.bias", + "backbone.levels.2.blocks.4.dcn.input_proj.bias", + "backbone.levels.2.blocks.4.dcn.output_proj.bias", + "backbone.levels.2.blocks.4.norm2.0.weight", + "backbone.levels.2.blocks.4.norm2.0.bias", + "backbone.levels.2.blocks.4.mlp.fc1.bias", + "backbone.levels.2.blocks.4.mlp.fc2.bias", + "backbone.levels.2.blocks.4.res_post_norm1.0.weight", + "backbone.levels.2.blocks.4.res_post_norm1.0.bias", + "backbone.levels.2.blocks.4.res_post_norm2.0.weight", + "backbone.levels.2.blocks.4.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.4.0.weight", + "backbone.levels.2.post_norms.4.0.bias" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.0 + }, + "layer_17_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.4.dcn.offset.weight", + "backbone.levels.2.blocks.4.dcn.mask.weight", + "backbone.levels.2.blocks.4.dcn.input_proj.weight", + "backbone.levels.2.blocks.4.dcn.output_proj.weight", + "backbone.levels.2.blocks.4.mlp.fc1.weight", + "backbone.levels.2.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.05 + }, + "layer_18_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.norm1.0.weight", + "backbone.levels.2.blocks.5.norm1.0.bias", + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.bias", + 
"backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.5.dcn.offset.bias", + "backbone.levels.2.blocks.5.dcn.mask.bias", + "backbone.levels.2.blocks.5.dcn.input_proj.bias", + "backbone.levels.2.blocks.5.dcn.output_proj.bias", + "backbone.levels.2.blocks.5.norm2.0.weight", + "backbone.levels.2.blocks.5.norm2.0.bias", + "backbone.levels.2.blocks.5.mlp.fc1.bias", + "backbone.levels.2.blocks.5.mlp.fc2.bias", + "backbone.levels.2.blocks.5.res_post_norm1.0.weight", + "backbone.levels.2.blocks.5.res_post_norm1.0.bias", + "backbone.levels.2.blocks.5.res_post_norm2.0.weight", + "backbone.levels.2.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.0 + }, + "layer_18_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.5.dcn.offset.weight", + "backbone.levels.2.blocks.5.dcn.mask.weight", + "backbone.levels.2.blocks.5.dcn.input_proj.weight", + "backbone.levels.2.blocks.5.dcn.output_proj.weight", + "backbone.levels.2.blocks.5.mlp.fc1.weight", + "backbone.levels.2.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.05 + }, + "layer_19_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.norm1.0.weight", + "backbone.levels.2.blocks.6.norm1.0.bias", + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.6.dcn.offset.bias", + "backbone.levels.2.blocks.6.dcn.mask.bias", + "backbone.levels.2.blocks.6.dcn.input_proj.bias", + "backbone.levels.2.blocks.6.dcn.output_proj.bias", + "backbone.levels.2.blocks.6.norm2.0.weight", + "backbone.levels.2.blocks.6.norm2.0.bias", + "backbone.levels.2.blocks.6.mlp.fc1.bias", + "backbone.levels.2.blocks.6.mlp.fc2.bias", + "backbone.levels.2.blocks.6.res_post_norm1.0.weight", + "backbone.levels.2.blocks.6.res_post_norm1.0.bias", + "backbone.levels.2.blocks.6.res_post_norm2.0.weight", + "backbone.levels.2.blocks.6.res_post_norm2.0.bias" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.0 + }, + "layer_19_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.6.dcn.offset.weight", + "backbone.levels.2.blocks.6.dcn.mask.weight", + "backbone.levels.2.blocks.6.dcn.input_proj.weight", + "backbone.levels.2.blocks.6.dcn.output_proj.weight", + "backbone.levels.2.blocks.6.mlp.fc1.weight", + "backbone.levels.2.blocks.6.mlp.fc2.weight" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.05 + }, + "layer_20_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.norm1.0.weight", + "backbone.levels.2.blocks.7.norm1.0.bias", + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.7.dcn.offset.bias", + "backbone.levels.2.blocks.7.dcn.mask.bias", + "backbone.levels.2.blocks.7.dcn.input_proj.bias", + "backbone.levels.2.blocks.7.dcn.output_proj.bias", + 
"backbone.levels.2.blocks.7.norm2.0.weight", + "backbone.levels.2.blocks.7.norm2.0.bias", + "backbone.levels.2.blocks.7.mlp.fc1.bias", + "backbone.levels.2.blocks.7.mlp.fc2.bias", + "backbone.levels.2.blocks.7.res_post_norm1.0.weight", + "backbone.levels.2.blocks.7.res_post_norm1.0.bias", + "backbone.levels.2.blocks.7.res_post_norm2.0.weight", + "backbone.levels.2.blocks.7.res_post_norm2.0.bias" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.0 + }, + "layer_20_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.7.dcn.offset.weight", + "backbone.levels.2.blocks.7.dcn.mask.weight", + "backbone.levels.2.blocks.7.dcn.input_proj.weight", + "backbone.levels.2.blocks.7.dcn.output_proj.weight", + "backbone.levels.2.blocks.7.mlp.fc1.weight", + "backbone.levels.2.blocks.7.mlp.fc2.weight" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.05 + }, + "layer_21_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.norm1.0.weight", + "backbone.levels.2.blocks.8.norm1.0.bias", + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.8.dcn.offset.bias", + "backbone.levels.2.blocks.8.dcn.mask.bias", + "backbone.levels.2.blocks.8.dcn.input_proj.bias", + "backbone.levels.2.blocks.8.dcn.output_proj.bias", + "backbone.levels.2.blocks.8.norm2.0.weight", + "backbone.levels.2.blocks.8.norm2.0.bias", + "backbone.levels.2.blocks.8.mlp.fc1.bias", + "backbone.levels.2.blocks.8.mlp.fc2.bias", + "backbone.levels.2.blocks.8.res_post_norm1.0.weight", + "backbone.levels.2.blocks.8.res_post_norm1.0.bias", + "backbone.levels.2.blocks.8.res_post_norm2.0.weight", + "backbone.levels.2.blocks.8.res_post_norm2.0.bias" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.0 + }, + "layer_21_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.8.dcn.offset.weight", + "backbone.levels.2.blocks.8.dcn.mask.weight", + "backbone.levels.2.blocks.8.dcn.input_proj.weight", + "backbone.levels.2.blocks.8.dcn.output_proj.weight", + "backbone.levels.2.blocks.8.mlp.fc1.weight", + "backbone.levels.2.blocks.8.mlp.fc2.weight" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.05 + }, + "layer_22_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.norm1.0.weight", + "backbone.levels.2.blocks.9.norm1.0.bias", + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.9.dcn.offset.bias", + "backbone.levels.2.blocks.9.dcn.mask.bias", + "backbone.levels.2.blocks.9.dcn.input_proj.bias", + "backbone.levels.2.blocks.9.dcn.output_proj.bias", + "backbone.levels.2.blocks.9.norm2.0.weight", + "backbone.levels.2.blocks.9.norm2.0.bias", + "backbone.levels.2.blocks.9.mlp.fc1.bias", + "backbone.levels.2.blocks.9.mlp.fc2.bias", + "backbone.levels.2.blocks.9.res_post_norm1.0.weight", + "backbone.levels.2.blocks.9.res_post_norm1.0.bias", + 
"backbone.levels.2.blocks.9.res_post_norm2.0.weight", + "backbone.levels.2.blocks.9.res_post_norm2.0.bias" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.0 + }, + "layer_22_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.9.dcn.offset.weight", + "backbone.levels.2.blocks.9.dcn.mask.weight", + "backbone.levels.2.blocks.9.dcn.input_proj.weight", + "backbone.levels.2.blocks.9.dcn.output_proj.weight", + "backbone.levels.2.blocks.9.mlp.fc1.weight", + "backbone.levels.2.blocks.9.mlp.fc2.weight" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.05 + }, + "layer_23_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.norm1.0.weight", + "backbone.levels.2.blocks.10.norm1.0.bias", + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.10.dcn.offset.bias", + "backbone.levels.2.blocks.10.dcn.mask.bias", + "backbone.levels.2.blocks.10.dcn.input_proj.bias", + "backbone.levels.2.blocks.10.dcn.output_proj.bias", + "backbone.levels.2.blocks.10.norm2.0.weight", + "backbone.levels.2.blocks.10.norm2.0.bias", + "backbone.levels.2.blocks.10.mlp.fc1.bias", + "backbone.levels.2.blocks.10.mlp.fc2.bias", + "backbone.levels.2.blocks.10.res_post_norm1.0.weight", + "backbone.levels.2.blocks.10.res_post_norm1.0.bias", + "backbone.levels.2.blocks.10.res_post_norm2.0.weight", + "backbone.levels.2.blocks.10.res_post_norm2.0.bias" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.0 + }, + "layer_23_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.10.dcn.offset.weight", + "backbone.levels.2.blocks.10.dcn.mask.weight", + "backbone.levels.2.blocks.10.dcn.input_proj.weight", + "backbone.levels.2.blocks.10.dcn.output_proj.weight", + "backbone.levels.2.blocks.10.mlp.fc1.weight", + "backbone.levels.2.blocks.10.mlp.fc2.weight" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.05 + }, + "layer_24_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.norm1.0.weight", + "backbone.levels.2.blocks.11.norm1.0.bias", + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.11.dcn.offset.bias", + "backbone.levels.2.blocks.11.dcn.mask.bias", + "backbone.levels.2.blocks.11.dcn.input_proj.bias", + "backbone.levels.2.blocks.11.dcn.output_proj.bias", + "backbone.levels.2.blocks.11.norm2.0.weight", + "backbone.levels.2.blocks.11.norm2.0.bias", + "backbone.levels.2.blocks.11.mlp.fc1.bias", + "backbone.levels.2.blocks.11.mlp.fc2.bias", + "backbone.levels.2.blocks.11.res_post_norm1.0.weight", + "backbone.levels.2.blocks.11.res_post_norm1.0.bias", + "backbone.levels.2.blocks.11.res_post_norm2.0.weight", + "backbone.levels.2.blocks.11.res_post_norm2.0.bias" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.0 + }, + "layer_24_decay": { + "param_names": [ + 
"backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.11.dcn.offset.weight", + "backbone.levels.2.blocks.11.dcn.mask.weight", + "backbone.levels.2.blocks.11.dcn.input_proj.weight", + "backbone.levels.2.blocks.11.dcn.output_proj.weight", + "backbone.levels.2.blocks.11.mlp.fc1.weight", + "backbone.levels.2.blocks.11.mlp.fc2.weight" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.05 + }, + "layer_25_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.norm1.0.weight", + "backbone.levels.2.blocks.12.norm1.0.bias", + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.12.dcn.offset.bias", + "backbone.levels.2.blocks.12.dcn.mask.bias", + "backbone.levels.2.blocks.12.dcn.input_proj.bias", + "backbone.levels.2.blocks.12.dcn.output_proj.bias", + "backbone.levels.2.blocks.12.norm2.0.weight", + "backbone.levels.2.blocks.12.norm2.0.bias", + "backbone.levels.2.blocks.12.mlp.fc1.bias", + "backbone.levels.2.blocks.12.mlp.fc2.bias", + "backbone.levels.2.blocks.12.res_post_norm1.0.weight", + "backbone.levels.2.blocks.12.res_post_norm1.0.bias", + "backbone.levels.2.blocks.12.res_post_norm2.0.weight", + "backbone.levels.2.blocks.12.res_post_norm2.0.bias" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.0 + }, + "layer_25_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.12.dcn.offset.weight", + "backbone.levels.2.blocks.12.dcn.mask.weight", + "backbone.levels.2.blocks.12.dcn.input_proj.weight", + "backbone.levels.2.blocks.12.dcn.output_proj.weight", + "backbone.levels.2.blocks.12.mlp.fc1.weight", + "backbone.levels.2.blocks.12.mlp.fc2.weight" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.05 + }, + "layer_26_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.norm1.0.weight", + "backbone.levels.2.blocks.13.norm1.0.bias", + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.13.dcn.offset.bias", + "backbone.levels.2.blocks.13.dcn.mask.bias", + "backbone.levels.2.blocks.13.dcn.input_proj.bias", + "backbone.levels.2.blocks.13.dcn.output_proj.bias", + "backbone.levels.2.blocks.13.norm2.0.weight", + "backbone.levels.2.blocks.13.norm2.0.bias", + "backbone.levels.2.blocks.13.mlp.fc1.bias", + "backbone.levels.2.blocks.13.mlp.fc2.bias", + "backbone.levels.2.blocks.13.res_post_norm1.0.weight", + "backbone.levels.2.blocks.13.res_post_norm1.0.bias", + "backbone.levels.2.blocks.13.res_post_norm2.0.weight", + "backbone.levels.2.blocks.13.res_post_norm2.0.bias" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.0 + }, + "layer_26_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.13.dcn.offset.weight", + "backbone.levels.2.blocks.13.dcn.mask.weight", + 
"backbone.levels.2.blocks.13.dcn.input_proj.weight", + "backbone.levels.2.blocks.13.dcn.output_proj.weight", + "backbone.levels.2.blocks.13.mlp.fc1.weight", + "backbone.levels.2.blocks.13.mlp.fc2.weight" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.05 + }, + "layer_27_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.norm1.0.weight", + "backbone.levels.2.blocks.14.norm1.0.bias", + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.14.dcn.offset.bias", + "backbone.levels.2.blocks.14.dcn.mask.bias", + "backbone.levels.2.blocks.14.dcn.input_proj.bias", + "backbone.levels.2.blocks.14.dcn.output_proj.bias", + "backbone.levels.2.blocks.14.norm2.0.weight", + "backbone.levels.2.blocks.14.norm2.0.bias", + "backbone.levels.2.blocks.14.mlp.fc1.bias", + "backbone.levels.2.blocks.14.mlp.fc2.bias", + "backbone.levels.2.blocks.14.res_post_norm1.0.weight", + "backbone.levels.2.blocks.14.res_post_norm1.0.bias", + "backbone.levels.2.blocks.14.res_post_norm2.0.weight", + "backbone.levels.2.blocks.14.res_post_norm2.0.bias" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.0 + }, + "layer_27_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.14.dcn.offset.weight", + "backbone.levels.2.blocks.14.dcn.mask.weight", + "backbone.levels.2.blocks.14.dcn.input_proj.weight", + "backbone.levels.2.blocks.14.dcn.output_proj.weight", + "backbone.levels.2.blocks.14.mlp.fc1.weight", + "backbone.levels.2.blocks.14.mlp.fc2.weight" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.05 + }, + "layer_28_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.norm1.0.weight", + "backbone.levels.2.blocks.15.norm1.0.bias", + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.15.dcn.offset.bias", + "backbone.levels.2.blocks.15.dcn.mask.bias", + "backbone.levels.2.blocks.15.dcn.input_proj.bias", + "backbone.levels.2.blocks.15.dcn.output_proj.bias", + "backbone.levels.2.blocks.15.norm2.0.weight", + "backbone.levels.2.blocks.15.norm2.0.bias", + "backbone.levels.2.blocks.15.mlp.fc1.bias", + "backbone.levels.2.blocks.15.mlp.fc2.bias", + "backbone.levels.2.blocks.15.res_post_norm1.0.weight", + "backbone.levels.2.blocks.15.res_post_norm1.0.bias", + "backbone.levels.2.blocks.15.res_post_norm2.0.weight", + "backbone.levels.2.blocks.15.res_post_norm2.0.bias" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.0 + }, + "layer_28_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.15.dcn.offset.weight", + "backbone.levels.2.blocks.15.dcn.mask.weight", + "backbone.levels.2.blocks.15.dcn.input_proj.weight", + "backbone.levels.2.blocks.15.dcn.output_proj.weight", + "backbone.levels.2.blocks.15.mlp.fc1.weight", + "backbone.levels.2.blocks.15.mlp.fc2.weight" + ], + "lr_scale": 0.3073568677250236, + "lr": 
3.073568677250236e-06, + "weight_decay": 0.05 + }, + "layer_29_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.norm1.0.weight", + "backbone.levels.2.blocks.16.norm1.0.bias", + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.16.dcn.offset.bias", + "backbone.levels.2.blocks.16.dcn.mask.bias", + "backbone.levels.2.blocks.16.dcn.input_proj.bias", + "backbone.levels.2.blocks.16.dcn.output_proj.bias", + "backbone.levels.2.blocks.16.norm2.0.weight", + "backbone.levels.2.blocks.16.norm2.0.bias", + "backbone.levels.2.blocks.16.mlp.fc1.bias", + "backbone.levels.2.blocks.16.mlp.fc2.bias", + "backbone.levels.2.blocks.16.res_post_norm1.0.weight", + "backbone.levels.2.blocks.16.res_post_norm1.0.bias", + "backbone.levels.2.blocks.16.res_post_norm2.0.weight", + "backbone.levels.2.blocks.16.res_post_norm2.0.bias" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.0 + }, + "layer_29_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.16.dcn.offset.weight", + "backbone.levels.2.blocks.16.dcn.mask.weight", + "backbone.levels.2.blocks.16.dcn.input_proj.weight", + "backbone.levels.2.blocks.16.dcn.output_proj.weight", + "backbone.levels.2.blocks.16.mlp.fc1.weight", + "backbone.levels.2.blocks.16.mlp.fc2.weight" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.05 + }, + "layer_30_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.norm1.0.weight", + "backbone.levels.2.blocks.17.norm1.0.bias", + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.17.dcn.offset.bias", + "backbone.levels.2.blocks.17.dcn.mask.bias", + "backbone.levels.2.blocks.17.dcn.input_proj.bias", + "backbone.levels.2.blocks.17.dcn.output_proj.bias", + "backbone.levels.2.blocks.17.norm2.0.weight", + "backbone.levels.2.blocks.17.norm2.0.bias", + "backbone.levels.2.blocks.17.mlp.fc1.bias", + "backbone.levels.2.blocks.17.mlp.fc2.bias", + "backbone.levels.2.blocks.17.res_post_norm1.0.weight", + "backbone.levels.2.blocks.17.res_post_norm1.0.bias", + "backbone.levels.2.blocks.17.res_post_norm2.0.weight", + "backbone.levels.2.blocks.17.res_post_norm2.0.bias" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.0 + }, + "layer_30_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.17.dcn.offset.weight", + "backbone.levels.2.blocks.17.dcn.mask.weight", + "backbone.levels.2.blocks.17.dcn.input_proj.weight", + "backbone.levels.2.blocks.17.dcn.output_proj.weight", + "backbone.levels.2.blocks.17.mlp.fc1.weight", + "backbone.levels.2.blocks.17.mlp.fc2.weight" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.05 + }, + "layer_31_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.norm1.0.weight", + "backbone.levels.2.blocks.18.norm1.0.bias", + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias", + 
"backbone.levels.2.blocks.18.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.18.dcn.offset.bias", + "backbone.levels.2.blocks.18.dcn.mask.bias", + "backbone.levels.2.blocks.18.dcn.input_proj.bias", + "backbone.levels.2.blocks.18.dcn.output_proj.bias", + "backbone.levels.2.blocks.18.norm2.0.weight", + "backbone.levels.2.blocks.18.norm2.0.bias", + "backbone.levels.2.blocks.18.mlp.fc1.bias", + "backbone.levels.2.blocks.18.mlp.fc2.bias", + "backbone.levels.2.blocks.18.res_post_norm1.0.weight", + "backbone.levels.2.blocks.18.res_post_norm1.0.bias", + "backbone.levels.2.blocks.18.res_post_norm2.0.weight", + "backbone.levels.2.blocks.18.res_post_norm2.0.bias" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.0 + }, + "layer_31_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.18.dcn.offset.weight", + "backbone.levels.2.blocks.18.dcn.mask.weight", + "backbone.levels.2.blocks.18.dcn.input_proj.weight", + "backbone.levels.2.blocks.18.dcn.output_proj.weight", + "backbone.levels.2.blocks.18.mlp.fc1.weight", + "backbone.levels.2.blocks.18.mlp.fc2.weight" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.05 + }, + "layer_32_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.norm1.0.weight", + "backbone.levels.2.blocks.19.norm1.0.bias", + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.19.dcn.offset.bias", + "backbone.levels.2.blocks.19.dcn.mask.bias", + "backbone.levels.2.blocks.19.dcn.input_proj.bias", + "backbone.levels.2.blocks.19.dcn.output_proj.bias", + "backbone.levels.2.blocks.19.norm2.0.weight", + "backbone.levels.2.blocks.19.norm2.0.bias", + "backbone.levels.2.blocks.19.mlp.fc1.bias", + "backbone.levels.2.blocks.19.mlp.fc2.bias", + "backbone.levels.2.blocks.19.res_post_norm1.0.weight", + "backbone.levels.2.blocks.19.res_post_norm1.0.bias", + "backbone.levels.2.blocks.19.res_post_norm2.0.weight", + "backbone.levels.2.blocks.19.res_post_norm2.0.bias" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.0 + }, + "layer_32_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.19.dcn.offset.weight", + "backbone.levels.2.blocks.19.dcn.mask.weight", + "backbone.levels.2.blocks.19.dcn.input_proj.weight", + "backbone.levels.2.blocks.19.dcn.output_proj.weight", + "backbone.levels.2.blocks.19.mlp.fc1.weight", + "backbone.levels.2.blocks.19.mlp.fc2.weight" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.05 + }, + "layer_33_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.norm1.0.weight", + "backbone.levels.2.blocks.20.norm1.0.bias", + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.20.dcn.offset.bias", + "backbone.levels.2.blocks.20.dcn.mask.bias", + 
"backbone.levels.2.blocks.20.dcn.input_proj.bias", + "backbone.levels.2.blocks.20.dcn.output_proj.bias", + "backbone.levels.2.blocks.20.norm2.0.weight", + "backbone.levels.2.blocks.20.norm2.0.bias", + "backbone.levels.2.blocks.20.mlp.fc1.bias", + "backbone.levels.2.blocks.20.mlp.fc2.bias", + "backbone.levels.2.blocks.20.res_post_norm1.0.weight", + "backbone.levels.2.blocks.20.res_post_norm1.0.bias", + "backbone.levels.2.blocks.20.res_post_norm2.0.weight", + "backbone.levels.2.blocks.20.res_post_norm2.0.bias" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.0 + }, + "layer_33_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.20.dcn.offset.weight", + "backbone.levels.2.blocks.20.dcn.mask.weight", + "backbone.levels.2.blocks.20.dcn.input_proj.weight", + "backbone.levels.2.blocks.20.dcn.output_proj.weight", + "backbone.levels.2.blocks.20.mlp.fc1.weight", + "backbone.levels.2.blocks.20.mlp.fc2.weight" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.05 + }, + "layer_34_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.norm1.0.weight", + "backbone.levels.2.blocks.21.norm1.0.bias", + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.21.dcn.offset.bias", + "backbone.levels.2.blocks.21.dcn.mask.bias", + "backbone.levels.2.blocks.21.dcn.input_proj.bias", + "backbone.levels.2.blocks.21.dcn.output_proj.bias", + "backbone.levels.2.blocks.21.norm2.0.weight", + "backbone.levels.2.blocks.21.norm2.0.bias", + "backbone.levels.2.blocks.21.mlp.fc1.bias", + "backbone.levels.2.blocks.21.mlp.fc2.bias", + "backbone.levels.2.blocks.21.res_post_norm1.0.weight", + "backbone.levels.2.blocks.21.res_post_norm1.0.bias", + "backbone.levels.2.blocks.21.res_post_norm2.0.weight", + "backbone.levels.2.blocks.21.res_post_norm2.0.bias" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.0 + }, + "layer_34_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.21.dcn.offset.weight", + "backbone.levels.2.blocks.21.dcn.mask.weight", + "backbone.levels.2.blocks.21.dcn.input_proj.weight", + "backbone.levels.2.blocks.21.dcn.output_proj.weight", + "backbone.levels.2.blocks.21.mlp.fc1.weight", + "backbone.levels.2.blocks.21.mlp.fc2.weight" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.05 + }, + "layer_35_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.norm1.0.weight", + "backbone.levels.2.blocks.22.norm1.0.bias", + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.22.dcn.offset.bias", + "backbone.levels.2.blocks.22.dcn.mask.bias", + "backbone.levels.2.blocks.22.dcn.input_proj.bias", + "backbone.levels.2.blocks.22.dcn.output_proj.bias", + "backbone.levels.2.blocks.22.norm2.0.weight", + "backbone.levels.2.blocks.22.norm2.0.bias", + "backbone.levels.2.blocks.22.mlp.fc1.bias", + 
"backbone.levels.2.blocks.22.mlp.fc2.bias", + "backbone.levels.2.blocks.22.res_post_norm1.0.weight", + "backbone.levels.2.blocks.22.res_post_norm1.0.bias", + "backbone.levels.2.blocks.22.res_post_norm2.0.weight", + "backbone.levels.2.blocks.22.res_post_norm2.0.bias" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.0 + }, + "layer_35_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.22.dcn.offset.weight", + "backbone.levels.2.blocks.22.dcn.mask.weight", + "backbone.levels.2.blocks.22.dcn.input_proj.weight", + "backbone.levels.2.blocks.22.dcn.output_proj.weight", + "backbone.levels.2.blocks.22.mlp.fc1.weight", + "backbone.levels.2.blocks.22.mlp.fc2.weight" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.05 + }, + "layer_36_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.norm1.0.weight", + "backbone.levels.2.blocks.23.norm1.0.bias", + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.23.dcn.offset.bias", + "backbone.levels.2.blocks.23.dcn.mask.bias", + "backbone.levels.2.blocks.23.dcn.input_proj.bias", + "backbone.levels.2.blocks.23.dcn.output_proj.bias", + "backbone.levels.2.blocks.23.norm2.0.weight", + "backbone.levels.2.blocks.23.norm2.0.bias", + "backbone.levels.2.blocks.23.mlp.fc1.bias", + "backbone.levels.2.blocks.23.mlp.fc2.bias", + "backbone.levels.2.blocks.23.res_post_norm1.0.weight", + "backbone.levels.2.blocks.23.res_post_norm1.0.bias", + "backbone.levels.2.blocks.23.res_post_norm2.0.weight", + "backbone.levels.2.blocks.23.res_post_norm2.0.bias" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.0 + }, + "layer_36_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.23.dcn.offset.weight", + "backbone.levels.2.blocks.23.dcn.mask.weight", + "backbone.levels.2.blocks.23.dcn.input_proj.weight", + "backbone.levels.2.blocks.23.dcn.output_proj.weight", + "backbone.levels.2.blocks.23.mlp.fc1.weight", + "backbone.levels.2.blocks.23.mlp.fc2.weight" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.05 + }, + "layer_37_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.norm1.0.weight", + "backbone.levels.2.blocks.24.norm1.0.bias", + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.24.dcn.offset.bias", + "backbone.levels.2.blocks.24.dcn.mask.bias", + "backbone.levels.2.blocks.24.dcn.input_proj.bias", + "backbone.levels.2.blocks.24.dcn.output_proj.bias", + "backbone.levels.2.blocks.24.norm2.0.weight", + "backbone.levels.2.blocks.24.norm2.0.bias", + "backbone.levels.2.blocks.24.mlp.fc1.bias", + "backbone.levels.2.blocks.24.mlp.fc2.bias", + "backbone.levels.2.blocks.24.res_post_norm1.0.weight", + "backbone.levels.2.blocks.24.res_post_norm1.0.bias", + "backbone.levels.2.blocks.24.res_post_norm2.0.weight", + 
"backbone.levels.2.blocks.24.res_post_norm2.0.bias" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.0 + }, + "layer_37_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.24.dcn.offset.weight", + "backbone.levels.2.blocks.24.dcn.mask.weight", + "backbone.levels.2.blocks.24.dcn.input_proj.weight", + "backbone.levels.2.blocks.24.dcn.output_proj.weight", + "backbone.levels.2.blocks.24.mlp.fc1.weight", + "backbone.levels.2.blocks.24.mlp.fc2.weight" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.05 + }, + "layer_38_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.norm1.0.weight", + "backbone.levels.2.blocks.25.norm1.0.bias", + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.25.dcn.offset.bias", + "backbone.levels.2.blocks.25.dcn.mask.bias", + "backbone.levels.2.blocks.25.dcn.input_proj.bias", + "backbone.levels.2.blocks.25.dcn.output_proj.bias", + "backbone.levels.2.blocks.25.norm2.0.weight", + "backbone.levels.2.blocks.25.norm2.0.bias", + "backbone.levels.2.blocks.25.mlp.fc1.bias", + "backbone.levels.2.blocks.25.mlp.fc2.bias", + "backbone.levels.2.blocks.25.res_post_norm1.0.weight", + "backbone.levels.2.blocks.25.res_post_norm1.0.bias", + "backbone.levels.2.blocks.25.res_post_norm2.0.weight", + "backbone.levels.2.blocks.25.res_post_norm2.0.bias" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.0 + }, + "layer_38_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.25.dcn.offset.weight", + "backbone.levels.2.blocks.25.dcn.mask.weight", + "backbone.levels.2.blocks.25.dcn.input_proj.weight", + "backbone.levels.2.blocks.25.dcn.output_proj.weight", + "backbone.levels.2.blocks.25.mlp.fc1.weight", + "backbone.levels.2.blocks.25.mlp.fc2.weight" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.05 + }, + "layer_39_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.norm1.0.weight", + "backbone.levels.2.blocks.26.norm1.0.bias", + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.26.dcn.offset.bias", + "backbone.levels.2.blocks.26.dcn.mask.bias", + "backbone.levels.2.blocks.26.dcn.input_proj.bias", + "backbone.levels.2.blocks.26.dcn.output_proj.bias", + "backbone.levels.2.blocks.26.norm2.0.weight", + "backbone.levels.2.blocks.26.norm2.0.bias", + "backbone.levels.2.blocks.26.mlp.fc1.bias", + "backbone.levels.2.blocks.26.mlp.fc2.bias", + "backbone.levels.2.blocks.26.res_post_norm1.0.weight", + "backbone.levels.2.blocks.26.res_post_norm1.0.bias", + "backbone.levels.2.blocks.26.res_post_norm2.0.weight", + "backbone.levels.2.blocks.26.res_post_norm2.0.bias" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.0 + }, + "layer_39_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight", 
+ "backbone.levels.2.blocks.26.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.26.dcn.offset.weight", + "backbone.levels.2.blocks.26.dcn.mask.weight", + "backbone.levels.2.blocks.26.dcn.input_proj.weight", + "backbone.levels.2.blocks.26.dcn.output_proj.weight", + "backbone.levels.2.blocks.26.mlp.fc1.weight", + "backbone.levels.2.blocks.26.mlp.fc2.weight" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.05 + }, + "layer_40_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.norm1.0.weight", + "backbone.levels.2.blocks.27.norm1.0.bias", + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.27.dcn.offset.bias", + "backbone.levels.2.blocks.27.dcn.mask.bias", + "backbone.levels.2.blocks.27.dcn.input_proj.bias", + "backbone.levels.2.blocks.27.dcn.output_proj.bias", + "backbone.levels.2.blocks.27.norm2.0.weight", + "backbone.levels.2.blocks.27.norm2.0.bias", + "backbone.levels.2.blocks.27.mlp.fc1.bias", + "backbone.levels.2.blocks.27.mlp.fc2.bias", + "backbone.levels.2.blocks.27.res_post_norm1.0.weight", + "backbone.levels.2.blocks.27.res_post_norm1.0.bias", + "backbone.levels.2.blocks.27.res_post_norm2.0.weight", + "backbone.levels.2.blocks.27.res_post_norm2.0.bias" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.0 + }, + "layer_40_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.27.dcn.offset.weight", + "backbone.levels.2.blocks.27.dcn.mask.weight", + "backbone.levels.2.blocks.27.dcn.input_proj.weight", + "backbone.levels.2.blocks.27.dcn.output_proj.weight", + "backbone.levels.2.blocks.27.mlp.fc1.weight", + "backbone.levels.2.blocks.27.mlp.fc2.weight" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.05 + }, + "layer_41_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.norm1.0.weight", + "backbone.levels.2.blocks.28.norm1.0.bias", + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.28.dcn.offset.bias", + "backbone.levels.2.blocks.28.dcn.mask.bias", + "backbone.levels.2.blocks.28.dcn.input_proj.bias", + "backbone.levels.2.blocks.28.dcn.output_proj.bias", + "backbone.levels.2.blocks.28.norm2.0.weight", + "backbone.levels.2.blocks.28.norm2.0.bias", + "backbone.levels.2.blocks.28.mlp.fc1.bias", + "backbone.levels.2.blocks.28.mlp.fc2.bias", + "backbone.levels.2.blocks.28.res_post_norm1.0.weight", + "backbone.levels.2.blocks.28.res_post_norm1.0.bias", + "backbone.levels.2.blocks.28.res_post_norm2.0.weight", + "backbone.levels.2.blocks.28.res_post_norm2.0.bias" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.0 + }, + "layer_41_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.28.dcn.offset.weight", + "backbone.levels.2.blocks.28.dcn.mask.weight", + "backbone.levels.2.blocks.28.dcn.input_proj.weight", + 
"backbone.levels.2.blocks.28.dcn.output_proj.weight", + "backbone.levels.2.blocks.28.mlp.fc1.weight", + "backbone.levels.2.blocks.28.mlp.fc2.weight" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.05 + }, + "layer_42_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.norm1.0.weight", + "backbone.levels.2.blocks.29.norm1.0.bias", + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.29.dcn.offset.bias", + "backbone.levels.2.blocks.29.dcn.mask.bias", + "backbone.levels.2.blocks.29.dcn.input_proj.bias", + "backbone.levels.2.blocks.29.dcn.output_proj.bias", + "backbone.levels.2.blocks.29.norm2.0.weight", + "backbone.levels.2.blocks.29.norm2.0.bias", + "backbone.levels.2.blocks.29.mlp.fc1.bias", + "backbone.levels.2.blocks.29.mlp.fc2.bias", + "backbone.levels.2.blocks.29.res_post_norm1.0.weight", + "backbone.levels.2.blocks.29.res_post_norm1.0.bias", + "backbone.levels.2.blocks.29.res_post_norm2.0.weight", + "backbone.levels.2.blocks.29.res_post_norm2.0.bias" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.0 + }, + "layer_42_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.29.dcn.offset.weight", + "backbone.levels.2.blocks.29.dcn.mask.weight", + "backbone.levels.2.blocks.29.dcn.input_proj.weight", + "backbone.levels.2.blocks.29.dcn.output_proj.weight", + "backbone.levels.2.blocks.29.mlp.fc1.weight", + "backbone.levels.2.blocks.29.mlp.fc2.weight" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.05 + }, + "layer_43_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.norm1.0.weight", + "backbone.levels.2.blocks.30.norm1.0.bias", + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.30.dcn.offset.bias", + "backbone.levels.2.blocks.30.dcn.mask.bias", + "backbone.levels.2.blocks.30.dcn.input_proj.bias", + "backbone.levels.2.blocks.30.dcn.output_proj.bias", + "backbone.levels.2.blocks.30.norm2.0.weight", + "backbone.levels.2.blocks.30.norm2.0.bias", + "backbone.levels.2.blocks.30.mlp.fc1.bias", + "backbone.levels.2.blocks.30.mlp.fc2.bias", + "backbone.levels.2.blocks.30.res_post_norm1.0.weight", + "backbone.levels.2.blocks.30.res_post_norm1.0.bias", + "backbone.levels.2.blocks.30.res_post_norm2.0.weight", + "backbone.levels.2.blocks.30.res_post_norm2.0.bias" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.0 + }, + "layer_43_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.30.dcn.offset.weight", + "backbone.levels.2.blocks.30.dcn.mask.weight", + "backbone.levels.2.blocks.30.dcn.input_proj.weight", + "backbone.levels.2.blocks.30.dcn.output_proj.weight", + "backbone.levels.2.blocks.30.mlp.fc1.weight", + "backbone.levels.2.blocks.30.mlp.fc2.weight" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.05 + }, + 
"layer_44_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.norm1.0.weight", + "backbone.levels.2.blocks.31.norm1.0.bias", + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.31.dcn.offset.bias", + "backbone.levels.2.blocks.31.dcn.mask.bias", + "backbone.levels.2.blocks.31.dcn.input_proj.bias", + "backbone.levels.2.blocks.31.dcn.output_proj.bias", + "backbone.levels.2.blocks.31.norm2.0.weight", + "backbone.levels.2.blocks.31.norm2.0.bias", + "backbone.levels.2.blocks.31.mlp.fc1.bias", + "backbone.levels.2.blocks.31.mlp.fc2.bias", + "backbone.levels.2.blocks.31.res_post_norm1.0.weight", + "backbone.levels.2.blocks.31.res_post_norm1.0.bias", + "backbone.levels.2.blocks.31.res_post_norm2.0.weight", + "backbone.levels.2.blocks.31.res_post_norm2.0.bias" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.0 + }, + "layer_44_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.31.dcn.offset.weight", + "backbone.levels.2.blocks.31.dcn.mask.weight", + "backbone.levels.2.blocks.31.dcn.input_proj.weight", + "backbone.levels.2.blocks.31.dcn.output_proj.weight", + "backbone.levels.2.blocks.31.mlp.fc1.weight", + "backbone.levels.2.blocks.31.mlp.fc2.weight" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.05 + }, + "layer_45_no_decay": { + "param_names": [ + "backbone.levels.2.norm.0.weight", + "backbone.levels.2.norm.0.bias", + "backbone.levels.2.downsample.norm.1.weight", + "backbone.levels.2.downsample.norm.1.bias", + "backbone.levels.3.blocks.0.norm1.0.weight", + "backbone.levels.3.blocks.0.norm1.0.bias", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.0.dcn.offset.bias", + "backbone.levels.3.blocks.0.dcn.mask.bias", + "backbone.levels.3.blocks.0.dcn.input_proj.bias", + "backbone.levels.3.blocks.0.dcn.output_proj.bias", + "backbone.levels.3.blocks.0.norm2.0.weight", + "backbone.levels.3.blocks.0.norm2.0.bias", + "backbone.levels.3.blocks.0.mlp.fc1.bias", + "backbone.levels.3.blocks.0.mlp.fc2.bias", + "backbone.levels.3.blocks.0.res_post_norm1.0.weight", + "backbone.levels.3.blocks.0.res_post_norm1.0.bias", + "backbone.levels.3.blocks.0.res_post_norm2.0.weight", + "backbone.levels.3.blocks.0.res_post_norm2.0.bias", + "backbone.levels.3.norm.0.weight", + "backbone.levels.3.norm.0.bias" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.0 + }, + "layer_45_decay": { + "param_names": [ + "backbone.levels.2.downsample.conv.weight", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.0.dcn.offset.weight", + "backbone.levels.3.blocks.0.dcn.mask.weight", + "backbone.levels.3.blocks.0.dcn.input_proj.weight", + "backbone.levels.3.blocks.0.dcn.output_proj.weight", + "backbone.levels.3.blocks.0.mlp.fc1.weight", + "backbone.levels.3.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.05 + }, + 
"layer_46_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.norm1.0.weight", + "backbone.levels.3.blocks.1.norm1.0.bias", + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.1.dcn.offset.bias", + "backbone.levels.3.blocks.1.dcn.mask.bias", + "backbone.levels.3.blocks.1.dcn.input_proj.bias", + "backbone.levels.3.blocks.1.dcn.output_proj.bias", + "backbone.levels.3.blocks.1.norm2.0.weight", + "backbone.levels.3.blocks.1.norm2.0.bias", + "backbone.levels.3.blocks.1.mlp.fc1.bias", + "backbone.levels.3.blocks.1.mlp.fc2.bias", + "backbone.levels.3.blocks.1.res_post_norm1.0.weight", + "backbone.levels.3.blocks.1.res_post_norm1.0.bias", + "backbone.levels.3.blocks.1.res_post_norm2.0.weight", + "backbone.levels.3.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.0 + }, + "layer_46_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.1.dcn.offset.weight", + "backbone.levels.3.blocks.1.dcn.mask.weight", + "backbone.levels.3.blocks.1.dcn.input_proj.weight", + "backbone.levels.3.blocks.1.dcn.output_proj.weight", + "backbone.levels.3.blocks.1.mlp.fc1.weight", + "backbone.levels.3.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.05 + }, + "layer_47_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.norm1.0.weight", + "backbone.levels.3.blocks.2.norm1.0.bias", + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.2.dcn.offset.bias", + "backbone.levels.3.blocks.2.dcn.mask.bias", + "backbone.levels.3.blocks.2.dcn.input_proj.bias", + "backbone.levels.3.blocks.2.dcn.output_proj.bias", + "backbone.levels.3.blocks.2.norm2.0.weight", + "backbone.levels.3.blocks.2.norm2.0.bias", + "backbone.levels.3.blocks.2.mlp.fc1.bias", + "backbone.levels.3.blocks.2.mlp.fc2.bias", + "backbone.levels.3.blocks.2.res_post_norm1.0.weight", + "backbone.levels.3.blocks.2.res_post_norm1.0.bias", + "backbone.levels.3.blocks.2.res_post_norm2.0.weight", + "backbone.levels.3.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.0 + }, + "layer_47_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.2.dcn.offset.weight", + "backbone.levels.3.blocks.2.dcn.mask.weight", + "backbone.levels.3.blocks.2.dcn.input_proj.weight", + "backbone.levels.3.blocks.2.dcn.output_proj.weight", + "backbone.levels.3.blocks.2.mlp.fc1.weight", + "backbone.levels.3.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.05 + }, + "layer_48_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.norm1.0.weight", + "backbone.levels.3.blocks.3.norm1.0.bias", + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight", + 
"backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.3.dcn.offset.bias", + "backbone.levels.3.blocks.3.dcn.mask.bias", + "backbone.levels.3.blocks.3.dcn.input_proj.bias", + "backbone.levels.3.blocks.3.dcn.output_proj.bias", + "backbone.levels.3.blocks.3.norm2.0.weight", + "backbone.levels.3.blocks.3.norm2.0.bias", + "backbone.levels.3.blocks.3.mlp.fc1.bias", + "backbone.levels.3.blocks.3.mlp.fc2.bias", + "backbone.levels.3.blocks.3.res_post_norm1.0.weight", + "backbone.levels.3.blocks.3.res_post_norm1.0.bias", + "backbone.levels.3.blocks.3.res_post_norm2.0.weight", + "backbone.levels.3.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.0 + }, + "layer_48_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.3.dcn.offset.weight", + "backbone.levels.3.blocks.3.dcn.mask.weight", + "backbone.levels.3.blocks.3.dcn.input_proj.weight", + "backbone.levels.3.blocks.3.dcn.output_proj.weight", + "backbone.levels.3.blocks.3.mlp.fc1.weight", + "backbone.levels.3.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.05 + }, + "layer_49_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.norm1.0.weight", + "backbone.levels.3.blocks.4.norm1.0.bias", + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.4.dcn.offset.bias", + "backbone.levels.3.blocks.4.dcn.mask.bias", + "backbone.levels.3.blocks.4.dcn.input_proj.bias", + "backbone.levels.3.blocks.4.dcn.output_proj.bias", + "backbone.levels.3.blocks.4.norm2.0.weight", + "backbone.levels.3.blocks.4.norm2.0.bias", + "backbone.levels.3.blocks.4.mlp.fc1.bias", + "backbone.levels.3.blocks.4.mlp.fc2.bias", + "backbone.levels.3.blocks.4.res_post_norm1.0.weight", + "backbone.levels.3.blocks.4.res_post_norm1.0.bias", + "backbone.levels.3.blocks.4.res_post_norm2.0.weight", + "backbone.levels.3.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.0 + }, + "layer_49_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.4.dcn.offset.weight", + "backbone.levels.3.blocks.4.dcn.mask.weight", + "backbone.levels.3.blocks.4.dcn.input_proj.weight", + "backbone.levels.3.blocks.4.dcn.output_proj.weight", + "backbone.levels.3.blocks.4.mlp.fc1.weight", + "backbone.levels.3.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.05 + }, + "layer_50_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.norm1.0.weight", + "backbone.levels.3.blocks.5.norm1.0.bias", + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.5.dcn.offset.bias", + "backbone.levels.3.blocks.5.dcn.mask.bias", + "backbone.levels.3.blocks.5.dcn.input_proj.bias", + "backbone.levels.3.blocks.5.dcn.output_proj.bias", + "backbone.levels.3.blocks.5.norm2.0.weight", + "backbone.levels.3.blocks.5.norm2.0.bias", + 
"backbone.levels.3.blocks.5.mlp.fc1.bias", + "backbone.levels.3.blocks.5.mlp.fc2.bias", + "backbone.levels.3.blocks.5.res_post_norm1.0.weight", + "backbone.levels.3.blocks.5.res_post_norm1.0.bias", + "backbone.levels.3.blocks.5.res_post_norm2.0.weight", + "backbone.levels.3.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.0 + }, + "layer_50_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.5.dcn.offset.weight", + "backbone.levels.3.blocks.5.dcn.mask.weight", + "backbone.levels.3.blocks.5.dcn.input_proj.weight", + "backbone.levels.3.blocks.5.dcn.output_proj.weight", + "backbone.levels.3.blocks.5.mlp.fc1.weight", + "backbone.levels.3.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.05 + }, + "layer_51_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.weight", + "decode_head.pixel_decoder.input_convs.1.conv.weight", + "decode_head.pixel_decoder.input_convs.2.conv.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.level_encoding.weight", + 
"decode_head.pixel_decoder.lateral_convs.0.conv.weight", + "decode_head.pixel_decoder.output_convs.0.conv.weight", + "decode_head.pixel_decoder.mask_feature.weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight", + 
"decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.05 + }, + "layer_51_no_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.bias", + "decode_head.pixel_decoder.input_convs.0.gn.weight", + "decode_head.pixel_decoder.input_convs.0.gn.bias", + "decode_head.pixel_decoder.input_convs.1.conv.bias", + "decode_head.pixel_decoder.input_convs.1.gn.weight", + "decode_head.pixel_decoder.input_convs.1.gn.bias", + "decode_head.pixel_decoder.input_convs.2.conv.bias", + "decode_head.pixel_decoder.input_convs.2.gn.weight", + "decode_head.pixel_decoder.input_convs.2.gn.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.weight", 
+ "decode_head.pixel_decoder.encoder.layers.2.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.bias", + "decode_head.pixel_decoder.lateral_convs.0.gn.weight", + "decode_head.pixel_decoder.lateral_convs.0.gn.bias", + "decode_head.pixel_decoder.output_convs.0.gn.weight", + "decode_head.pixel_decoder.output_convs.0.gn.bias", + "decode_head.pixel_decoder.mask_feature.bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.0.norms.0.weight", + "decode_head.transformer_decoder.layers.0.norms.0.bias", + "decode_head.transformer_decoder.layers.0.norms.1.weight", + "decode_head.transformer_decoder.layers.0.norms.1.bias", + 
"decode_head.transformer_decoder.layers.0.norms.2.weight", + "decode_head.transformer_decoder.layers.0.norms.2.bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.1.norms.0.weight", + "decode_head.transformer_decoder.layers.1.norms.0.bias", + "decode_head.transformer_decoder.layers.1.norms.1.weight", + "decode_head.transformer_decoder.layers.1.norms.1.bias", + "decode_head.transformer_decoder.layers.1.norms.2.weight", + "decode_head.transformer_decoder.layers.1.norms.2.bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.2.norms.0.weight", + "decode_head.transformer_decoder.layers.2.norms.0.bias", + "decode_head.transformer_decoder.layers.2.norms.1.weight", + "decode_head.transformer_decoder.layers.2.norms.1.bias", + "decode_head.transformer_decoder.layers.2.norms.2.weight", + "decode_head.transformer_decoder.layers.2.norms.2.bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.3.norms.0.weight", + "decode_head.transformer_decoder.layers.3.norms.0.bias", + "decode_head.transformer_decoder.layers.3.norms.1.weight", + "decode_head.transformer_decoder.layers.3.norms.1.bias", + "decode_head.transformer_decoder.layers.3.norms.2.weight", + "decode_head.transformer_decoder.layers.3.norms.2.bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.4.norms.0.weight", + "decode_head.transformer_decoder.layers.4.norms.0.bias", + "decode_head.transformer_decoder.layers.4.norms.1.weight", + "decode_head.transformer_decoder.layers.4.norms.1.bias", + "decode_head.transformer_decoder.layers.4.norms.2.weight", + "decode_head.transformer_decoder.layers.4.norms.2.bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias", + 
"decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.5.norms.0.weight", + "decode_head.transformer_decoder.layers.5.norms.0.bias", + "decode_head.transformer_decoder.layers.5.norms.1.weight", + "decode_head.transformer_decoder.layers.5.norms.1.bias", + "decode_head.transformer_decoder.layers.5.norms.2.weight", + "decode_head.transformer_decoder.layers.5.norms.2.bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.6.norms.0.weight", + "decode_head.transformer_decoder.layers.6.norms.0.bias", + "decode_head.transformer_decoder.layers.6.norms.1.weight", + "decode_head.transformer_decoder.layers.6.norms.1.bias", + "decode_head.transformer_decoder.layers.6.norms.2.weight", + "decode_head.transformer_decoder.layers.6.norms.2.bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.7.norms.0.weight", + "decode_head.transformer_decoder.layers.7.norms.0.bias", + "decode_head.transformer_decoder.layers.7.norms.1.weight", + "decode_head.transformer_decoder.layers.7.norms.1.bias", + "decode_head.transformer_decoder.layers.7.norms.2.weight", + "decode_head.transformer_decoder.layers.7.norms.2.bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.8.norms.0.weight", + "decode_head.transformer_decoder.layers.8.norms.0.bias", + "decode_head.transformer_decoder.layers.8.norms.1.weight", + "decode_head.transformer_decoder.layers.8.norms.1.bias", + "decode_head.transformer_decoder.layers.8.norms.2.weight", + "decode_head.transformer_decoder.layers.8.norms.2.bias", + "decode_head.transformer_decoder.post_norm.weight", + "decode_head.transformer_decoder.post_norm.bias" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.0 + } +} +2025-05-28 02:50:19,941 - mmseg - INFO - Loaded 1159 images +2025-05-28 02:50:19,942 - mmseg - INFO - load checkpoint from http path: https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth +2025-05-28 02:50:25,842 - mmseg - INFO - Start running, host: 
yiming@pasteur2.stanford.edu, work_dir: /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 +2025-05-28 02:50:25,842 - mmseg - INFO - Hooks will be executed in the following order: +before_run: +(VERY_HIGH ) PolyLrUpdaterHook +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_epoch: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_iter: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook + -------------------- +after_train_iter: +(ABOVE_NORMAL) OptimizerHook +(NORMAL ) CheckpointHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_train_epoch: +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_epoch: +(LOW ) IterTimerHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_epoch: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_run: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +2025-05-28 02:50:25,843 - mmseg - INFO - workflow: [('train', 1)], max: 80000 iters +2025-05-28 02:50:25,843 - mmseg - INFO - Checkpoints will be saved to /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 by HardDiskBackend. diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_024940.log.json b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_024940.log.json new file mode 100644 index 0000000000000000000000000000000000000000..5e575cf70f11e174364ae52a743188c18c0a9b7f --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_024940.log.json @@ -0,0 +1 @@ +{"env_info": "sys.platform: linux\nPython: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ]\nCUDA available: True\nGPU 0: NVIDIA TITAN RTX\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 12.2, V12.2.91\nGCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\nPyTorch: 1.11.0+cu113\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e)\n - OpenMP 201511 (a.k.a. 
OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n\nTorchVision: 0.12.0+cu113\nOpenCV: 4.11.0\nMMCV: 1.5.0\nMMCV Compiler: GCC 7.3\nMMCV CUDA Compiler: 11.3\nMMSegmentation: 0.27.0+12dc934", "seed": 1860035215, "exp_name": "mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py", "mmseg_version": "0.27.0+12dc934", "config": "num_things_classes = 100\nnum_stuff_classes = 50\nnum_classes = 19\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoderMask2Former',\n pretrained=None,\n backbone=dict(\n type='InternImage',\n core_op='DCNv3',\n channels=320,\n depths=[6, 6, 32, 6],\n groups=[10, 20, 40, 80],\n mlp_ratio=4.0,\n drop_path_rate=0.5,\n norm_layer='LN',\n layer_scale=None,\n offset_scale=1.0,\n post_norm=False,\n dw_kernel_size=5,\n res_post_norm=True,\n level2_post_norm=True,\n level2_post_norm_block_ids=[5, 11, 17, 23, 29],\n center_feature_scale=True,\n with_cp=False,\n out_indices=(0, 1, 2, 3),\n init_cfg=None),\n decode_head=dict(\n type='Mask2FormerHead',\n in_channels=[320, 640, 1280, 2560],\n feat_channels=256,\n out_channels=256,\n in_index=[0, 1, 2, 3],\n num_things_classes=100,\n num_stuff_classes=50,\n num_queries=100,\n num_transformer_feat_level=3,\n pixel_decoder=dict(\n type='MSDeformAttnPixelDecoder',\n num_outs=3,\n norm_cfg=dict(type='GN', num_groups=32),\n act_cfg=dict(type='ReLU'),\n encoder=dict(\n type='DetrTransformerEncoder',\n num_layers=6,\n transformerlayers=dict(\n type='BaseTransformerLayer',\n attn_cfgs=dict(\n type='MultiScaleDeformableAttention',\n embed_dims=256,\n num_heads=8,\n num_levels=3,\n num_points=4,\n im2col_step=64,\n dropout=0.0,\n batch_first=False,\n norm_cfg=None,\n init_cfg=None),\n ffn_cfgs=dict(\n type='FFN',\n embed_dims=256,\n feedforward_channels=1024,\n num_fcs=2,\n ffn_drop=0.0,\n act_cfg=dict(type='ReLU', inplace=True),\n with_cp=False),\n operation_order=('self_attn', 
'norm', 'ffn', 'norm')),\n init_cfg=None),\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n init_cfg=None),\n enforce_decoder_input_project=False,\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n transformer_decoder=dict(\n type='DetrTransformerDecoder',\n return_intermediate=True,\n num_layers=9,\n transformerlayers=dict(\n type='DetrTransformerDecoderLayer',\n attn_cfgs=dict(\n type='MultiheadAttention',\n embed_dims=256,\n num_heads=8,\n attn_drop=0.0,\n proj_drop=0.0,\n dropout_layer=None,\n batch_first=False),\n ffn_cfgs=dict(\n embed_dims=256,\n feedforward_channels=2048,\n num_fcs=2,\n act_cfg=dict(type='ReLU', inplace=True),\n ffn_drop=0.0,\n dropout_layer=None,\n add_identity=True,\n with_cp=False),\n feedforward_channels=2048,\n operation_order=('cross_attn', 'norm', 'self_attn', 'norm',\n 'ffn', 'norm')),\n init_cfg=None),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=2.0,\n reduction='mean',\n class_weight=[\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1\n ]),\n loss_mask=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=5.0),\n loss_dice=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n reduction='mean',\n naive_dice=True,\n eps=1.0,\n loss_weight=5.0),\n num_classes=19,\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512))),\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512)),\n init_cfg=None)\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncrop_size = (1024, 1024)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n 
type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(\n type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(\n type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n ],\n split='splits/fold_1_train_split.txt'),\n val=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ],\n split='splits/fold_1_val_split.txt'),\n test=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/val/',\n ann_dir='gtFine/val/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n dict(type='TensorboardLoggerHook')\n ])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth'\nresume_from = None\nworkflow = [('train', 1)]\ncudnn_benchmark = True\noptimizer = dict(\n type='AdamW',\n lr=1e-05,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n constructor='CustomLayerDecayOptimizerConstructor',\n paramwise_cfg=dict(\n num_layers=50,\n layer_decay_rate=0.95,\n depths=[6, 6, 32, 6],\n offset_lr_scale=1.0))\noptimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))\nlr_config = dict(\n policy='poly',\n warmup='linear',\n warmup_iters=1500,\n warmup_ratio=1e-06,\n power=1.0,\n min_lr=0.0,\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=80000)\ncheckpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1)\nevaluation = dict(\n interval=2000, metric='mIoU', 
pre_eval=True, save_best='mIoU')\nwork_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1'\ngpu_ids = range(0, 1)\nauto_resume = False\ndevice = 'cuda'\nseed = 1860035215\n", "CLASSES": ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"], "PALETTE": [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]], "hook_msgs": {}} diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025504.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025504.log new file mode 100644 index 0000000000000000000000000000000000000000..3da6e054b31c62f926bd8c7cf09fe963214d7297 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025504.log @@ -0,0 +1,7658 @@ +2025-05-28 02:55:04,630 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 02:55:04,631 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 02:55:04,672 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. 
OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 02:55:04,672 - mmseg - INFO - Distributed training: True +2025-05-28 02:55:05,094 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + 
with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + 
img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 02:55:05,094 - mmseg - INFO - Set random seed to 804528400, deterministic: False +2025-05-28 02:55:05,095 - mmseg - INFO - using core type: DCNv3 +2025-05-28 02:55:05,095 - mmseg - INFO - using activation layer: GELU +2025-05-28 02:55:05,095 - mmseg - INFO - using main norm layer: LN +2025-05-28 02:55:05,095 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 02:55:05,095 - mmseg - INFO - 
level2_post_norm: True +2025-05-28 02:55:05,095 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 02:55:05,095 - mmseg - INFO - res_post_norm: True +2025-05-28 02:55:05,095 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 02:55:32,905 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
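The tensor shapes in this initialization listing follow from the backbone config dumped earlier in this log (channels=320, groups=[10, 20, 40, 80], dw_kernel_size=5, mlp_ratio=4.0). A minimal sketch, assuming DCNv3's default 3x3 sampling kernel (the sampling kernel size is not spelled out in the config), reproduces the level-0 shapes logged here:

# Sketch: derive the level-0 DCNv3/MLP parameter shapes from the backbone config
# in this log; the 3x3 sampling kernel is an assumption (DCNv3 default).
channels, groups, dw_k, mlp_ratio, k = 320, 10, 5, 4.0, 3
shapes = {
    'dcn.offset.weight': (2 * groups * k * k, channels),         # (180, 320)
    'dcn.mask.weight': (groups * k * k, channels),                # (90, 320)
    'dcn.dw_conv.0.weight': (channels, 1, dw_k, dw_k),            # (320, 1, 5, 5)
    'dcn.center_feature_scale_proj_weight': (groups, channels),   # (10, 320)
    'mlp.fc1.weight': (int(mlp_ratio * channels), channels),      # (1280, 320)
    'mlp.fc2.weight': (channels, int(mlp_ratio * channels)),      # (320, 1280)
}
for name, shape in shapes.items():
    print(name, shape)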
+backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - 
torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
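Aside from the parameter listing, the test_cfg in the config dump above runs inference with mode='slide', crop_size=(1024, 1024) and stride=(512, 512). A small sketch, assuming mmseg's usual sliding-window grid rule, shows how many crops that yields on a full-resolution 2048x1024 Cityscapes frame (the keep_ratio test resize leaves it at that size):

# Sketch: count sliding-window crops for mode='slide' inference, assuming the
# usual ceil-style grid rule; crop/stride values come from test_cfg in this log.
def num_windows(h_img, w_img, crop=1024, stride=512):
    h_grids = max(h_img - crop + stride - 1, 0) // stride + 1
    w_grids = max(w_img - crop + stride - 1, 0) // stride + 1
    return h_grids * w_grids

print(num_windows(1024, 2048))  # -> 3 overlapping 1024x1024 crops per image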
+backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
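The lr_config in the config dump above combines 1500 iterations of linear warmup (warmup_ratio=1e-06) with poly decay (power=1.0, min_lr=0.0) over the 80000-iteration run. A minimal sketch of that schedule for the base lr of 1e-05, assuming mmcv's standard linear-warmup and poly formulas rather than quoting PolyLrUpdaterHook verbatim:

# Sketch of the lr schedule implied by lr_config in this log; the formulas are
# the assumed standard mmcv linear-warmup + poly rules, not copied source code.
def lr_at(it, base_lr=1e-05, max_iters=80000, power=1.0, min_lr=0.0,
          warmup_iters=1500, warmup_ratio=1e-06):
    regular = (base_lr - min_lr) * (1 - it / max_iters) ** power + min_lr
    if it < warmup_iters:
        k = (1 - it / warmup_iters) * (1 - warmup_ratio)
        return regular * (1 - k)
    return regular

print(lr_at(0))      # ~1e-11 (base_lr * warmup_ratio)
print(lr_at(1500))   # ~9.81e-06, warmup done; poly decay takes over
print(lr_at(80000))  # 0.0 (min_lr)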
+backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - torch.Size([320]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + 
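The offset and mask sizes listed above follow a simple pattern per backbone level: with C channels and G DCN groups (G can be read off the center_feature_scale_proj shapes: 10, 20, 40 for C = 320, 640, 1280), the offset projection maps C to G*2*k*k values and the mask projection maps C to G*k*k values, while the MLP hidden width is 4*C. The sampling window k = 3 and the MLP ratio 4 are inferred from the numbers in this dump, not read from a config, so the following is only a minimal sketch that reproduces the reported shapes under those assumptions:

# Sketch: recompute the parameter shapes listed in this dump from per-level
# channels and DCN group counts. KERNEL = 3 and MLP_RATIO = 4 are assumptions
# inferred from the reported sizes, not values taken from a config file.
levels = [
    {"channels": 320,  "groups": 10},   # backbone.levels.0
    {"channels": 640,  "groups": 20},   # backbone.levels.1
    {"channels": 1280, "groups": 40},   # backbone.levels.2
]
KERNEL = 3       # assumed DCNv3 sampling window (3 x 3 -> 9 points per group)
MLP_RATIO = 4    # assumed MLP expansion ratio

for i, lvl in enumerate(levels):
    c, g = lvl["channels"], lvl["groups"]
    points = KERNEL * KERNEL
    print(f"level {i}: "
          f"offset [{g * 2 * points}, {c}], "   # e.g. [180, 320] at level 0
          f"mask [{g * points}, {c}], "         # e.g. [90, 320] at level 0
          f"mlp.fc1 [{c * MLP_RATIO}, {c}], "   # e.g. [1280, 320] at level 0
          f"mlp.fc2 [{c}, {c * MLP_RATIO}]")    # e.g. [320, 1280] at level 0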
+backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and 
after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.bias - torch.Size([640]): 
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.blocks.3.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc1.weight - 
torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.1.blocks.5.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.2.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
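Every entry in this dump has the same two-part form: a parameter name with its torch.Size, followed by one of two notes, either "Initialized by user-defined `init_weights` in InternImage" or "The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former". A small, hypothetical helper such as the one below (the entry format is taken from the lines above; the function and variable names are illustrative, not part of any library) can tally how many parameters fall into each group:

import re

# Hypothetical helper: summarise entries of the form
#   "<name> - torch.Size([...]):"  followed on the next line by the init note,
# exactly as they appear in this dump.
ENTRY = re.compile(
    r"^(?P<name>[\w.]+) - torch\.Size\(\[(?P<shape>[^\]]*)\]\):\s*\n"
    r"(?P<note>.+)$",
    re.MULTILINE,
)

def summarize(log_text: str) -> dict:
    counts = {"user_init": 0, "unchanged": 0}
    for m in ENTRY.finditer(log_text):
        note = m.group("note")
        if "Initialized by user-defined `init_weights`" in note:
            counts["user_init"] += 1
        elif "same before and after" in note:
            counts["unchanged"] += 1
    return counts

# Example on two entries copied from above:
sample = (
    "backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): \n"
    "Initialized by user-defined `init_weights` in InternImage \n"
    "\n"
    "backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): \n"
    "The value is the same before and after calling `init_weights` of "
    "EncoderDecoderMask2Former \n"
)
print(summarize(sample))  # {'user_init': 1, 'unchanged': 1}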
+backbone.levels.2.blocks.3.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.output_proj.weight - torch.Size([1280, 
1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in 
InternImage + +backbone.levels.2.blocks.5.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.weight - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.8.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc1.bias - torch.Size([5120]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 
5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.27.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.28.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.29.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.30.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 
2560]):
+Initialized by user-defined `init_weights` in InternImage
+
+(Per-parameter initialization report, condensed; the original log lists every parameter individually, following exactly the pattern below.)
+
+backbone.levels.3.blocks.0-5 (all six blocks):
+  dcn.offset.weight [1440, 2560], dcn.mask.weight [720, 2560], dcn.input_proj.weight [2560, 2560], dcn.output_proj.weight [2560, 2560], mlp.fc1.weight [10240, 2560], mlp.fc2.weight [2560, 10240]:
+    Initialized by user-defined `init_weights` in InternImage
+  every bias, all norm1/norm2/res_post_norm1/res_post_norm2 parameters, dcn.center_feature_scale_proj_{weight,bias}, and all dcn.dw_conv parameters:
+    The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+backbone.levels.3.norm.0.{weight,bias} - torch.Size([2560]):
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+decode_head.pixel_decoder.input_convs.{0,1,2}.conv.weight ([256, 2560, 1, 1], [256, 1280, 1, 1], [256, 640, 1, 1]):
+Initialized by user-defined `init_weights` in Mask2FormerHead
+decode_head.pixel_decoder.input_convs.{0,1,2}.conv.bias and gn.{weight,bias}:
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+decode_head.pixel_decoder.encoder.layers.0-5 (all six layers):
+  attentions.0.value_proj.weight [256, 256], attentions.0.output_proj.weight [256, 256], ffns.0.layers.0.0.weight [1024, 256], ffns.0.layers.1.weight [256, 1024]:
+    Initialized by user-defined `init_weights` in Mask2FormerHead
+  attentions.0.sampling_offsets.{weight [192, 256], bias [192]}, attentions.0.attention_weights.{weight [96, 256], bias [96]}, all remaining biases, and norms.{0,1} parameters:
+    The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+decode_head.pixel_decoder.level_encoding.weight [3, 256], lateral_convs.0.conv.weight [256, 320, 1, 1], output_convs.0.conv.weight [256, 256, 3, 3], mask_feature.weight [256, 256, 1, 1], mask_feature.bias [256]:
+Initialized by user-defined `init_weights` in Mask2FormerHead
+decode_head.pixel_decoder.lateral_convs.0.gn.{weight,bias} and output_convs.0.gn.{weight,bias}:
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+decode_head.transformer_decoder.layers.0-8 (all nine layers):
+  attentions.{0,1}.attn.in_proj_weight [768, 256], attentions.{0,1}.attn.out_proj.weight [256, 256], ffns.0.layers.0.0.weight [2048, 256], ffns.0.layers.1.weight [256, 2048]:
+    Initialized by user-defined `init_weights` in Mask2FormerHead
+  all attention and FFN biases and norms.{0,1,2} parameters:
+    The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
+
+decode_head.transformer_decoder.post_norm.{weight,bias} [256], decode_head.query_embed.weight [100, 256], decode_head.query_feat.weight [100, 256], decode_head.level_embed.weight [3, 256], decode_head.cls_embed.weight [20, 256], decode_head.cls_embed.bias [20], decode_head.mask_embed.{0,2,4}.weight [256, 256] and bias [256]:
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former
EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former +2025-05-28 02:55:39,276 - mmseg - INFO - Loaded 2316 images +2025-05-28 02:55:39,276 - mmseg - INFO - Built training dataset from config: {'type': 'CityscapesDataset', 'data_root': 
'/pasteur/u/yiming/homework4/cityscapes', 'img_dir': 'leftImg8bit/', 'ann_dir': 'gtFine/', 'pipeline': [{'type': 'LoadImageFromFile'}, {'type': 'LoadAnnotations'}, {'type': 'Resize', 'img_scale': (2048, 1024), 'ratio_range': (0.5, 2.0)}, {'type': 'RandomCrop', 'crop_size': (1024, 1024), 'cat_max_ratio': 0.75}, {'type': 'RandomFlip', 'prob': 0.5}, {'type': 'PhotoMetricDistortion'}, {'type': 'Normalize', 'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375], 'to_rgb': True}, {'type': 'Pad', 'size': (1024, 1024), 'pad_val': 0, 'seg_pad_val': 255}, {'type': 'ToMask'}, {'type': 'DefaultFormatBundle'}, {'type': 'Collect', 'keys': ['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']}], 'split': 'splits/fold_1_train_split.txt'} +2025-05-28 02:55:39,277 - mmseg - INFO - Number of samples in training dataset: 2316 +2025-05-28 02:55:44,889 - mmseg - INFO - {'num_layers': 50, 'layer_decay_rate': 0.95, 'depths': [6, 6, 32, 6], 'offset_lr_scale': 1.0} +2025-05-28 02:55:44,889 - mmseg - INFO - Build CustomLayerDecayOptimizerConstructor 0.950000 - 52 +2025-05-28 02:55:44,899 - mmseg - INFO - Param groups = { + "layer_0_decay": { + "param_names": [ + "backbone.patch_embed.conv1.weight", + "backbone.patch_embed.conv2.weight", + "decode_head.query_embed.weight", + "decode_head.query_feat.weight", + "decode_head.level_embed.weight", + "decode_head.cls_embed.weight", + "decode_head.mask_embed.0.weight", + "decode_head.mask_embed.2.weight", + "decode_head.mask_embed.4.weight" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.05 + }, + "layer_0_no_decay": { + "param_names": [ + "backbone.patch_embed.conv1.bias", + "backbone.patch_embed.norm1.1.weight", + "backbone.patch_embed.norm1.1.bias", + "backbone.patch_embed.conv2.bias", + "backbone.patch_embed.norm2.1.weight", + "backbone.patch_embed.norm2.1.bias", + "decode_head.cls_embed.bias", + "decode_head.mask_embed.0.bias", + "decode_head.mask_embed.2.bias", + "decode_head.mask_embed.4.bias" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.0 + }, + "layer_1_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.norm1.0.weight", + "backbone.levels.0.blocks.0.norm1.0.bias", + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.0.dcn.offset.bias", + "backbone.levels.0.blocks.0.dcn.mask.bias", + "backbone.levels.0.blocks.0.dcn.input_proj.bias", + "backbone.levels.0.blocks.0.dcn.output_proj.bias", + "backbone.levels.0.blocks.0.norm2.0.weight", + "backbone.levels.0.blocks.0.norm2.0.bias", + "backbone.levels.0.blocks.0.mlp.fc1.bias", + "backbone.levels.0.blocks.0.mlp.fc2.bias", + "backbone.levels.0.blocks.0.res_post_norm1.0.weight", + "backbone.levels.0.blocks.0.res_post_norm1.0.bias", + "backbone.levels.0.blocks.0.res_post_norm2.0.weight", + "backbone.levels.0.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.0 + }, + "layer_1_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.0.dcn.offset.weight", + "backbone.levels.0.blocks.0.dcn.mask.weight", + "backbone.levels.0.blocks.0.dcn.input_proj.weight", + "backbone.levels.0.blocks.0.dcn.output_proj.weight", + 
"backbone.levels.0.blocks.0.mlp.fc1.weight", + "backbone.levels.0.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.05 + }, + "layer_2_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.norm1.0.weight", + "backbone.levels.0.blocks.1.norm1.0.bias", + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.1.dcn.offset.bias", + "backbone.levels.0.blocks.1.dcn.mask.bias", + "backbone.levels.0.blocks.1.dcn.input_proj.bias", + "backbone.levels.0.blocks.1.dcn.output_proj.bias", + "backbone.levels.0.blocks.1.norm2.0.weight", + "backbone.levels.0.blocks.1.norm2.0.bias", + "backbone.levels.0.blocks.1.mlp.fc1.bias", + "backbone.levels.0.blocks.1.mlp.fc2.bias", + "backbone.levels.0.blocks.1.res_post_norm1.0.weight", + "backbone.levels.0.blocks.1.res_post_norm1.0.bias", + "backbone.levels.0.blocks.1.res_post_norm2.0.weight", + "backbone.levels.0.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.0 + }, + "layer_2_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.1.dcn.offset.weight", + "backbone.levels.0.blocks.1.dcn.mask.weight", + "backbone.levels.0.blocks.1.dcn.input_proj.weight", + "backbone.levels.0.blocks.1.dcn.output_proj.weight", + "backbone.levels.0.blocks.1.mlp.fc1.weight", + "backbone.levels.0.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.05 + }, + "layer_3_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.norm1.0.weight", + "backbone.levels.0.blocks.2.norm1.0.bias", + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.2.dcn.offset.bias", + "backbone.levels.0.blocks.2.dcn.mask.bias", + "backbone.levels.0.blocks.2.dcn.input_proj.bias", + "backbone.levels.0.blocks.2.dcn.output_proj.bias", + "backbone.levels.0.blocks.2.norm2.0.weight", + "backbone.levels.0.blocks.2.norm2.0.bias", + "backbone.levels.0.blocks.2.mlp.fc1.bias", + "backbone.levels.0.blocks.2.mlp.fc2.bias", + "backbone.levels.0.blocks.2.res_post_norm1.0.weight", + "backbone.levels.0.blocks.2.res_post_norm1.0.bias", + "backbone.levels.0.blocks.2.res_post_norm2.0.weight", + "backbone.levels.0.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.0 + }, + "layer_3_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.2.dcn.offset.weight", + "backbone.levels.0.blocks.2.dcn.mask.weight", + "backbone.levels.0.blocks.2.dcn.input_proj.weight", + "backbone.levels.0.blocks.2.dcn.output_proj.weight", + "backbone.levels.0.blocks.2.mlp.fc1.weight", + "backbone.levels.0.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.05 + }, + "layer_4_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.norm1.0.weight", + 
"backbone.levels.0.blocks.3.norm1.0.bias", + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.3.dcn.offset.bias", + "backbone.levels.0.blocks.3.dcn.mask.bias", + "backbone.levels.0.blocks.3.dcn.input_proj.bias", + "backbone.levels.0.blocks.3.dcn.output_proj.bias", + "backbone.levels.0.blocks.3.norm2.0.weight", + "backbone.levels.0.blocks.3.norm2.0.bias", + "backbone.levels.0.blocks.3.mlp.fc1.bias", + "backbone.levels.0.blocks.3.mlp.fc2.bias", + "backbone.levels.0.blocks.3.res_post_norm1.0.weight", + "backbone.levels.0.blocks.3.res_post_norm1.0.bias", + "backbone.levels.0.blocks.3.res_post_norm2.0.weight", + "backbone.levels.0.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.0 + }, + "layer_4_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.3.dcn.offset.weight", + "backbone.levels.0.blocks.3.dcn.mask.weight", + "backbone.levels.0.blocks.3.dcn.input_proj.weight", + "backbone.levels.0.blocks.3.dcn.output_proj.weight", + "backbone.levels.0.blocks.3.mlp.fc1.weight", + "backbone.levels.0.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.05 + }, + "layer_5_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.norm1.0.weight", + "backbone.levels.0.blocks.4.norm1.0.bias", + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.4.dcn.offset.bias", + "backbone.levels.0.blocks.4.dcn.mask.bias", + "backbone.levels.0.blocks.4.dcn.input_proj.bias", + "backbone.levels.0.blocks.4.dcn.output_proj.bias", + "backbone.levels.0.blocks.4.norm2.0.weight", + "backbone.levels.0.blocks.4.norm2.0.bias", + "backbone.levels.0.blocks.4.mlp.fc1.bias", + "backbone.levels.0.blocks.4.mlp.fc2.bias", + "backbone.levels.0.blocks.4.res_post_norm1.0.weight", + "backbone.levels.0.blocks.4.res_post_norm1.0.bias", + "backbone.levels.0.blocks.4.res_post_norm2.0.weight", + "backbone.levels.0.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.0 + }, + "layer_5_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.4.dcn.offset.weight", + "backbone.levels.0.blocks.4.dcn.mask.weight", + "backbone.levels.0.blocks.4.dcn.input_proj.weight", + "backbone.levels.0.blocks.4.dcn.output_proj.weight", + "backbone.levels.0.blocks.4.mlp.fc1.weight", + "backbone.levels.0.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.05 + }, + "layer_6_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.norm1.0.weight", + "backbone.levels.0.blocks.5.norm1.0.bias", + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias", + 
"backbone.levels.0.blocks.5.dcn.offset.bias", + "backbone.levels.0.blocks.5.dcn.mask.bias", + "backbone.levels.0.blocks.5.dcn.input_proj.bias", + "backbone.levels.0.blocks.5.dcn.output_proj.bias", + "backbone.levels.0.blocks.5.norm2.0.weight", + "backbone.levels.0.blocks.5.norm2.0.bias", + "backbone.levels.0.blocks.5.mlp.fc1.bias", + "backbone.levels.0.blocks.5.mlp.fc2.bias", + "backbone.levels.0.blocks.5.res_post_norm1.0.weight", + "backbone.levels.0.blocks.5.res_post_norm1.0.bias", + "backbone.levels.0.blocks.5.res_post_norm2.0.weight", + "backbone.levels.0.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.0 + }, + "layer_6_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.5.dcn.offset.weight", + "backbone.levels.0.blocks.5.dcn.mask.weight", + "backbone.levels.0.blocks.5.dcn.input_proj.weight", + "backbone.levels.0.blocks.5.dcn.output_proj.weight", + "backbone.levels.0.blocks.5.mlp.fc1.weight", + "backbone.levels.0.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.05 + }, + "layer_7_no_decay": { + "param_names": [ + "backbone.levels.0.norm.0.weight", + "backbone.levels.0.norm.0.bias", + "backbone.levels.0.downsample.norm.1.weight", + "backbone.levels.0.downsample.norm.1.bias", + "backbone.levels.1.blocks.0.norm1.0.weight", + "backbone.levels.1.blocks.0.norm1.0.bias", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.0.dcn.offset.bias", + "backbone.levels.1.blocks.0.dcn.mask.bias", + "backbone.levels.1.blocks.0.dcn.input_proj.bias", + "backbone.levels.1.blocks.0.dcn.output_proj.bias", + "backbone.levels.1.blocks.0.norm2.0.weight", + "backbone.levels.1.blocks.0.norm2.0.bias", + "backbone.levels.1.blocks.0.mlp.fc1.bias", + "backbone.levels.1.blocks.0.mlp.fc2.bias", + "backbone.levels.1.blocks.0.res_post_norm1.0.weight", + "backbone.levels.1.blocks.0.res_post_norm1.0.bias", + "backbone.levels.1.blocks.0.res_post_norm2.0.weight", + "backbone.levels.1.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.0 + }, + "layer_7_decay": { + "param_names": [ + "backbone.levels.0.downsample.conv.weight", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.0.dcn.offset.weight", + "backbone.levels.1.blocks.0.dcn.mask.weight", + "backbone.levels.1.blocks.0.dcn.input_proj.weight", + "backbone.levels.1.blocks.0.dcn.output_proj.weight", + "backbone.levels.1.blocks.0.mlp.fc1.weight", + "backbone.levels.1.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.05 + }, + "layer_8_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.norm1.0.weight", + "backbone.levels.1.blocks.1.norm1.0.bias", + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.1.dcn.offset.bias", + "backbone.levels.1.blocks.1.dcn.mask.bias", + 
"backbone.levels.1.blocks.1.dcn.input_proj.bias", + "backbone.levels.1.blocks.1.dcn.output_proj.bias", + "backbone.levels.1.blocks.1.norm2.0.weight", + "backbone.levels.1.blocks.1.norm2.0.bias", + "backbone.levels.1.blocks.1.mlp.fc1.bias", + "backbone.levels.1.blocks.1.mlp.fc2.bias", + "backbone.levels.1.blocks.1.res_post_norm1.0.weight", + "backbone.levels.1.blocks.1.res_post_norm1.0.bias", + "backbone.levels.1.blocks.1.res_post_norm2.0.weight", + "backbone.levels.1.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.0 + }, + "layer_8_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.1.dcn.offset.weight", + "backbone.levels.1.blocks.1.dcn.mask.weight", + "backbone.levels.1.blocks.1.dcn.input_proj.weight", + "backbone.levels.1.blocks.1.dcn.output_proj.weight", + "backbone.levels.1.blocks.1.mlp.fc1.weight", + "backbone.levels.1.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.05 + }, + "layer_9_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.norm1.0.weight", + "backbone.levels.1.blocks.2.norm1.0.bias", + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.2.dcn.offset.bias", + "backbone.levels.1.blocks.2.dcn.mask.bias", + "backbone.levels.1.blocks.2.dcn.input_proj.bias", + "backbone.levels.1.blocks.2.dcn.output_proj.bias", + "backbone.levels.1.blocks.2.norm2.0.weight", + "backbone.levels.1.blocks.2.norm2.0.bias", + "backbone.levels.1.blocks.2.mlp.fc1.bias", + "backbone.levels.1.blocks.2.mlp.fc2.bias", + "backbone.levels.1.blocks.2.res_post_norm1.0.weight", + "backbone.levels.1.blocks.2.res_post_norm1.0.bias", + "backbone.levels.1.blocks.2.res_post_norm2.0.weight", + "backbone.levels.1.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.0 + }, + "layer_9_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.2.dcn.offset.weight", + "backbone.levels.1.blocks.2.dcn.mask.weight", + "backbone.levels.1.blocks.2.dcn.input_proj.weight", + "backbone.levels.1.blocks.2.dcn.output_proj.weight", + "backbone.levels.1.blocks.2.mlp.fc1.weight", + "backbone.levels.1.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.05 + }, + "layer_10_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.norm1.0.weight", + "backbone.levels.1.blocks.3.norm1.0.bias", + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.3.dcn.offset.bias", + "backbone.levels.1.blocks.3.dcn.mask.bias", + "backbone.levels.1.blocks.3.dcn.input_proj.bias", + "backbone.levels.1.blocks.3.dcn.output_proj.bias", + "backbone.levels.1.blocks.3.norm2.0.weight", + "backbone.levels.1.blocks.3.norm2.0.bias", + "backbone.levels.1.blocks.3.mlp.fc1.bias", + "backbone.levels.1.blocks.3.mlp.fc2.bias", + 
"backbone.levels.1.blocks.3.res_post_norm1.0.weight", + "backbone.levels.1.blocks.3.res_post_norm1.0.bias", + "backbone.levels.1.blocks.3.res_post_norm2.0.weight", + "backbone.levels.1.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.0 + }, + "layer_10_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.3.dcn.offset.weight", + "backbone.levels.1.blocks.3.dcn.mask.weight", + "backbone.levels.1.blocks.3.dcn.input_proj.weight", + "backbone.levels.1.blocks.3.dcn.output_proj.weight", + "backbone.levels.1.blocks.3.mlp.fc1.weight", + "backbone.levels.1.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.05 + }, + "layer_11_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.norm1.0.weight", + "backbone.levels.1.blocks.4.norm1.0.bias", + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.4.dcn.offset.bias", + "backbone.levels.1.blocks.4.dcn.mask.bias", + "backbone.levels.1.blocks.4.dcn.input_proj.bias", + "backbone.levels.1.blocks.4.dcn.output_proj.bias", + "backbone.levels.1.blocks.4.norm2.0.weight", + "backbone.levels.1.blocks.4.norm2.0.bias", + "backbone.levels.1.blocks.4.mlp.fc1.bias", + "backbone.levels.1.blocks.4.mlp.fc2.bias", + "backbone.levels.1.blocks.4.res_post_norm1.0.weight", + "backbone.levels.1.blocks.4.res_post_norm1.0.bias", + "backbone.levels.1.blocks.4.res_post_norm2.0.weight", + "backbone.levels.1.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.0 + }, + "layer_11_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.4.dcn.offset.weight", + "backbone.levels.1.blocks.4.dcn.mask.weight", + "backbone.levels.1.blocks.4.dcn.input_proj.weight", + "backbone.levels.1.blocks.4.dcn.output_proj.weight", + "backbone.levels.1.blocks.4.mlp.fc1.weight", + "backbone.levels.1.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.05 + }, + "layer_12_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.norm1.0.weight", + "backbone.levels.1.blocks.5.norm1.0.bias", + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.5.dcn.offset.bias", + "backbone.levels.1.blocks.5.dcn.mask.bias", + "backbone.levels.1.blocks.5.dcn.input_proj.bias", + "backbone.levels.1.blocks.5.dcn.output_proj.bias", + "backbone.levels.1.blocks.5.norm2.0.weight", + "backbone.levels.1.blocks.5.norm2.0.bias", + "backbone.levels.1.blocks.5.mlp.fc1.bias", + "backbone.levels.1.blocks.5.mlp.fc2.bias", + "backbone.levels.1.blocks.5.res_post_norm1.0.weight", + "backbone.levels.1.blocks.5.res_post_norm1.0.bias", + "backbone.levels.1.blocks.5.res_post_norm2.0.weight", + "backbone.levels.1.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 
0.0 + }, + "layer_12_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.5.dcn.offset.weight", + "backbone.levels.1.blocks.5.dcn.mask.weight", + "backbone.levels.1.blocks.5.dcn.input_proj.weight", + "backbone.levels.1.blocks.5.dcn.output_proj.weight", + "backbone.levels.1.blocks.5.mlp.fc1.weight", + "backbone.levels.1.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.05 + }, + "layer_13_no_decay": { + "param_names": [ + "backbone.levels.1.norm.0.weight", + "backbone.levels.1.norm.0.bias", + "backbone.levels.1.downsample.norm.1.weight", + "backbone.levels.1.downsample.norm.1.bias", + "backbone.levels.2.blocks.0.norm1.0.weight", + "backbone.levels.2.blocks.0.norm1.0.bias", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.0.dcn.offset.bias", + "backbone.levels.2.blocks.0.dcn.mask.bias", + "backbone.levels.2.blocks.0.dcn.input_proj.bias", + "backbone.levels.2.blocks.0.dcn.output_proj.bias", + "backbone.levels.2.blocks.0.norm2.0.weight", + "backbone.levels.2.blocks.0.norm2.0.bias", + "backbone.levels.2.blocks.0.mlp.fc1.bias", + "backbone.levels.2.blocks.0.mlp.fc2.bias", + "backbone.levels.2.blocks.0.res_post_norm1.0.weight", + "backbone.levels.2.blocks.0.res_post_norm1.0.bias", + "backbone.levels.2.blocks.0.res_post_norm2.0.weight", + "backbone.levels.2.blocks.0.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.0.0.weight", + "backbone.levels.2.post_norms.0.0.bias" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.0 + }, + "layer_13_decay": { + "param_names": [ + "backbone.levels.1.downsample.conv.weight", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.0.dcn.offset.weight", + "backbone.levels.2.blocks.0.dcn.mask.weight", + "backbone.levels.2.blocks.0.dcn.input_proj.weight", + "backbone.levels.2.blocks.0.dcn.output_proj.weight", + "backbone.levels.2.blocks.0.mlp.fc1.weight", + "backbone.levels.2.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.05 + }, + "layer_14_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.norm1.0.weight", + "backbone.levels.2.blocks.1.norm1.0.bias", + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.1.dcn.offset.bias", + "backbone.levels.2.blocks.1.dcn.mask.bias", + "backbone.levels.2.blocks.1.dcn.input_proj.bias", + "backbone.levels.2.blocks.1.dcn.output_proj.bias", + "backbone.levels.2.blocks.1.norm2.0.weight", + "backbone.levels.2.blocks.1.norm2.0.bias", + "backbone.levels.2.blocks.1.mlp.fc1.bias", + "backbone.levels.2.blocks.1.mlp.fc2.bias", + "backbone.levels.2.blocks.1.res_post_norm1.0.weight", + "backbone.levels.2.blocks.1.res_post_norm1.0.bias", + "backbone.levels.2.blocks.1.res_post_norm2.0.weight", + "backbone.levels.2.blocks.1.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.1.0.weight", + "backbone.levels.2.post_norms.1.0.bias" + ], + 
"lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.0 + }, + "layer_14_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.1.dcn.offset.weight", + "backbone.levels.2.blocks.1.dcn.mask.weight", + "backbone.levels.2.blocks.1.dcn.input_proj.weight", + "backbone.levels.2.blocks.1.dcn.output_proj.weight", + "backbone.levels.2.blocks.1.mlp.fc1.weight", + "backbone.levels.2.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.05 + }, + "layer_15_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.norm1.0.weight", + "backbone.levels.2.blocks.2.norm1.0.bias", + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.2.dcn.offset.bias", + "backbone.levels.2.blocks.2.dcn.mask.bias", + "backbone.levels.2.blocks.2.dcn.input_proj.bias", + "backbone.levels.2.blocks.2.dcn.output_proj.bias", + "backbone.levels.2.blocks.2.norm2.0.weight", + "backbone.levels.2.blocks.2.norm2.0.bias", + "backbone.levels.2.blocks.2.mlp.fc1.bias", + "backbone.levels.2.blocks.2.mlp.fc2.bias", + "backbone.levels.2.blocks.2.res_post_norm1.0.weight", + "backbone.levels.2.blocks.2.res_post_norm1.0.bias", + "backbone.levels.2.blocks.2.res_post_norm2.0.weight", + "backbone.levels.2.blocks.2.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.2.0.weight", + "backbone.levels.2.post_norms.2.0.bias" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.0 + }, + "layer_15_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.2.dcn.offset.weight", + "backbone.levels.2.blocks.2.dcn.mask.weight", + "backbone.levels.2.blocks.2.dcn.input_proj.weight", + "backbone.levels.2.blocks.2.dcn.output_proj.weight", + "backbone.levels.2.blocks.2.mlp.fc1.weight", + "backbone.levels.2.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.05 + }, + "layer_16_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.norm1.0.weight", + "backbone.levels.2.blocks.3.norm1.0.bias", + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.3.dcn.offset.bias", + "backbone.levels.2.blocks.3.dcn.mask.bias", + "backbone.levels.2.blocks.3.dcn.input_proj.bias", + "backbone.levels.2.blocks.3.dcn.output_proj.bias", + "backbone.levels.2.blocks.3.norm2.0.weight", + "backbone.levels.2.blocks.3.norm2.0.bias", + "backbone.levels.2.blocks.3.mlp.fc1.bias", + "backbone.levels.2.blocks.3.mlp.fc2.bias", + "backbone.levels.2.blocks.3.res_post_norm1.0.weight", + "backbone.levels.2.blocks.3.res_post_norm1.0.bias", + "backbone.levels.2.blocks.3.res_post_norm2.0.weight", + "backbone.levels.2.blocks.3.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.3.0.weight", + "backbone.levels.2.post_norms.3.0.bias" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.0 + }, + "layer_16_decay": { + "param_names": [ + 
"backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.3.dcn.offset.weight", + "backbone.levels.2.blocks.3.dcn.mask.weight", + "backbone.levels.2.blocks.3.dcn.input_proj.weight", + "backbone.levels.2.blocks.3.dcn.output_proj.weight", + "backbone.levels.2.blocks.3.mlp.fc1.weight", + "backbone.levels.2.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.05 + }, + "layer_17_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.norm1.0.weight", + "backbone.levels.2.blocks.4.norm1.0.bias", + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.4.dcn.offset.bias", + "backbone.levels.2.blocks.4.dcn.mask.bias", + "backbone.levels.2.blocks.4.dcn.input_proj.bias", + "backbone.levels.2.blocks.4.dcn.output_proj.bias", + "backbone.levels.2.blocks.4.norm2.0.weight", + "backbone.levels.2.blocks.4.norm2.0.bias", + "backbone.levels.2.blocks.4.mlp.fc1.bias", + "backbone.levels.2.blocks.4.mlp.fc2.bias", + "backbone.levels.2.blocks.4.res_post_norm1.0.weight", + "backbone.levels.2.blocks.4.res_post_norm1.0.bias", + "backbone.levels.2.blocks.4.res_post_norm2.0.weight", + "backbone.levels.2.blocks.4.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.4.0.weight", + "backbone.levels.2.post_norms.4.0.bias" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.0 + }, + "layer_17_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.4.dcn.offset.weight", + "backbone.levels.2.blocks.4.dcn.mask.weight", + "backbone.levels.2.blocks.4.dcn.input_proj.weight", + "backbone.levels.2.blocks.4.dcn.output_proj.weight", + "backbone.levels.2.blocks.4.mlp.fc1.weight", + "backbone.levels.2.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.05 + }, + "layer_18_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.norm1.0.weight", + "backbone.levels.2.blocks.5.norm1.0.bias", + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.5.dcn.offset.bias", + "backbone.levels.2.blocks.5.dcn.mask.bias", + "backbone.levels.2.blocks.5.dcn.input_proj.bias", + "backbone.levels.2.blocks.5.dcn.output_proj.bias", + "backbone.levels.2.blocks.5.norm2.0.weight", + "backbone.levels.2.blocks.5.norm2.0.bias", + "backbone.levels.2.blocks.5.mlp.fc1.bias", + "backbone.levels.2.blocks.5.mlp.fc2.bias", + "backbone.levels.2.blocks.5.res_post_norm1.0.weight", + "backbone.levels.2.blocks.5.res_post_norm1.0.bias", + "backbone.levels.2.blocks.5.res_post_norm2.0.weight", + "backbone.levels.2.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.0 + }, + "layer_18_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.5.dcn.offset.weight", + "backbone.levels.2.blocks.5.dcn.mask.weight", + 
"backbone.levels.2.blocks.5.dcn.input_proj.weight", + "backbone.levels.2.blocks.5.dcn.output_proj.weight", + "backbone.levels.2.blocks.5.mlp.fc1.weight", + "backbone.levels.2.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.05 + }, + "layer_19_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.norm1.0.weight", + "backbone.levels.2.blocks.6.norm1.0.bias", + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.6.dcn.offset.bias", + "backbone.levels.2.blocks.6.dcn.mask.bias", + "backbone.levels.2.blocks.6.dcn.input_proj.bias", + "backbone.levels.2.blocks.6.dcn.output_proj.bias", + "backbone.levels.2.blocks.6.norm2.0.weight", + "backbone.levels.2.blocks.6.norm2.0.bias", + "backbone.levels.2.blocks.6.mlp.fc1.bias", + "backbone.levels.2.blocks.6.mlp.fc2.bias", + "backbone.levels.2.blocks.6.res_post_norm1.0.weight", + "backbone.levels.2.blocks.6.res_post_norm1.0.bias", + "backbone.levels.2.blocks.6.res_post_norm2.0.weight", + "backbone.levels.2.blocks.6.res_post_norm2.0.bias" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.0 + }, + "layer_19_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.6.dcn.offset.weight", + "backbone.levels.2.blocks.6.dcn.mask.weight", + "backbone.levels.2.blocks.6.dcn.input_proj.weight", + "backbone.levels.2.blocks.6.dcn.output_proj.weight", + "backbone.levels.2.blocks.6.mlp.fc1.weight", + "backbone.levels.2.blocks.6.mlp.fc2.weight" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.05 + }, + "layer_20_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.norm1.0.weight", + "backbone.levels.2.blocks.7.norm1.0.bias", + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.7.dcn.offset.bias", + "backbone.levels.2.blocks.7.dcn.mask.bias", + "backbone.levels.2.blocks.7.dcn.input_proj.bias", + "backbone.levels.2.blocks.7.dcn.output_proj.bias", + "backbone.levels.2.blocks.7.norm2.0.weight", + "backbone.levels.2.blocks.7.norm2.0.bias", + "backbone.levels.2.blocks.7.mlp.fc1.bias", + "backbone.levels.2.blocks.7.mlp.fc2.bias", + "backbone.levels.2.blocks.7.res_post_norm1.0.weight", + "backbone.levels.2.blocks.7.res_post_norm1.0.bias", + "backbone.levels.2.blocks.7.res_post_norm2.0.weight", + "backbone.levels.2.blocks.7.res_post_norm2.0.bias" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.0 + }, + "layer_20_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.7.dcn.offset.weight", + "backbone.levels.2.blocks.7.dcn.mask.weight", + "backbone.levels.2.blocks.7.dcn.input_proj.weight", + "backbone.levels.2.blocks.7.dcn.output_proj.weight", + "backbone.levels.2.blocks.7.mlp.fc1.weight", + "backbone.levels.2.blocks.7.mlp.fc2.weight" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.05 + }, + 
"layer_21_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.norm1.0.weight", + "backbone.levels.2.blocks.8.norm1.0.bias", + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.8.dcn.offset.bias", + "backbone.levels.2.blocks.8.dcn.mask.bias", + "backbone.levels.2.blocks.8.dcn.input_proj.bias", + "backbone.levels.2.blocks.8.dcn.output_proj.bias", + "backbone.levels.2.blocks.8.norm2.0.weight", + "backbone.levels.2.blocks.8.norm2.0.bias", + "backbone.levels.2.blocks.8.mlp.fc1.bias", + "backbone.levels.2.blocks.8.mlp.fc2.bias", + "backbone.levels.2.blocks.8.res_post_norm1.0.weight", + "backbone.levels.2.blocks.8.res_post_norm1.0.bias", + "backbone.levels.2.blocks.8.res_post_norm2.0.weight", + "backbone.levels.2.blocks.8.res_post_norm2.0.bias" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.0 + }, + "layer_21_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.8.dcn.offset.weight", + "backbone.levels.2.blocks.8.dcn.mask.weight", + "backbone.levels.2.blocks.8.dcn.input_proj.weight", + "backbone.levels.2.blocks.8.dcn.output_proj.weight", + "backbone.levels.2.blocks.8.mlp.fc1.weight", + "backbone.levels.2.blocks.8.mlp.fc2.weight" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.05 + }, + "layer_22_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.norm1.0.weight", + "backbone.levels.2.blocks.9.norm1.0.bias", + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.9.dcn.offset.bias", + "backbone.levels.2.blocks.9.dcn.mask.bias", + "backbone.levels.2.blocks.9.dcn.input_proj.bias", + "backbone.levels.2.blocks.9.dcn.output_proj.bias", + "backbone.levels.2.blocks.9.norm2.0.weight", + "backbone.levels.2.blocks.9.norm2.0.bias", + "backbone.levels.2.blocks.9.mlp.fc1.bias", + "backbone.levels.2.blocks.9.mlp.fc2.bias", + "backbone.levels.2.blocks.9.res_post_norm1.0.weight", + "backbone.levels.2.blocks.9.res_post_norm1.0.bias", + "backbone.levels.2.blocks.9.res_post_norm2.0.weight", + "backbone.levels.2.blocks.9.res_post_norm2.0.bias" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.0 + }, + "layer_22_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.9.dcn.offset.weight", + "backbone.levels.2.blocks.9.dcn.mask.weight", + "backbone.levels.2.blocks.9.dcn.input_proj.weight", + "backbone.levels.2.blocks.9.dcn.output_proj.weight", + "backbone.levels.2.blocks.9.mlp.fc1.weight", + "backbone.levels.2.blocks.9.mlp.fc2.weight" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.05 + }, + "layer_23_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.norm1.0.weight", + "backbone.levels.2.blocks.10.norm1.0.bias", + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.bias", + 
"backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.10.dcn.offset.bias", + "backbone.levels.2.blocks.10.dcn.mask.bias", + "backbone.levels.2.blocks.10.dcn.input_proj.bias", + "backbone.levels.2.blocks.10.dcn.output_proj.bias", + "backbone.levels.2.blocks.10.norm2.0.weight", + "backbone.levels.2.blocks.10.norm2.0.bias", + "backbone.levels.2.blocks.10.mlp.fc1.bias", + "backbone.levels.2.blocks.10.mlp.fc2.bias", + "backbone.levels.2.blocks.10.res_post_norm1.0.weight", + "backbone.levels.2.blocks.10.res_post_norm1.0.bias", + "backbone.levels.2.blocks.10.res_post_norm2.0.weight", + "backbone.levels.2.blocks.10.res_post_norm2.0.bias" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.0 + }, + "layer_23_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.10.dcn.offset.weight", + "backbone.levels.2.blocks.10.dcn.mask.weight", + "backbone.levels.2.blocks.10.dcn.input_proj.weight", + "backbone.levels.2.blocks.10.dcn.output_proj.weight", + "backbone.levels.2.blocks.10.mlp.fc1.weight", + "backbone.levels.2.blocks.10.mlp.fc2.weight" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.05 + }, + "layer_24_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.norm1.0.weight", + "backbone.levels.2.blocks.11.norm1.0.bias", + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.11.dcn.offset.bias", + "backbone.levels.2.blocks.11.dcn.mask.bias", + "backbone.levels.2.blocks.11.dcn.input_proj.bias", + "backbone.levels.2.blocks.11.dcn.output_proj.bias", + "backbone.levels.2.blocks.11.norm2.0.weight", + "backbone.levels.2.blocks.11.norm2.0.bias", + "backbone.levels.2.blocks.11.mlp.fc1.bias", + "backbone.levels.2.blocks.11.mlp.fc2.bias", + "backbone.levels.2.blocks.11.res_post_norm1.0.weight", + "backbone.levels.2.blocks.11.res_post_norm1.0.bias", + "backbone.levels.2.blocks.11.res_post_norm2.0.weight", + "backbone.levels.2.blocks.11.res_post_norm2.0.bias" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.0 + }, + "layer_24_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.11.dcn.offset.weight", + "backbone.levels.2.blocks.11.dcn.mask.weight", + "backbone.levels.2.blocks.11.dcn.input_proj.weight", + "backbone.levels.2.blocks.11.dcn.output_proj.weight", + "backbone.levels.2.blocks.11.mlp.fc1.weight", + "backbone.levels.2.blocks.11.mlp.fc2.weight" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.05 + }, + "layer_25_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.norm1.0.weight", + "backbone.levels.2.blocks.12.norm1.0.bias", + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.12.dcn.offset.bias", + "backbone.levels.2.blocks.12.dcn.mask.bias", + "backbone.levels.2.blocks.12.dcn.input_proj.bias", + 
"backbone.levels.2.blocks.12.dcn.output_proj.bias", + "backbone.levels.2.blocks.12.norm2.0.weight", + "backbone.levels.2.blocks.12.norm2.0.bias", + "backbone.levels.2.blocks.12.mlp.fc1.bias", + "backbone.levels.2.blocks.12.mlp.fc2.bias", + "backbone.levels.2.blocks.12.res_post_norm1.0.weight", + "backbone.levels.2.blocks.12.res_post_norm1.0.bias", + "backbone.levels.2.blocks.12.res_post_norm2.0.weight", + "backbone.levels.2.blocks.12.res_post_norm2.0.bias" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.0 + }, + "layer_25_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.12.dcn.offset.weight", + "backbone.levels.2.blocks.12.dcn.mask.weight", + "backbone.levels.2.blocks.12.dcn.input_proj.weight", + "backbone.levels.2.blocks.12.dcn.output_proj.weight", + "backbone.levels.2.blocks.12.mlp.fc1.weight", + "backbone.levels.2.blocks.12.mlp.fc2.weight" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.05 + }, + "layer_26_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.norm1.0.weight", + "backbone.levels.2.blocks.13.norm1.0.bias", + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.13.dcn.offset.bias", + "backbone.levels.2.blocks.13.dcn.mask.bias", + "backbone.levels.2.blocks.13.dcn.input_proj.bias", + "backbone.levels.2.blocks.13.dcn.output_proj.bias", + "backbone.levels.2.blocks.13.norm2.0.weight", + "backbone.levels.2.blocks.13.norm2.0.bias", + "backbone.levels.2.blocks.13.mlp.fc1.bias", + "backbone.levels.2.blocks.13.mlp.fc2.bias", + "backbone.levels.2.blocks.13.res_post_norm1.0.weight", + "backbone.levels.2.blocks.13.res_post_norm1.0.bias", + "backbone.levels.2.blocks.13.res_post_norm2.0.weight", + "backbone.levels.2.blocks.13.res_post_norm2.0.bias" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.0 + }, + "layer_26_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.13.dcn.offset.weight", + "backbone.levels.2.blocks.13.dcn.mask.weight", + "backbone.levels.2.blocks.13.dcn.input_proj.weight", + "backbone.levels.2.blocks.13.dcn.output_proj.weight", + "backbone.levels.2.blocks.13.mlp.fc1.weight", + "backbone.levels.2.blocks.13.mlp.fc2.weight" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.05 + }, + "layer_27_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.norm1.0.weight", + "backbone.levels.2.blocks.14.norm1.0.bias", + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.14.dcn.offset.bias", + "backbone.levels.2.blocks.14.dcn.mask.bias", + "backbone.levels.2.blocks.14.dcn.input_proj.bias", + "backbone.levels.2.blocks.14.dcn.output_proj.bias", + "backbone.levels.2.blocks.14.norm2.0.weight", + "backbone.levels.2.blocks.14.norm2.0.bias", + "backbone.levels.2.blocks.14.mlp.fc1.bias", + "backbone.levels.2.blocks.14.mlp.fc2.bias", + 
"backbone.levels.2.blocks.14.res_post_norm1.0.weight", + "backbone.levels.2.blocks.14.res_post_norm1.0.bias", + "backbone.levels.2.blocks.14.res_post_norm2.0.weight", + "backbone.levels.2.blocks.14.res_post_norm2.0.bias" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.0 + }, + "layer_27_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.14.dcn.offset.weight", + "backbone.levels.2.blocks.14.dcn.mask.weight", + "backbone.levels.2.blocks.14.dcn.input_proj.weight", + "backbone.levels.2.blocks.14.dcn.output_proj.weight", + "backbone.levels.2.blocks.14.mlp.fc1.weight", + "backbone.levels.2.blocks.14.mlp.fc2.weight" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.05 + }, + "layer_28_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.norm1.0.weight", + "backbone.levels.2.blocks.15.norm1.0.bias", + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.15.dcn.offset.bias", + "backbone.levels.2.blocks.15.dcn.mask.bias", + "backbone.levels.2.blocks.15.dcn.input_proj.bias", + "backbone.levels.2.blocks.15.dcn.output_proj.bias", + "backbone.levels.2.blocks.15.norm2.0.weight", + "backbone.levels.2.blocks.15.norm2.0.bias", + "backbone.levels.2.blocks.15.mlp.fc1.bias", + "backbone.levels.2.blocks.15.mlp.fc2.bias", + "backbone.levels.2.blocks.15.res_post_norm1.0.weight", + "backbone.levels.2.blocks.15.res_post_norm1.0.bias", + "backbone.levels.2.blocks.15.res_post_norm2.0.weight", + "backbone.levels.2.blocks.15.res_post_norm2.0.bias" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.0 + }, + "layer_28_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.15.dcn.offset.weight", + "backbone.levels.2.blocks.15.dcn.mask.weight", + "backbone.levels.2.blocks.15.dcn.input_proj.weight", + "backbone.levels.2.blocks.15.dcn.output_proj.weight", + "backbone.levels.2.blocks.15.mlp.fc1.weight", + "backbone.levels.2.blocks.15.mlp.fc2.weight" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.05 + }, + "layer_29_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.norm1.0.weight", + "backbone.levels.2.blocks.16.norm1.0.bias", + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.16.dcn.offset.bias", + "backbone.levels.2.blocks.16.dcn.mask.bias", + "backbone.levels.2.blocks.16.dcn.input_proj.bias", + "backbone.levels.2.blocks.16.dcn.output_proj.bias", + "backbone.levels.2.blocks.16.norm2.0.weight", + "backbone.levels.2.blocks.16.norm2.0.bias", + "backbone.levels.2.blocks.16.mlp.fc1.bias", + "backbone.levels.2.blocks.16.mlp.fc2.bias", + "backbone.levels.2.blocks.16.res_post_norm1.0.weight", + "backbone.levels.2.blocks.16.res_post_norm1.0.bias", + "backbone.levels.2.blocks.16.res_post_norm2.0.weight", + "backbone.levels.2.blocks.16.res_post_norm2.0.bias" + ], + "lr_scale": 0.323533544973709, + 
"lr": 3.2353354497370905e-06, + "weight_decay": 0.0 + }, + "layer_29_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.16.dcn.offset.weight", + "backbone.levels.2.blocks.16.dcn.mask.weight", + "backbone.levels.2.blocks.16.dcn.input_proj.weight", + "backbone.levels.2.blocks.16.dcn.output_proj.weight", + "backbone.levels.2.blocks.16.mlp.fc1.weight", + "backbone.levels.2.blocks.16.mlp.fc2.weight" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.05 + }, + "layer_30_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.norm1.0.weight", + "backbone.levels.2.blocks.17.norm1.0.bias", + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.17.dcn.offset.bias", + "backbone.levels.2.blocks.17.dcn.mask.bias", + "backbone.levels.2.blocks.17.dcn.input_proj.bias", + "backbone.levels.2.blocks.17.dcn.output_proj.bias", + "backbone.levels.2.blocks.17.norm2.0.weight", + "backbone.levels.2.blocks.17.norm2.0.bias", + "backbone.levels.2.blocks.17.mlp.fc1.bias", + "backbone.levels.2.blocks.17.mlp.fc2.bias", + "backbone.levels.2.blocks.17.res_post_norm1.0.weight", + "backbone.levels.2.blocks.17.res_post_norm1.0.bias", + "backbone.levels.2.blocks.17.res_post_norm2.0.weight", + "backbone.levels.2.blocks.17.res_post_norm2.0.bias" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.0 + }, + "layer_30_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.17.dcn.offset.weight", + "backbone.levels.2.blocks.17.dcn.mask.weight", + "backbone.levels.2.blocks.17.dcn.input_proj.weight", + "backbone.levels.2.blocks.17.dcn.output_proj.weight", + "backbone.levels.2.blocks.17.mlp.fc1.weight", + "backbone.levels.2.blocks.17.mlp.fc2.weight" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.05 + }, + "layer_31_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.norm1.0.weight", + "backbone.levels.2.blocks.18.norm1.0.bias", + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.18.dcn.offset.bias", + "backbone.levels.2.blocks.18.dcn.mask.bias", + "backbone.levels.2.blocks.18.dcn.input_proj.bias", + "backbone.levels.2.blocks.18.dcn.output_proj.bias", + "backbone.levels.2.blocks.18.norm2.0.weight", + "backbone.levels.2.blocks.18.norm2.0.bias", + "backbone.levels.2.blocks.18.mlp.fc1.bias", + "backbone.levels.2.blocks.18.mlp.fc2.bias", + "backbone.levels.2.blocks.18.res_post_norm1.0.weight", + "backbone.levels.2.blocks.18.res_post_norm1.0.bias", + "backbone.levels.2.blocks.18.res_post_norm2.0.weight", + "backbone.levels.2.blocks.18.res_post_norm2.0.bias" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.0 + }, + "layer_31_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.weight", + 
"backbone.levels.2.blocks.18.dcn.offset.weight", + "backbone.levels.2.blocks.18.dcn.mask.weight", + "backbone.levels.2.blocks.18.dcn.input_proj.weight", + "backbone.levels.2.blocks.18.dcn.output_proj.weight", + "backbone.levels.2.blocks.18.mlp.fc1.weight", + "backbone.levels.2.blocks.18.mlp.fc2.weight" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.05 + }, + "layer_32_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.norm1.0.weight", + "backbone.levels.2.blocks.19.norm1.0.bias", + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.19.dcn.offset.bias", + "backbone.levels.2.blocks.19.dcn.mask.bias", + "backbone.levels.2.blocks.19.dcn.input_proj.bias", + "backbone.levels.2.blocks.19.dcn.output_proj.bias", + "backbone.levels.2.blocks.19.norm2.0.weight", + "backbone.levels.2.blocks.19.norm2.0.bias", + "backbone.levels.2.blocks.19.mlp.fc1.bias", + "backbone.levels.2.blocks.19.mlp.fc2.bias", + "backbone.levels.2.blocks.19.res_post_norm1.0.weight", + "backbone.levels.2.blocks.19.res_post_norm1.0.bias", + "backbone.levels.2.blocks.19.res_post_norm2.0.weight", + "backbone.levels.2.blocks.19.res_post_norm2.0.bias" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.0 + }, + "layer_32_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.19.dcn.offset.weight", + "backbone.levels.2.blocks.19.dcn.mask.weight", + "backbone.levels.2.blocks.19.dcn.input_proj.weight", + "backbone.levels.2.blocks.19.dcn.output_proj.weight", + "backbone.levels.2.blocks.19.mlp.fc1.weight", + "backbone.levels.2.blocks.19.mlp.fc2.weight" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.05 + }, + "layer_33_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.norm1.0.weight", + "backbone.levels.2.blocks.20.norm1.0.bias", + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.20.dcn.offset.bias", + "backbone.levels.2.blocks.20.dcn.mask.bias", + "backbone.levels.2.blocks.20.dcn.input_proj.bias", + "backbone.levels.2.blocks.20.dcn.output_proj.bias", + "backbone.levels.2.blocks.20.norm2.0.weight", + "backbone.levels.2.blocks.20.norm2.0.bias", + "backbone.levels.2.blocks.20.mlp.fc1.bias", + "backbone.levels.2.blocks.20.mlp.fc2.bias", + "backbone.levels.2.blocks.20.res_post_norm1.0.weight", + "backbone.levels.2.blocks.20.res_post_norm1.0.bias", + "backbone.levels.2.blocks.20.res_post_norm2.0.weight", + "backbone.levels.2.blocks.20.res_post_norm2.0.bias" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.0 + }, + "layer_33_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.20.dcn.offset.weight", + "backbone.levels.2.blocks.20.dcn.mask.weight", + "backbone.levels.2.blocks.20.dcn.input_proj.weight", + "backbone.levels.2.blocks.20.dcn.output_proj.weight", + "backbone.levels.2.blocks.20.mlp.fc1.weight", + 
"backbone.levels.2.blocks.20.mlp.fc2.weight" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.05 + }, + "layer_34_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.norm1.0.weight", + "backbone.levels.2.blocks.21.norm1.0.bias", + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.21.dcn.offset.bias", + "backbone.levels.2.blocks.21.dcn.mask.bias", + "backbone.levels.2.blocks.21.dcn.input_proj.bias", + "backbone.levels.2.blocks.21.dcn.output_proj.bias", + "backbone.levels.2.blocks.21.norm2.0.weight", + "backbone.levels.2.blocks.21.norm2.0.bias", + "backbone.levels.2.blocks.21.mlp.fc1.bias", + "backbone.levels.2.blocks.21.mlp.fc2.bias", + "backbone.levels.2.blocks.21.res_post_norm1.0.weight", + "backbone.levels.2.blocks.21.res_post_norm1.0.bias", + "backbone.levels.2.blocks.21.res_post_norm2.0.weight", + "backbone.levels.2.blocks.21.res_post_norm2.0.bias" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.0 + }, + "layer_34_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.21.dcn.offset.weight", + "backbone.levels.2.blocks.21.dcn.mask.weight", + "backbone.levels.2.blocks.21.dcn.input_proj.weight", + "backbone.levels.2.blocks.21.dcn.output_proj.weight", + "backbone.levels.2.blocks.21.mlp.fc1.weight", + "backbone.levels.2.blocks.21.mlp.fc2.weight" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.05 + }, + "layer_35_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.norm1.0.weight", + "backbone.levels.2.blocks.22.norm1.0.bias", + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.22.dcn.offset.bias", + "backbone.levels.2.blocks.22.dcn.mask.bias", + "backbone.levels.2.blocks.22.dcn.input_proj.bias", + "backbone.levels.2.blocks.22.dcn.output_proj.bias", + "backbone.levels.2.blocks.22.norm2.0.weight", + "backbone.levels.2.blocks.22.norm2.0.bias", + "backbone.levels.2.blocks.22.mlp.fc1.bias", + "backbone.levels.2.blocks.22.mlp.fc2.bias", + "backbone.levels.2.blocks.22.res_post_norm1.0.weight", + "backbone.levels.2.blocks.22.res_post_norm1.0.bias", + "backbone.levels.2.blocks.22.res_post_norm2.0.weight", + "backbone.levels.2.blocks.22.res_post_norm2.0.bias" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.0 + }, + "layer_35_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.22.dcn.offset.weight", + "backbone.levels.2.blocks.22.dcn.mask.weight", + "backbone.levels.2.blocks.22.dcn.input_proj.weight", + "backbone.levels.2.blocks.22.dcn.output_proj.weight", + "backbone.levels.2.blocks.22.mlp.fc1.weight", + "backbone.levels.2.blocks.22.mlp.fc2.weight" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.05 + }, + "layer_36_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.norm1.0.weight", + 
"backbone.levels.2.blocks.23.norm1.0.bias", + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.23.dcn.offset.bias", + "backbone.levels.2.blocks.23.dcn.mask.bias", + "backbone.levels.2.blocks.23.dcn.input_proj.bias", + "backbone.levels.2.blocks.23.dcn.output_proj.bias", + "backbone.levels.2.blocks.23.norm2.0.weight", + "backbone.levels.2.blocks.23.norm2.0.bias", + "backbone.levels.2.blocks.23.mlp.fc1.bias", + "backbone.levels.2.blocks.23.mlp.fc2.bias", + "backbone.levels.2.blocks.23.res_post_norm1.0.weight", + "backbone.levels.2.blocks.23.res_post_norm1.0.bias", + "backbone.levels.2.blocks.23.res_post_norm2.0.weight", + "backbone.levels.2.blocks.23.res_post_norm2.0.bias" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.0 + }, + "layer_36_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.23.dcn.offset.weight", + "backbone.levels.2.blocks.23.dcn.mask.weight", + "backbone.levels.2.blocks.23.dcn.input_proj.weight", + "backbone.levels.2.blocks.23.dcn.output_proj.weight", + "backbone.levels.2.blocks.23.mlp.fc1.weight", + "backbone.levels.2.blocks.23.mlp.fc2.weight" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.05 + }, + "layer_37_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.norm1.0.weight", + "backbone.levels.2.blocks.24.norm1.0.bias", + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.24.dcn.offset.bias", + "backbone.levels.2.blocks.24.dcn.mask.bias", + "backbone.levels.2.blocks.24.dcn.input_proj.bias", + "backbone.levels.2.blocks.24.dcn.output_proj.bias", + "backbone.levels.2.blocks.24.norm2.0.weight", + "backbone.levels.2.blocks.24.norm2.0.bias", + "backbone.levels.2.blocks.24.mlp.fc1.bias", + "backbone.levels.2.blocks.24.mlp.fc2.bias", + "backbone.levels.2.blocks.24.res_post_norm1.0.weight", + "backbone.levels.2.blocks.24.res_post_norm1.0.bias", + "backbone.levels.2.blocks.24.res_post_norm2.0.weight", + "backbone.levels.2.blocks.24.res_post_norm2.0.bias" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.0 + }, + "layer_37_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.24.dcn.offset.weight", + "backbone.levels.2.blocks.24.dcn.mask.weight", + "backbone.levels.2.blocks.24.dcn.input_proj.weight", + "backbone.levels.2.blocks.24.dcn.output_proj.weight", + "backbone.levels.2.blocks.24.mlp.fc1.weight", + "backbone.levels.2.blocks.24.mlp.fc2.weight" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.05 + }, + "layer_38_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.norm1.0.weight", + "backbone.levels.2.blocks.25.norm1.0.bias", + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight", + 
"backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.25.dcn.offset.bias", + "backbone.levels.2.blocks.25.dcn.mask.bias", + "backbone.levels.2.blocks.25.dcn.input_proj.bias", + "backbone.levels.2.blocks.25.dcn.output_proj.bias", + "backbone.levels.2.blocks.25.norm2.0.weight", + "backbone.levels.2.blocks.25.norm2.0.bias", + "backbone.levels.2.blocks.25.mlp.fc1.bias", + "backbone.levels.2.blocks.25.mlp.fc2.bias", + "backbone.levels.2.blocks.25.res_post_norm1.0.weight", + "backbone.levels.2.blocks.25.res_post_norm1.0.bias", + "backbone.levels.2.blocks.25.res_post_norm2.0.weight", + "backbone.levels.2.blocks.25.res_post_norm2.0.bias" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.0 + }, + "layer_38_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.25.dcn.offset.weight", + "backbone.levels.2.blocks.25.dcn.mask.weight", + "backbone.levels.2.blocks.25.dcn.input_proj.weight", + "backbone.levels.2.blocks.25.dcn.output_proj.weight", + "backbone.levels.2.blocks.25.mlp.fc1.weight", + "backbone.levels.2.blocks.25.mlp.fc2.weight" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.05 + }, + "layer_39_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.norm1.0.weight", + "backbone.levels.2.blocks.26.norm1.0.bias", + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.26.dcn.offset.bias", + "backbone.levels.2.blocks.26.dcn.mask.bias", + "backbone.levels.2.blocks.26.dcn.input_proj.bias", + "backbone.levels.2.blocks.26.dcn.output_proj.bias", + "backbone.levels.2.blocks.26.norm2.0.weight", + "backbone.levels.2.blocks.26.norm2.0.bias", + "backbone.levels.2.blocks.26.mlp.fc1.bias", + "backbone.levels.2.blocks.26.mlp.fc2.bias", + "backbone.levels.2.blocks.26.res_post_norm1.0.weight", + "backbone.levels.2.blocks.26.res_post_norm1.0.bias", + "backbone.levels.2.blocks.26.res_post_norm2.0.weight", + "backbone.levels.2.blocks.26.res_post_norm2.0.bias" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.0 + }, + "layer_39_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.26.dcn.offset.weight", + "backbone.levels.2.blocks.26.dcn.mask.weight", + "backbone.levels.2.blocks.26.dcn.input_proj.weight", + "backbone.levels.2.blocks.26.dcn.output_proj.weight", + "backbone.levels.2.blocks.26.mlp.fc1.weight", + "backbone.levels.2.blocks.26.mlp.fc2.weight" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.05 + }, + "layer_40_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.norm1.0.weight", + "backbone.levels.2.blocks.27.norm1.0.bias", + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.27.dcn.offset.bias", + "backbone.levels.2.blocks.27.dcn.mask.bias", + "backbone.levels.2.blocks.27.dcn.input_proj.bias", + "backbone.levels.2.blocks.27.dcn.output_proj.bias", + 
"backbone.levels.2.blocks.27.norm2.0.weight", + "backbone.levels.2.blocks.27.norm2.0.bias", + "backbone.levels.2.blocks.27.mlp.fc1.bias", + "backbone.levels.2.blocks.27.mlp.fc2.bias", + "backbone.levels.2.blocks.27.res_post_norm1.0.weight", + "backbone.levels.2.blocks.27.res_post_norm1.0.bias", + "backbone.levels.2.blocks.27.res_post_norm2.0.weight", + "backbone.levels.2.blocks.27.res_post_norm2.0.bias" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.0 + }, + "layer_40_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.27.dcn.offset.weight", + "backbone.levels.2.blocks.27.dcn.mask.weight", + "backbone.levels.2.blocks.27.dcn.input_proj.weight", + "backbone.levels.2.blocks.27.dcn.output_proj.weight", + "backbone.levels.2.blocks.27.mlp.fc1.weight", + "backbone.levels.2.blocks.27.mlp.fc2.weight" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.05 + }, + "layer_41_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.norm1.0.weight", + "backbone.levels.2.blocks.28.norm1.0.bias", + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.28.dcn.offset.bias", + "backbone.levels.2.blocks.28.dcn.mask.bias", + "backbone.levels.2.blocks.28.dcn.input_proj.bias", + "backbone.levels.2.blocks.28.dcn.output_proj.bias", + "backbone.levels.2.blocks.28.norm2.0.weight", + "backbone.levels.2.blocks.28.norm2.0.bias", + "backbone.levels.2.blocks.28.mlp.fc1.bias", + "backbone.levels.2.blocks.28.mlp.fc2.bias", + "backbone.levels.2.blocks.28.res_post_norm1.0.weight", + "backbone.levels.2.blocks.28.res_post_norm1.0.bias", + "backbone.levels.2.blocks.28.res_post_norm2.0.weight", + "backbone.levels.2.blocks.28.res_post_norm2.0.bias" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.0 + }, + "layer_41_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.28.dcn.offset.weight", + "backbone.levels.2.blocks.28.dcn.mask.weight", + "backbone.levels.2.blocks.28.dcn.input_proj.weight", + "backbone.levels.2.blocks.28.dcn.output_proj.weight", + "backbone.levels.2.blocks.28.mlp.fc1.weight", + "backbone.levels.2.blocks.28.mlp.fc2.weight" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.05 + }, + "layer_42_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.norm1.0.weight", + "backbone.levels.2.blocks.29.norm1.0.bias", + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.29.dcn.offset.bias", + "backbone.levels.2.blocks.29.dcn.mask.bias", + "backbone.levels.2.blocks.29.dcn.input_proj.bias", + "backbone.levels.2.blocks.29.dcn.output_proj.bias", + "backbone.levels.2.blocks.29.norm2.0.weight", + "backbone.levels.2.blocks.29.norm2.0.bias", + "backbone.levels.2.blocks.29.mlp.fc1.bias", + "backbone.levels.2.blocks.29.mlp.fc2.bias", + "backbone.levels.2.blocks.29.res_post_norm1.0.weight", + 
"backbone.levels.2.blocks.29.res_post_norm1.0.bias", + "backbone.levels.2.blocks.29.res_post_norm2.0.weight", + "backbone.levels.2.blocks.29.res_post_norm2.0.bias" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.0 + }, + "layer_42_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.29.dcn.offset.weight", + "backbone.levels.2.blocks.29.dcn.mask.weight", + "backbone.levels.2.blocks.29.dcn.input_proj.weight", + "backbone.levels.2.blocks.29.dcn.output_proj.weight", + "backbone.levels.2.blocks.29.mlp.fc1.weight", + "backbone.levels.2.blocks.29.mlp.fc2.weight" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.05 + }, + "layer_43_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.norm1.0.weight", + "backbone.levels.2.blocks.30.norm1.0.bias", + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.30.dcn.offset.bias", + "backbone.levels.2.blocks.30.dcn.mask.bias", + "backbone.levels.2.blocks.30.dcn.input_proj.bias", + "backbone.levels.2.blocks.30.dcn.output_proj.bias", + "backbone.levels.2.blocks.30.norm2.0.weight", + "backbone.levels.2.blocks.30.norm2.0.bias", + "backbone.levels.2.blocks.30.mlp.fc1.bias", + "backbone.levels.2.blocks.30.mlp.fc2.bias", + "backbone.levels.2.blocks.30.res_post_norm1.0.weight", + "backbone.levels.2.blocks.30.res_post_norm1.0.bias", + "backbone.levels.2.blocks.30.res_post_norm2.0.weight", + "backbone.levels.2.blocks.30.res_post_norm2.0.bias" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.0 + }, + "layer_43_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.30.dcn.offset.weight", + "backbone.levels.2.blocks.30.dcn.mask.weight", + "backbone.levels.2.blocks.30.dcn.input_proj.weight", + "backbone.levels.2.blocks.30.dcn.output_proj.weight", + "backbone.levels.2.blocks.30.mlp.fc1.weight", + "backbone.levels.2.blocks.30.mlp.fc2.weight" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.05 + }, + "layer_44_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.norm1.0.weight", + "backbone.levels.2.blocks.31.norm1.0.bias", + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.31.dcn.offset.bias", + "backbone.levels.2.blocks.31.dcn.mask.bias", + "backbone.levels.2.blocks.31.dcn.input_proj.bias", + "backbone.levels.2.blocks.31.dcn.output_proj.bias", + "backbone.levels.2.blocks.31.norm2.0.weight", + "backbone.levels.2.blocks.31.norm2.0.bias", + "backbone.levels.2.blocks.31.mlp.fc1.bias", + "backbone.levels.2.blocks.31.mlp.fc2.bias", + "backbone.levels.2.blocks.31.res_post_norm1.0.weight", + "backbone.levels.2.blocks.31.res_post_norm1.0.bias", + "backbone.levels.2.blocks.31.res_post_norm2.0.weight", + "backbone.levels.2.blocks.31.res_post_norm2.0.bias" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.0 + }, + 
"layer_44_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.31.dcn.offset.weight", + "backbone.levels.2.blocks.31.dcn.mask.weight", + "backbone.levels.2.blocks.31.dcn.input_proj.weight", + "backbone.levels.2.blocks.31.dcn.output_proj.weight", + "backbone.levels.2.blocks.31.mlp.fc1.weight", + "backbone.levels.2.blocks.31.mlp.fc2.weight" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.05 + }, + "layer_45_no_decay": { + "param_names": [ + "backbone.levels.2.norm.0.weight", + "backbone.levels.2.norm.0.bias", + "backbone.levels.2.downsample.norm.1.weight", + "backbone.levels.2.downsample.norm.1.bias", + "backbone.levels.3.blocks.0.norm1.0.weight", + "backbone.levels.3.blocks.0.norm1.0.bias", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.0.dcn.offset.bias", + "backbone.levels.3.blocks.0.dcn.mask.bias", + "backbone.levels.3.blocks.0.dcn.input_proj.bias", + "backbone.levels.3.blocks.0.dcn.output_proj.bias", + "backbone.levels.3.blocks.0.norm2.0.weight", + "backbone.levels.3.blocks.0.norm2.0.bias", + "backbone.levels.3.blocks.0.mlp.fc1.bias", + "backbone.levels.3.blocks.0.mlp.fc2.bias", + "backbone.levels.3.blocks.0.res_post_norm1.0.weight", + "backbone.levels.3.blocks.0.res_post_norm1.0.bias", + "backbone.levels.3.blocks.0.res_post_norm2.0.weight", + "backbone.levels.3.blocks.0.res_post_norm2.0.bias", + "backbone.levels.3.norm.0.weight", + "backbone.levels.3.norm.0.bias" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.0 + }, + "layer_45_decay": { + "param_names": [ + "backbone.levels.2.downsample.conv.weight", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.0.dcn.offset.weight", + "backbone.levels.3.blocks.0.dcn.mask.weight", + "backbone.levels.3.blocks.0.dcn.input_proj.weight", + "backbone.levels.3.blocks.0.dcn.output_proj.weight", + "backbone.levels.3.blocks.0.mlp.fc1.weight", + "backbone.levels.3.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.05 + }, + "layer_46_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.norm1.0.weight", + "backbone.levels.3.blocks.1.norm1.0.bias", + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.1.dcn.offset.bias", + "backbone.levels.3.blocks.1.dcn.mask.bias", + "backbone.levels.3.blocks.1.dcn.input_proj.bias", + "backbone.levels.3.blocks.1.dcn.output_proj.bias", + "backbone.levels.3.blocks.1.norm2.0.weight", + "backbone.levels.3.blocks.1.norm2.0.bias", + "backbone.levels.3.blocks.1.mlp.fc1.bias", + "backbone.levels.3.blocks.1.mlp.fc2.bias", + "backbone.levels.3.blocks.1.res_post_norm1.0.weight", + "backbone.levels.3.blocks.1.res_post_norm1.0.bias", + "backbone.levels.3.blocks.1.res_post_norm2.0.weight", + "backbone.levels.3.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.0 + }, + "layer_46_decay": { + 
"param_names": [ + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.1.dcn.offset.weight", + "backbone.levels.3.blocks.1.dcn.mask.weight", + "backbone.levels.3.blocks.1.dcn.input_proj.weight", + "backbone.levels.3.blocks.1.dcn.output_proj.weight", + "backbone.levels.3.blocks.1.mlp.fc1.weight", + "backbone.levels.3.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.05 + }, + "layer_47_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.norm1.0.weight", + "backbone.levels.3.blocks.2.norm1.0.bias", + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.2.dcn.offset.bias", + "backbone.levels.3.blocks.2.dcn.mask.bias", + "backbone.levels.3.blocks.2.dcn.input_proj.bias", + "backbone.levels.3.blocks.2.dcn.output_proj.bias", + "backbone.levels.3.blocks.2.norm2.0.weight", + "backbone.levels.3.blocks.2.norm2.0.bias", + "backbone.levels.3.blocks.2.mlp.fc1.bias", + "backbone.levels.3.blocks.2.mlp.fc2.bias", + "backbone.levels.3.blocks.2.res_post_norm1.0.weight", + "backbone.levels.3.blocks.2.res_post_norm1.0.bias", + "backbone.levels.3.blocks.2.res_post_norm2.0.weight", + "backbone.levels.3.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.0 + }, + "layer_47_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.2.dcn.offset.weight", + "backbone.levels.3.blocks.2.dcn.mask.weight", + "backbone.levels.3.blocks.2.dcn.input_proj.weight", + "backbone.levels.3.blocks.2.dcn.output_proj.weight", + "backbone.levels.3.blocks.2.mlp.fc1.weight", + "backbone.levels.3.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.05 + }, + "layer_48_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.norm1.0.weight", + "backbone.levels.3.blocks.3.norm1.0.bias", + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.3.dcn.offset.bias", + "backbone.levels.3.blocks.3.dcn.mask.bias", + "backbone.levels.3.blocks.3.dcn.input_proj.bias", + "backbone.levels.3.blocks.3.dcn.output_proj.bias", + "backbone.levels.3.blocks.3.norm2.0.weight", + "backbone.levels.3.blocks.3.norm2.0.bias", + "backbone.levels.3.blocks.3.mlp.fc1.bias", + "backbone.levels.3.blocks.3.mlp.fc2.bias", + "backbone.levels.3.blocks.3.res_post_norm1.0.weight", + "backbone.levels.3.blocks.3.res_post_norm1.0.bias", + "backbone.levels.3.blocks.3.res_post_norm2.0.weight", + "backbone.levels.3.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.0 + }, + "layer_48_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.3.dcn.offset.weight", + "backbone.levels.3.blocks.3.dcn.mask.weight", + "backbone.levels.3.blocks.3.dcn.input_proj.weight", + 
"backbone.levels.3.blocks.3.dcn.output_proj.weight", + "backbone.levels.3.blocks.3.mlp.fc1.weight", + "backbone.levels.3.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.05 + }, + "layer_49_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.norm1.0.weight", + "backbone.levels.3.blocks.4.norm1.0.bias", + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.4.dcn.offset.bias", + "backbone.levels.3.blocks.4.dcn.mask.bias", + "backbone.levels.3.blocks.4.dcn.input_proj.bias", + "backbone.levels.3.blocks.4.dcn.output_proj.bias", + "backbone.levels.3.blocks.4.norm2.0.weight", + "backbone.levels.3.blocks.4.norm2.0.bias", + "backbone.levels.3.blocks.4.mlp.fc1.bias", + "backbone.levels.3.blocks.4.mlp.fc2.bias", + "backbone.levels.3.blocks.4.res_post_norm1.0.weight", + "backbone.levels.3.blocks.4.res_post_norm1.0.bias", + "backbone.levels.3.blocks.4.res_post_norm2.0.weight", + "backbone.levels.3.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.0 + }, + "layer_49_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.4.dcn.offset.weight", + "backbone.levels.3.blocks.4.dcn.mask.weight", + "backbone.levels.3.blocks.4.dcn.input_proj.weight", + "backbone.levels.3.blocks.4.dcn.output_proj.weight", + "backbone.levels.3.blocks.4.mlp.fc1.weight", + "backbone.levels.3.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.05 + }, + "layer_50_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.norm1.0.weight", + "backbone.levels.3.blocks.5.norm1.0.bias", + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.5.dcn.offset.bias", + "backbone.levels.3.blocks.5.dcn.mask.bias", + "backbone.levels.3.blocks.5.dcn.input_proj.bias", + "backbone.levels.3.blocks.5.dcn.output_proj.bias", + "backbone.levels.3.blocks.5.norm2.0.weight", + "backbone.levels.3.blocks.5.norm2.0.bias", + "backbone.levels.3.blocks.5.mlp.fc1.bias", + "backbone.levels.3.blocks.5.mlp.fc2.bias", + "backbone.levels.3.blocks.5.res_post_norm1.0.weight", + "backbone.levels.3.blocks.5.res_post_norm1.0.bias", + "backbone.levels.3.blocks.5.res_post_norm2.0.weight", + "backbone.levels.3.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.0 + }, + "layer_50_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.5.dcn.offset.weight", + "backbone.levels.3.blocks.5.dcn.mask.weight", + "backbone.levels.3.blocks.5.dcn.input_proj.weight", + "backbone.levels.3.blocks.5.dcn.output_proj.weight", + "backbone.levels.3.blocks.5.mlp.fc1.weight", + "backbone.levels.3.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.05 + }, + "layer_51_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.weight", + "decode_head.pixel_decoder.input_convs.1.conv.weight", + 
"decode_head.pixel_decoder.input_convs.2.conv.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.level_encoding.weight", + "decode_head.pixel_decoder.lateral_convs.0.conv.weight", + "decode_head.pixel_decoder.output_convs.0.conv.weight", + "decode_head.pixel_decoder.mask_feature.weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight", + 
"decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.05 + }, + "layer_51_no_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.bias", + "decode_head.pixel_decoder.input_convs.0.gn.weight", + "decode_head.pixel_decoder.input_convs.0.gn.bias", + "decode_head.pixel_decoder.input_convs.1.conv.bias", + 
"decode_head.pixel_decoder.input_convs.1.gn.weight", + "decode_head.pixel_decoder.input_convs.1.gn.bias", + "decode_head.pixel_decoder.input_convs.2.conv.bias", + "decode_head.pixel_decoder.input_convs.2.gn.weight", + "decode_head.pixel_decoder.input_convs.2.gn.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight", + 
"decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.bias", + "decode_head.pixel_decoder.lateral_convs.0.gn.weight", + "decode_head.pixel_decoder.lateral_convs.0.gn.bias", + "decode_head.pixel_decoder.output_convs.0.gn.weight", + "decode_head.pixel_decoder.output_convs.0.gn.bias", + "decode_head.pixel_decoder.mask_feature.bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.0.norms.0.weight", + "decode_head.transformer_decoder.layers.0.norms.0.bias", + "decode_head.transformer_decoder.layers.0.norms.1.weight", + "decode_head.transformer_decoder.layers.0.norms.1.bias", + "decode_head.transformer_decoder.layers.0.norms.2.weight", + "decode_head.transformer_decoder.layers.0.norms.2.bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.1.norms.0.weight", + "decode_head.transformer_decoder.layers.1.norms.0.bias", + "decode_head.transformer_decoder.layers.1.norms.1.weight", + "decode_head.transformer_decoder.layers.1.norms.1.bias", + "decode_head.transformer_decoder.layers.1.norms.2.weight", + "decode_head.transformer_decoder.layers.1.norms.2.bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias", + 
"decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.2.norms.0.weight", + "decode_head.transformer_decoder.layers.2.norms.0.bias", + "decode_head.transformer_decoder.layers.2.norms.1.weight", + "decode_head.transformer_decoder.layers.2.norms.1.bias", + "decode_head.transformer_decoder.layers.2.norms.2.weight", + "decode_head.transformer_decoder.layers.2.norms.2.bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.3.norms.0.weight", + "decode_head.transformer_decoder.layers.3.norms.0.bias", + "decode_head.transformer_decoder.layers.3.norms.1.weight", + "decode_head.transformer_decoder.layers.3.norms.1.bias", + "decode_head.transformer_decoder.layers.3.norms.2.weight", + "decode_head.transformer_decoder.layers.3.norms.2.bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.4.norms.0.weight", + "decode_head.transformer_decoder.layers.4.norms.0.bias", + "decode_head.transformer_decoder.layers.4.norms.1.weight", + "decode_head.transformer_decoder.layers.4.norms.1.bias", + "decode_head.transformer_decoder.layers.4.norms.2.weight", + "decode_head.transformer_decoder.layers.4.norms.2.bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.5.norms.0.weight", + "decode_head.transformer_decoder.layers.5.norms.0.bias", + "decode_head.transformer_decoder.layers.5.norms.1.weight", + "decode_head.transformer_decoder.layers.5.norms.1.bias", + "decode_head.transformer_decoder.layers.5.norms.2.weight", + "decode_head.transformer_decoder.layers.5.norms.2.bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias", + 
"decode_head.transformer_decoder.layers.6.norms.0.weight", + "decode_head.transformer_decoder.layers.6.norms.0.bias", + "decode_head.transformer_decoder.layers.6.norms.1.weight", + "decode_head.transformer_decoder.layers.6.norms.1.bias", + "decode_head.transformer_decoder.layers.6.norms.2.weight", + "decode_head.transformer_decoder.layers.6.norms.2.bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.7.norms.0.weight", + "decode_head.transformer_decoder.layers.7.norms.0.bias", + "decode_head.transformer_decoder.layers.7.norms.1.weight", + "decode_head.transformer_decoder.layers.7.norms.1.bias", + "decode_head.transformer_decoder.layers.7.norms.2.weight", + "decode_head.transformer_decoder.layers.7.norms.2.bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.8.norms.0.weight", + "decode_head.transformer_decoder.layers.8.norms.0.bias", + "decode_head.transformer_decoder.layers.8.norms.1.weight", + "decode_head.transformer_decoder.layers.8.norms.1.bias", + "decode_head.transformer_decoder.layers.8.norms.2.weight", + "decode_head.transformer_decoder.layers.8.norms.2.bias", + "decode_head.transformer_decoder.post_norm.weight", + "decode_head.transformer_decoder.post_norm.bias" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.0 + } +} +2025-05-28 02:55:44,925 - mmseg - INFO - Loaded 1159 images +2025-05-28 02:55:44,926 - mmseg - INFO - load checkpoint from http path: https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth +2025-05-28 02:55:50,683 - mmseg - INFO - Start running, host: yiming@pasteur2.stanford.edu, work_dir: /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 +2025-05-28 02:55:50,683 - mmseg - INFO - Hooks will be executed in the following order: +before_run: +(VERY_HIGH ) PolyLrUpdaterHook +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_epoch: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_iter: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook + -------------------- +after_train_iter: +(ABOVE_NORMAL) OptimizerHook +(NORMAL ) CheckpointHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_train_epoch: +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_epoch: +(LOW ) 
IterTimerHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_epoch: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_run: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +2025-05-28 02:55:50,684 - mmseg - INFO - workflow: [('train', 1)], max: 80000 iters +2025-05-28 02:55:50,684 - mmseg - INFO - Checkpoints will be saved to /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 by HardDiskBackend. diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025504.log.json b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025504.log.json new file mode 100644 index 0000000000000000000000000000000000000000..01d530fe7897dd5cee96535268008621756858b8 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025504.log.json @@ -0,0 +1 @@ +{"env_info": "sys.platform: linux\nPython: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ]\nCUDA available: True\nGPU 0: NVIDIA TITAN RTX\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 12.2, V12.2.91\nGCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\nPyTorch: 1.11.0+cu113\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n\nTorchVision: 0.12.0+cu113\nOpenCV: 4.11.0\nMMCV: 1.5.0\nMMCV Compiler: GCC 7.3\nMMCV CUDA 
Compiler: 11.3\nMMSegmentation: 0.27.0+12dc934", "seed": 804528400, "exp_name": "mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py", "mmseg_version": "0.27.0+12dc934", "config": "num_things_classes = 100\nnum_stuff_classes = 50\nnum_classes = 19\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoderMask2Former',\n pretrained=None,\n backbone=dict(\n type='InternImage',\n core_op='DCNv3',\n channels=320,\n depths=[6, 6, 32, 6],\n groups=[10, 20, 40, 80],\n mlp_ratio=4.0,\n drop_path_rate=0.5,\n norm_layer='LN',\n layer_scale=None,\n offset_scale=1.0,\n post_norm=False,\n dw_kernel_size=5,\n res_post_norm=True,\n level2_post_norm=True,\n level2_post_norm_block_ids=[5, 11, 17, 23, 29],\n center_feature_scale=True,\n with_cp=False,\n out_indices=(0, 1, 2, 3),\n init_cfg=None),\n decode_head=dict(\n type='Mask2FormerHead',\n in_channels=[320, 640, 1280, 2560],\n feat_channels=256,\n out_channels=256,\n in_index=[0, 1, 2, 3],\n num_things_classes=100,\n num_stuff_classes=50,\n num_queries=100,\n num_transformer_feat_level=3,\n pixel_decoder=dict(\n type='MSDeformAttnPixelDecoder',\n num_outs=3,\n norm_cfg=dict(type='GN', num_groups=32),\n act_cfg=dict(type='ReLU'),\n encoder=dict(\n type='DetrTransformerEncoder',\n num_layers=6,\n transformerlayers=dict(\n type='BaseTransformerLayer',\n attn_cfgs=dict(\n type='MultiScaleDeformableAttention',\n embed_dims=256,\n num_heads=8,\n num_levels=3,\n num_points=4,\n im2col_step=64,\n dropout=0.0,\n batch_first=False,\n norm_cfg=None,\n init_cfg=None),\n ffn_cfgs=dict(\n type='FFN',\n embed_dims=256,\n feedforward_channels=1024,\n num_fcs=2,\n ffn_drop=0.0,\n act_cfg=dict(type='ReLU', inplace=True),\n with_cp=False),\n operation_order=('self_attn', 'norm', 'ffn', 'norm')),\n init_cfg=None),\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n init_cfg=None),\n enforce_decoder_input_project=False,\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n transformer_decoder=dict(\n type='DetrTransformerDecoder',\n return_intermediate=True,\n num_layers=9,\n transformerlayers=dict(\n type='DetrTransformerDecoderLayer',\n attn_cfgs=dict(\n type='MultiheadAttention',\n embed_dims=256,\n num_heads=8,\n attn_drop=0.0,\n proj_drop=0.0,\n dropout_layer=None,\n batch_first=False),\n ffn_cfgs=dict(\n embed_dims=256,\n feedforward_channels=2048,\n num_fcs=2,\n act_cfg=dict(type='ReLU', inplace=True),\n ffn_drop=0.0,\n dropout_layer=None,\n add_identity=True,\n with_cp=False),\n feedforward_channels=2048,\n operation_order=('cross_attn', 'norm', 'self_attn', 'norm',\n 'ffn', 'norm')),\n init_cfg=None),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=2.0,\n reduction='mean',\n class_weight=[\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1\n ]),\n loss_mask=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=5.0),\n loss_dice=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n reduction='mean',\n naive_dice=True,\n eps=1.0,\n loss_weight=5.0),\n num_classes=19,\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n 
sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512))),\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512)),\n init_cfg=None)\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncrop_size = (1024, 1024)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(\n type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(\n type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n ],\n split='splits/fold_1_train_split.txt'),\n val=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', 
keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ],\n split='splits/fold_1_val_split.txt'),\n test=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/val/',\n ann_dir='gtFine/val/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n dict(type='TensorboardLoggerHook')\n ])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth'\nresume_from = None\nworkflow = [('train', 1)]\ncudnn_benchmark = True\noptimizer = dict(\n type='AdamW',\n lr=1e-05,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n constructor='CustomLayerDecayOptimizerConstructor',\n paramwise_cfg=dict(\n num_layers=50,\n layer_decay_rate=0.95,\n depths=[6, 6, 32, 6],\n offset_lr_scale=1.0))\noptimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))\nlr_config = dict(\n policy='poly',\n warmup='linear',\n warmup_iters=1500,\n warmup_ratio=1e-06,\n power=1.0,\n min_lr=0.0,\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=80000)\ncheckpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1)\nevaluation = dict(\n interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU')\nwork_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1'\ngpu_ids = range(0, 1)\nauto_resume = False\ndevice = 'cuda'\nseed = 804528400\n", "CLASSES": ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"], "PALETTE": [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]], "hook_msgs": {}} diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025728.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025728.log new file mode 100644 index 0000000000000000000000000000000000000000..402b8bcef7f7ce48285ff6295df9cebaa099c6d7 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025728.log @@ -0,0 +1,7658 @@ +2025-05-28 02:57:28,792 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 02:57:28,793 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 02:57:28,830 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) 
Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 02:57:28,831 - mmseg - INFO - Distributed training: True +2025-05-28 02:57:29,309 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + 
batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + 
dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 02:57:29,310 - mmseg - INFO - Set random seed to 2000367376, deterministic: False +2025-05-28 02:57:29,311 - mmseg - INFO - using core type: DCNv3 +2025-05-28 02:57:29,311 - mmseg - INFO - using 
activation layer: GELU +2025-05-28 02:57:29,311 - mmseg - INFO - using main norm layer: LN +2025-05-28 02:57:29,311 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 02:57:29,311 - mmseg - INFO - level2_post_norm: True +2025-05-28 02:57:29,312 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 02:57:29,312 - mmseg - INFO - res_post_norm: True +2025-05-28 02:57:29,312 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 02:57:51,558 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined 
`init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and 
after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.bias - torch.Size([640]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.weight - 
torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.1.blocks.3.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.bias - 
torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.weight - torch.Size([1280]): 
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.2.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.3.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.input_proj.bias - torch.Size([1280]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.8.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.9.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.output_proj.weight - 
torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.mask.weight - torch.Size([360, 1280]): 
+Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - 
torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
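+As a rough illustration of what the two recurring messages in this log mean, the following minimal PyTorch sketch (the module, its layer names, and the init scheme are hypothetical stand-ins, not the actual InternImage or Mask2Former code) snapshots every parameter, calls a user-defined `init_weights`, and reports which parameters were re-initialized and which kept the values they had before the call:
+
+    # Illustrative only: how a before/after init_weights report like the one in
+    # this log can be produced. All names below are hypothetical examples.
+    import torch
+    import torch.nn as nn
+
+    class TinyBackbone(nn.Module):
+        def __init__(self):
+            super().__init__()
+            self.fc1 = nn.Linear(16, 64)   # analogous to mlp.fc1
+            self.norm = nn.LayerNorm(16)   # analogous to norm1.0 / norm2.0
+
+        def init_weights(self):
+            # Custom init: only the Linear weight is re-initialized, so the
+            # LayerNorm parameters and the biases keep their constructed values.
+            nn.init.trunc_normal_(self.fc1.weight, std=0.02)
+
+    def report_init(model):
+        before = {n: p.detach().clone() for n, p in model.named_parameters()}
+        model.init_weights()
+        for name, param in model.named_parameters():
+            shape = tuple(param.shape)
+            if torch.equal(before[name], param.detach()):
+                print(f"{name} - {shape}: value unchanged by init_weights")
+            else:
+                print(f"{name} - {shape}: re-initialized by user-defined init_weights")
+
+    report_init(TinyBackbone())
+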
+backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined 
`init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.28.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.29.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - 
torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.bias - torch.Size([2560]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.input_proj.weight - 
torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.conv.weight - torch.Size([256, 2560, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.0.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.conv.weight - torch.Size([256, 1280, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.1.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.input_convs.1.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.conv.weight - torch.Size([256, 640, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.2.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.level_encoding.weight - torch.Size([3, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.conv.weight - torch.Size([256, 320, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.lateral_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.conv.weight - torch.Size([256, 256, 3, 3]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.output_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.mask_feature.weight - torch.Size([256, 256, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.mask_feature.bias - torch.Size([256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - 
torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - 
torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former +2025-05-28 02:58:05,901 - mmseg - INFO - Loaded 2316 images +2025-05-28 02:58:05,901 - mmseg - INFO - Built 
training dataset from config: {'type': 'CityscapesDataset', 'data_root': '/pasteur/u/yiming/homework4/cityscapes', 'img_dir': 'leftImg8bit/', 'ann_dir': 'gtFine/', 'pipeline': [{'type': 'LoadImageFromFile'}, {'type': 'LoadAnnotations'}, {'type': 'Resize', 'img_scale': (2048, 1024), 'ratio_range': (0.5, 2.0)}, {'type': 'RandomCrop', 'crop_size': (1024, 1024), 'cat_max_ratio': 0.75}, {'type': 'RandomFlip', 'prob': 0.5}, {'type': 'PhotoMetricDistortion'}, {'type': 'Normalize', 'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375], 'to_rgb': True}, {'type': 'Pad', 'size': (1024, 1024), 'pad_val': 0, 'seg_pad_val': 255}, {'type': 'ToMask'}, {'type': 'DefaultFormatBundle'}, {'type': 'Collect', 'keys': ['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']}], 'split': 'splits/fold_1_train_split.txt'} +2025-05-28 02:58:05,902 - mmseg - INFO - Number of samples in training dataset: 2316 +2025-05-28 02:58:11,545 - mmseg - INFO - {'num_layers': 50, 'layer_decay_rate': 0.95, 'depths': [6, 6, 32, 6], 'offset_lr_scale': 1.0} +2025-05-28 02:58:11,545 - mmseg - INFO - Build CustomLayerDecayOptimizerConstructor 0.950000 - 52 +2025-05-28 02:58:11,555 - mmseg - INFO - Param groups = { + "layer_0_decay": { + "param_names": [ + "backbone.patch_embed.conv1.weight", + "backbone.patch_embed.conv2.weight", + "decode_head.query_embed.weight", + "decode_head.query_feat.weight", + "decode_head.level_embed.weight", + "decode_head.cls_embed.weight", + "decode_head.mask_embed.0.weight", + "decode_head.mask_embed.2.weight", + "decode_head.mask_embed.4.weight" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.05 + }, + "layer_0_no_decay": { + "param_names": [ + "backbone.patch_embed.conv1.bias", + "backbone.patch_embed.norm1.1.weight", + "backbone.patch_embed.norm1.1.bias", + "backbone.patch_embed.conv2.bias", + "backbone.patch_embed.norm2.1.weight", + "backbone.patch_embed.norm2.1.bias", + "decode_head.cls_embed.bias", + "decode_head.mask_embed.0.bias", + "decode_head.mask_embed.2.bias", + "decode_head.mask_embed.4.bias" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.0 + }, + "layer_1_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.norm1.0.weight", + "backbone.levels.0.blocks.0.norm1.0.bias", + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.0.dcn.offset.bias", + "backbone.levels.0.blocks.0.dcn.mask.bias", + "backbone.levels.0.blocks.0.dcn.input_proj.bias", + "backbone.levels.0.blocks.0.dcn.output_proj.bias", + "backbone.levels.0.blocks.0.norm2.0.weight", + "backbone.levels.0.blocks.0.norm2.0.bias", + "backbone.levels.0.blocks.0.mlp.fc1.bias", + "backbone.levels.0.blocks.0.mlp.fc2.bias", + "backbone.levels.0.blocks.0.res_post_norm1.0.weight", + "backbone.levels.0.blocks.0.res_post_norm1.0.bias", + "backbone.levels.0.blocks.0.res_post_norm2.0.weight", + "backbone.levels.0.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.0 + }, + "layer_1_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.0.dcn.offset.weight", + "backbone.levels.0.blocks.0.dcn.mask.weight", + "backbone.levels.0.blocks.0.dcn.input_proj.weight", + 
"backbone.levels.0.blocks.0.dcn.output_proj.weight", + "backbone.levels.0.blocks.0.mlp.fc1.weight", + "backbone.levels.0.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.05 + }, + "layer_2_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.norm1.0.weight", + "backbone.levels.0.blocks.1.norm1.0.bias", + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.1.dcn.offset.bias", + "backbone.levels.0.blocks.1.dcn.mask.bias", + "backbone.levels.0.blocks.1.dcn.input_proj.bias", + "backbone.levels.0.blocks.1.dcn.output_proj.bias", + "backbone.levels.0.blocks.1.norm2.0.weight", + "backbone.levels.0.blocks.1.norm2.0.bias", + "backbone.levels.0.blocks.1.mlp.fc1.bias", + "backbone.levels.0.blocks.1.mlp.fc2.bias", + "backbone.levels.0.blocks.1.res_post_norm1.0.weight", + "backbone.levels.0.blocks.1.res_post_norm1.0.bias", + "backbone.levels.0.blocks.1.res_post_norm2.0.weight", + "backbone.levels.0.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.0 + }, + "layer_2_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.1.dcn.offset.weight", + "backbone.levels.0.blocks.1.dcn.mask.weight", + "backbone.levels.0.blocks.1.dcn.input_proj.weight", + "backbone.levels.0.blocks.1.dcn.output_proj.weight", + "backbone.levels.0.blocks.1.mlp.fc1.weight", + "backbone.levels.0.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.05 + }, + "layer_3_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.norm1.0.weight", + "backbone.levels.0.blocks.2.norm1.0.bias", + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.2.dcn.offset.bias", + "backbone.levels.0.blocks.2.dcn.mask.bias", + "backbone.levels.0.blocks.2.dcn.input_proj.bias", + "backbone.levels.0.blocks.2.dcn.output_proj.bias", + "backbone.levels.0.blocks.2.norm2.0.weight", + "backbone.levels.0.blocks.2.norm2.0.bias", + "backbone.levels.0.blocks.2.mlp.fc1.bias", + "backbone.levels.0.blocks.2.mlp.fc2.bias", + "backbone.levels.0.blocks.2.res_post_norm1.0.weight", + "backbone.levels.0.blocks.2.res_post_norm1.0.bias", + "backbone.levels.0.blocks.2.res_post_norm2.0.weight", + "backbone.levels.0.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.0 + }, + "layer_3_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.2.dcn.offset.weight", + "backbone.levels.0.blocks.2.dcn.mask.weight", + "backbone.levels.0.blocks.2.dcn.input_proj.weight", + "backbone.levels.0.blocks.2.dcn.output_proj.weight", + "backbone.levels.0.blocks.2.mlp.fc1.weight", + "backbone.levels.0.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.05 + }, + "layer_4_no_decay": { + "param_names": [ + 
"backbone.levels.0.blocks.3.norm1.0.weight", + "backbone.levels.0.blocks.3.norm1.0.bias", + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.3.dcn.offset.bias", + "backbone.levels.0.blocks.3.dcn.mask.bias", + "backbone.levels.0.blocks.3.dcn.input_proj.bias", + "backbone.levels.0.blocks.3.dcn.output_proj.bias", + "backbone.levels.0.blocks.3.norm2.0.weight", + "backbone.levels.0.blocks.3.norm2.0.bias", + "backbone.levels.0.blocks.3.mlp.fc1.bias", + "backbone.levels.0.blocks.3.mlp.fc2.bias", + "backbone.levels.0.blocks.3.res_post_norm1.0.weight", + "backbone.levels.0.blocks.3.res_post_norm1.0.bias", + "backbone.levels.0.blocks.3.res_post_norm2.0.weight", + "backbone.levels.0.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.0 + }, + "layer_4_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.3.dcn.offset.weight", + "backbone.levels.0.blocks.3.dcn.mask.weight", + "backbone.levels.0.blocks.3.dcn.input_proj.weight", + "backbone.levels.0.blocks.3.dcn.output_proj.weight", + "backbone.levels.0.blocks.3.mlp.fc1.weight", + "backbone.levels.0.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.05 + }, + "layer_5_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.norm1.0.weight", + "backbone.levels.0.blocks.4.norm1.0.bias", + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.4.dcn.offset.bias", + "backbone.levels.0.blocks.4.dcn.mask.bias", + "backbone.levels.0.blocks.4.dcn.input_proj.bias", + "backbone.levels.0.blocks.4.dcn.output_proj.bias", + "backbone.levels.0.blocks.4.norm2.0.weight", + "backbone.levels.0.blocks.4.norm2.0.bias", + "backbone.levels.0.blocks.4.mlp.fc1.bias", + "backbone.levels.0.blocks.4.mlp.fc2.bias", + "backbone.levels.0.blocks.4.res_post_norm1.0.weight", + "backbone.levels.0.blocks.4.res_post_norm1.0.bias", + "backbone.levels.0.blocks.4.res_post_norm2.0.weight", + "backbone.levels.0.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.0 + }, + "layer_5_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.4.dcn.offset.weight", + "backbone.levels.0.blocks.4.dcn.mask.weight", + "backbone.levels.0.blocks.4.dcn.input_proj.weight", + "backbone.levels.0.blocks.4.dcn.output_proj.weight", + "backbone.levels.0.blocks.4.mlp.fc1.weight", + "backbone.levels.0.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.05 + }, + "layer_6_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.norm1.0.weight", + "backbone.levels.0.blocks.5.norm1.0.bias", + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight", + 
"backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.5.dcn.offset.bias", + "backbone.levels.0.blocks.5.dcn.mask.bias", + "backbone.levels.0.blocks.5.dcn.input_proj.bias", + "backbone.levels.0.blocks.5.dcn.output_proj.bias", + "backbone.levels.0.blocks.5.norm2.0.weight", + "backbone.levels.0.blocks.5.norm2.0.bias", + "backbone.levels.0.blocks.5.mlp.fc1.bias", + "backbone.levels.0.blocks.5.mlp.fc2.bias", + "backbone.levels.0.blocks.5.res_post_norm1.0.weight", + "backbone.levels.0.blocks.5.res_post_norm1.0.bias", + "backbone.levels.0.blocks.5.res_post_norm2.0.weight", + "backbone.levels.0.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.0 + }, + "layer_6_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.5.dcn.offset.weight", + "backbone.levels.0.blocks.5.dcn.mask.weight", + "backbone.levels.0.blocks.5.dcn.input_proj.weight", + "backbone.levels.0.blocks.5.dcn.output_proj.weight", + "backbone.levels.0.blocks.5.mlp.fc1.weight", + "backbone.levels.0.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.05 + }, + "layer_7_no_decay": { + "param_names": [ + "backbone.levels.0.norm.0.weight", + "backbone.levels.0.norm.0.bias", + "backbone.levels.0.downsample.norm.1.weight", + "backbone.levels.0.downsample.norm.1.bias", + "backbone.levels.1.blocks.0.norm1.0.weight", + "backbone.levels.1.blocks.0.norm1.0.bias", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.0.dcn.offset.bias", + "backbone.levels.1.blocks.0.dcn.mask.bias", + "backbone.levels.1.blocks.0.dcn.input_proj.bias", + "backbone.levels.1.blocks.0.dcn.output_proj.bias", + "backbone.levels.1.blocks.0.norm2.0.weight", + "backbone.levels.1.blocks.0.norm2.0.bias", + "backbone.levels.1.blocks.0.mlp.fc1.bias", + "backbone.levels.1.blocks.0.mlp.fc2.bias", + "backbone.levels.1.blocks.0.res_post_norm1.0.weight", + "backbone.levels.1.blocks.0.res_post_norm1.0.bias", + "backbone.levels.1.blocks.0.res_post_norm2.0.weight", + "backbone.levels.1.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.0 + }, + "layer_7_decay": { + "param_names": [ + "backbone.levels.0.downsample.conv.weight", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.0.dcn.offset.weight", + "backbone.levels.1.blocks.0.dcn.mask.weight", + "backbone.levels.1.blocks.0.dcn.input_proj.weight", + "backbone.levels.1.blocks.0.dcn.output_proj.weight", + "backbone.levels.1.blocks.0.mlp.fc1.weight", + "backbone.levels.1.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.05 + }, + "layer_8_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.norm1.0.weight", + "backbone.levels.1.blocks.1.norm1.0.bias", + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.1.dcn.offset.bias", + 
"backbone.levels.1.blocks.1.dcn.mask.bias", + "backbone.levels.1.blocks.1.dcn.input_proj.bias", + "backbone.levels.1.blocks.1.dcn.output_proj.bias", + "backbone.levels.1.blocks.1.norm2.0.weight", + "backbone.levels.1.blocks.1.norm2.0.bias", + "backbone.levels.1.blocks.1.mlp.fc1.bias", + "backbone.levels.1.blocks.1.mlp.fc2.bias", + "backbone.levels.1.blocks.1.res_post_norm1.0.weight", + "backbone.levels.1.blocks.1.res_post_norm1.0.bias", + "backbone.levels.1.blocks.1.res_post_norm2.0.weight", + "backbone.levels.1.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.0 + }, + "layer_8_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.1.dcn.offset.weight", + "backbone.levels.1.blocks.1.dcn.mask.weight", + "backbone.levels.1.blocks.1.dcn.input_proj.weight", + "backbone.levels.1.blocks.1.dcn.output_proj.weight", + "backbone.levels.1.blocks.1.mlp.fc1.weight", + "backbone.levels.1.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.05 + }, + "layer_9_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.norm1.0.weight", + "backbone.levels.1.blocks.2.norm1.0.bias", + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.2.dcn.offset.bias", + "backbone.levels.1.blocks.2.dcn.mask.bias", + "backbone.levels.1.blocks.2.dcn.input_proj.bias", + "backbone.levels.1.blocks.2.dcn.output_proj.bias", + "backbone.levels.1.blocks.2.norm2.0.weight", + "backbone.levels.1.blocks.2.norm2.0.bias", + "backbone.levels.1.blocks.2.mlp.fc1.bias", + "backbone.levels.1.blocks.2.mlp.fc2.bias", + "backbone.levels.1.blocks.2.res_post_norm1.0.weight", + "backbone.levels.1.blocks.2.res_post_norm1.0.bias", + "backbone.levels.1.blocks.2.res_post_norm2.0.weight", + "backbone.levels.1.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.0 + }, + "layer_9_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.2.dcn.offset.weight", + "backbone.levels.1.blocks.2.dcn.mask.weight", + "backbone.levels.1.blocks.2.dcn.input_proj.weight", + "backbone.levels.1.blocks.2.dcn.output_proj.weight", + "backbone.levels.1.blocks.2.mlp.fc1.weight", + "backbone.levels.1.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.05 + }, + "layer_10_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.norm1.0.weight", + "backbone.levels.1.blocks.3.norm1.0.bias", + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.3.dcn.offset.bias", + "backbone.levels.1.blocks.3.dcn.mask.bias", + "backbone.levels.1.blocks.3.dcn.input_proj.bias", + "backbone.levels.1.blocks.3.dcn.output_proj.bias", + "backbone.levels.1.blocks.3.norm2.0.weight", + "backbone.levels.1.blocks.3.norm2.0.bias", + "backbone.levels.1.blocks.3.mlp.fc1.bias", + 
"backbone.levels.1.blocks.3.mlp.fc2.bias", + "backbone.levels.1.blocks.3.res_post_norm1.0.weight", + "backbone.levels.1.blocks.3.res_post_norm1.0.bias", + "backbone.levels.1.blocks.3.res_post_norm2.0.weight", + "backbone.levels.1.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.0 + }, + "layer_10_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.3.dcn.offset.weight", + "backbone.levels.1.blocks.3.dcn.mask.weight", + "backbone.levels.1.blocks.3.dcn.input_proj.weight", + "backbone.levels.1.blocks.3.dcn.output_proj.weight", + "backbone.levels.1.blocks.3.mlp.fc1.weight", + "backbone.levels.1.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.05 + }, + "layer_11_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.norm1.0.weight", + "backbone.levels.1.blocks.4.norm1.0.bias", + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.4.dcn.offset.bias", + "backbone.levels.1.blocks.4.dcn.mask.bias", + "backbone.levels.1.blocks.4.dcn.input_proj.bias", + "backbone.levels.1.blocks.4.dcn.output_proj.bias", + "backbone.levels.1.blocks.4.norm2.0.weight", + "backbone.levels.1.blocks.4.norm2.0.bias", + "backbone.levels.1.blocks.4.mlp.fc1.bias", + "backbone.levels.1.blocks.4.mlp.fc2.bias", + "backbone.levels.1.blocks.4.res_post_norm1.0.weight", + "backbone.levels.1.blocks.4.res_post_norm1.0.bias", + "backbone.levels.1.blocks.4.res_post_norm2.0.weight", + "backbone.levels.1.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.0 + }, + "layer_11_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.4.dcn.offset.weight", + "backbone.levels.1.blocks.4.dcn.mask.weight", + "backbone.levels.1.blocks.4.dcn.input_proj.weight", + "backbone.levels.1.blocks.4.dcn.output_proj.weight", + "backbone.levels.1.blocks.4.mlp.fc1.weight", + "backbone.levels.1.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.05 + }, + "layer_12_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.norm1.0.weight", + "backbone.levels.1.blocks.5.norm1.0.bias", + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.5.dcn.offset.bias", + "backbone.levels.1.blocks.5.dcn.mask.bias", + "backbone.levels.1.blocks.5.dcn.input_proj.bias", + "backbone.levels.1.blocks.5.dcn.output_proj.bias", + "backbone.levels.1.blocks.5.norm2.0.weight", + "backbone.levels.1.blocks.5.norm2.0.bias", + "backbone.levels.1.blocks.5.mlp.fc1.bias", + "backbone.levels.1.blocks.5.mlp.fc2.bias", + "backbone.levels.1.blocks.5.res_post_norm1.0.weight", + "backbone.levels.1.blocks.5.res_post_norm1.0.bias", + "backbone.levels.1.blocks.5.res_post_norm2.0.weight", + "backbone.levels.1.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.13527595427905592, + 
"lr": 1.3527595427905593e-06, + "weight_decay": 0.0 + }, + "layer_12_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.5.dcn.offset.weight", + "backbone.levels.1.blocks.5.dcn.mask.weight", + "backbone.levels.1.blocks.5.dcn.input_proj.weight", + "backbone.levels.1.blocks.5.dcn.output_proj.weight", + "backbone.levels.1.blocks.5.mlp.fc1.weight", + "backbone.levels.1.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.05 + }, + "layer_13_no_decay": { + "param_names": [ + "backbone.levels.1.norm.0.weight", + "backbone.levels.1.norm.0.bias", + "backbone.levels.1.downsample.norm.1.weight", + "backbone.levels.1.downsample.norm.1.bias", + "backbone.levels.2.blocks.0.norm1.0.weight", + "backbone.levels.2.blocks.0.norm1.0.bias", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.0.dcn.offset.bias", + "backbone.levels.2.blocks.0.dcn.mask.bias", + "backbone.levels.2.blocks.0.dcn.input_proj.bias", + "backbone.levels.2.blocks.0.dcn.output_proj.bias", + "backbone.levels.2.blocks.0.norm2.0.weight", + "backbone.levels.2.blocks.0.norm2.0.bias", + "backbone.levels.2.blocks.0.mlp.fc1.bias", + "backbone.levels.2.blocks.0.mlp.fc2.bias", + "backbone.levels.2.blocks.0.res_post_norm1.0.weight", + "backbone.levels.2.blocks.0.res_post_norm1.0.bias", + "backbone.levels.2.blocks.0.res_post_norm2.0.weight", + "backbone.levels.2.blocks.0.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.0.0.weight", + "backbone.levels.2.post_norms.0.0.bias" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.0 + }, + "layer_13_decay": { + "param_names": [ + "backbone.levels.1.downsample.conv.weight", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.0.dcn.offset.weight", + "backbone.levels.2.blocks.0.dcn.mask.weight", + "backbone.levels.2.blocks.0.dcn.input_proj.weight", + "backbone.levels.2.blocks.0.dcn.output_proj.weight", + "backbone.levels.2.blocks.0.mlp.fc1.weight", + "backbone.levels.2.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.05 + }, + "layer_14_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.norm1.0.weight", + "backbone.levels.2.blocks.1.norm1.0.bias", + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.1.dcn.offset.bias", + "backbone.levels.2.blocks.1.dcn.mask.bias", + "backbone.levels.2.blocks.1.dcn.input_proj.bias", + "backbone.levels.2.blocks.1.dcn.output_proj.bias", + "backbone.levels.2.blocks.1.norm2.0.weight", + "backbone.levels.2.blocks.1.norm2.0.bias", + "backbone.levels.2.blocks.1.mlp.fc1.bias", + "backbone.levels.2.blocks.1.mlp.fc2.bias", + "backbone.levels.2.blocks.1.res_post_norm1.0.weight", + "backbone.levels.2.blocks.1.res_post_norm1.0.bias", + "backbone.levels.2.blocks.1.res_post_norm2.0.weight", + "backbone.levels.2.blocks.1.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.1.0.weight", + 
"backbone.levels.2.post_norms.1.0.bias" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.0 + }, + "layer_14_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.1.dcn.offset.weight", + "backbone.levels.2.blocks.1.dcn.mask.weight", + "backbone.levels.2.blocks.1.dcn.input_proj.weight", + "backbone.levels.2.blocks.1.dcn.output_proj.weight", + "backbone.levels.2.blocks.1.mlp.fc1.weight", + "backbone.levels.2.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.05 + }, + "layer_15_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.norm1.0.weight", + "backbone.levels.2.blocks.2.norm1.0.bias", + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.2.dcn.offset.bias", + "backbone.levels.2.blocks.2.dcn.mask.bias", + "backbone.levels.2.blocks.2.dcn.input_proj.bias", + "backbone.levels.2.blocks.2.dcn.output_proj.bias", + "backbone.levels.2.blocks.2.norm2.0.weight", + "backbone.levels.2.blocks.2.norm2.0.bias", + "backbone.levels.2.blocks.2.mlp.fc1.bias", + "backbone.levels.2.blocks.2.mlp.fc2.bias", + "backbone.levels.2.blocks.2.res_post_norm1.0.weight", + "backbone.levels.2.blocks.2.res_post_norm1.0.bias", + "backbone.levels.2.blocks.2.res_post_norm2.0.weight", + "backbone.levels.2.blocks.2.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.2.0.weight", + "backbone.levels.2.post_norms.2.0.bias" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.0 + }, + "layer_15_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.2.dcn.offset.weight", + "backbone.levels.2.blocks.2.dcn.mask.weight", + "backbone.levels.2.blocks.2.dcn.input_proj.weight", + "backbone.levels.2.blocks.2.dcn.output_proj.weight", + "backbone.levels.2.blocks.2.mlp.fc1.weight", + "backbone.levels.2.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.05 + }, + "layer_16_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.norm1.0.weight", + "backbone.levels.2.blocks.3.norm1.0.bias", + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.3.dcn.offset.bias", + "backbone.levels.2.blocks.3.dcn.mask.bias", + "backbone.levels.2.blocks.3.dcn.input_proj.bias", + "backbone.levels.2.blocks.3.dcn.output_proj.bias", + "backbone.levels.2.blocks.3.norm2.0.weight", + "backbone.levels.2.blocks.3.norm2.0.bias", + "backbone.levels.2.blocks.3.mlp.fc1.bias", + "backbone.levels.2.blocks.3.mlp.fc2.bias", + "backbone.levels.2.blocks.3.res_post_norm1.0.weight", + "backbone.levels.2.blocks.3.res_post_norm1.0.bias", + "backbone.levels.2.blocks.3.res_post_norm2.0.weight", + "backbone.levels.2.blocks.3.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.3.0.weight", + "backbone.levels.2.post_norms.3.0.bias" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.0 
+ }, + "layer_16_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.3.dcn.offset.weight", + "backbone.levels.2.blocks.3.dcn.mask.weight", + "backbone.levels.2.blocks.3.dcn.input_proj.weight", + "backbone.levels.2.blocks.3.dcn.output_proj.weight", + "backbone.levels.2.blocks.3.mlp.fc1.weight", + "backbone.levels.2.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.05 + }, + "layer_17_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.norm1.0.weight", + "backbone.levels.2.blocks.4.norm1.0.bias", + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.4.dcn.offset.bias", + "backbone.levels.2.blocks.4.dcn.mask.bias", + "backbone.levels.2.blocks.4.dcn.input_proj.bias", + "backbone.levels.2.blocks.4.dcn.output_proj.bias", + "backbone.levels.2.blocks.4.norm2.0.weight", + "backbone.levels.2.blocks.4.norm2.0.bias", + "backbone.levels.2.blocks.4.mlp.fc1.bias", + "backbone.levels.2.blocks.4.mlp.fc2.bias", + "backbone.levels.2.blocks.4.res_post_norm1.0.weight", + "backbone.levels.2.blocks.4.res_post_norm1.0.bias", + "backbone.levels.2.blocks.4.res_post_norm2.0.weight", + "backbone.levels.2.blocks.4.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.4.0.weight", + "backbone.levels.2.post_norms.4.0.bias" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.0 + }, + "layer_17_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.4.dcn.offset.weight", + "backbone.levels.2.blocks.4.dcn.mask.weight", + "backbone.levels.2.blocks.4.dcn.input_proj.weight", + "backbone.levels.2.blocks.4.dcn.output_proj.weight", + "backbone.levels.2.blocks.4.mlp.fc1.weight", + "backbone.levels.2.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.05 + }, + "layer_18_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.norm1.0.weight", + "backbone.levels.2.blocks.5.norm1.0.bias", + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.5.dcn.offset.bias", + "backbone.levels.2.blocks.5.dcn.mask.bias", + "backbone.levels.2.blocks.5.dcn.input_proj.bias", + "backbone.levels.2.blocks.5.dcn.output_proj.bias", + "backbone.levels.2.blocks.5.norm2.0.weight", + "backbone.levels.2.blocks.5.norm2.0.bias", + "backbone.levels.2.blocks.5.mlp.fc1.bias", + "backbone.levels.2.blocks.5.mlp.fc2.bias", + "backbone.levels.2.blocks.5.res_post_norm1.0.weight", + "backbone.levels.2.blocks.5.res_post_norm1.0.bias", + "backbone.levels.2.blocks.5.res_post_norm2.0.weight", + "backbone.levels.2.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.0 + }, + "layer_18_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.5.dcn.offset.weight", + 
"backbone.levels.2.blocks.5.dcn.mask.weight", + "backbone.levels.2.blocks.5.dcn.input_proj.weight", + "backbone.levels.2.blocks.5.dcn.output_proj.weight", + "backbone.levels.2.blocks.5.mlp.fc1.weight", + "backbone.levels.2.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.05 + }, + "layer_19_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.norm1.0.weight", + "backbone.levels.2.blocks.6.norm1.0.bias", + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.6.dcn.offset.bias", + "backbone.levels.2.blocks.6.dcn.mask.bias", + "backbone.levels.2.blocks.6.dcn.input_proj.bias", + "backbone.levels.2.blocks.6.dcn.output_proj.bias", + "backbone.levels.2.blocks.6.norm2.0.weight", + "backbone.levels.2.blocks.6.norm2.0.bias", + "backbone.levels.2.blocks.6.mlp.fc1.bias", + "backbone.levels.2.blocks.6.mlp.fc2.bias", + "backbone.levels.2.blocks.6.res_post_norm1.0.weight", + "backbone.levels.2.blocks.6.res_post_norm1.0.bias", + "backbone.levels.2.blocks.6.res_post_norm2.0.weight", + "backbone.levels.2.blocks.6.res_post_norm2.0.bias" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.0 + }, + "layer_19_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.6.dcn.offset.weight", + "backbone.levels.2.blocks.6.dcn.mask.weight", + "backbone.levels.2.blocks.6.dcn.input_proj.weight", + "backbone.levels.2.blocks.6.dcn.output_proj.weight", + "backbone.levels.2.blocks.6.mlp.fc1.weight", + "backbone.levels.2.blocks.6.mlp.fc2.weight" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.05 + }, + "layer_20_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.norm1.0.weight", + "backbone.levels.2.blocks.7.norm1.0.bias", + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.7.dcn.offset.bias", + "backbone.levels.2.blocks.7.dcn.mask.bias", + "backbone.levels.2.blocks.7.dcn.input_proj.bias", + "backbone.levels.2.blocks.7.dcn.output_proj.bias", + "backbone.levels.2.blocks.7.norm2.0.weight", + "backbone.levels.2.blocks.7.norm2.0.bias", + "backbone.levels.2.blocks.7.mlp.fc1.bias", + "backbone.levels.2.blocks.7.mlp.fc2.bias", + "backbone.levels.2.blocks.7.res_post_norm1.0.weight", + "backbone.levels.2.blocks.7.res_post_norm1.0.bias", + "backbone.levels.2.blocks.7.res_post_norm2.0.weight", + "backbone.levels.2.blocks.7.res_post_norm2.0.bias" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.0 + }, + "layer_20_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.7.dcn.offset.weight", + "backbone.levels.2.blocks.7.dcn.mask.weight", + "backbone.levels.2.blocks.7.dcn.input_proj.weight", + "backbone.levels.2.blocks.7.dcn.output_proj.weight", + "backbone.levels.2.blocks.7.mlp.fc1.weight", + "backbone.levels.2.blocks.7.mlp.fc2.weight" + ], + "lr_scale": 0.2039068257457904, + "lr": 
2.039068257457904e-06, + "weight_decay": 0.05 + }, + "layer_21_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.norm1.0.weight", + "backbone.levels.2.blocks.8.norm1.0.bias", + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.8.dcn.offset.bias", + "backbone.levels.2.blocks.8.dcn.mask.bias", + "backbone.levels.2.blocks.8.dcn.input_proj.bias", + "backbone.levels.2.blocks.8.dcn.output_proj.bias", + "backbone.levels.2.blocks.8.norm2.0.weight", + "backbone.levels.2.blocks.8.norm2.0.bias", + "backbone.levels.2.blocks.8.mlp.fc1.bias", + "backbone.levels.2.blocks.8.mlp.fc2.bias", + "backbone.levels.2.blocks.8.res_post_norm1.0.weight", + "backbone.levels.2.blocks.8.res_post_norm1.0.bias", + "backbone.levels.2.blocks.8.res_post_norm2.0.weight", + "backbone.levels.2.blocks.8.res_post_norm2.0.bias" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.0 + }, + "layer_21_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.8.dcn.offset.weight", + "backbone.levels.2.blocks.8.dcn.mask.weight", + "backbone.levels.2.blocks.8.dcn.input_proj.weight", + "backbone.levels.2.blocks.8.dcn.output_proj.weight", + "backbone.levels.2.blocks.8.mlp.fc1.weight", + "backbone.levels.2.blocks.8.mlp.fc2.weight" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.05 + }, + "layer_22_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.norm1.0.weight", + "backbone.levels.2.blocks.9.norm1.0.bias", + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.9.dcn.offset.bias", + "backbone.levels.2.blocks.9.dcn.mask.bias", + "backbone.levels.2.blocks.9.dcn.input_proj.bias", + "backbone.levels.2.blocks.9.dcn.output_proj.bias", + "backbone.levels.2.blocks.9.norm2.0.weight", + "backbone.levels.2.blocks.9.norm2.0.bias", + "backbone.levels.2.blocks.9.mlp.fc1.bias", + "backbone.levels.2.blocks.9.mlp.fc2.bias", + "backbone.levels.2.blocks.9.res_post_norm1.0.weight", + "backbone.levels.2.blocks.9.res_post_norm1.0.bias", + "backbone.levels.2.blocks.9.res_post_norm2.0.weight", + "backbone.levels.2.blocks.9.res_post_norm2.0.bias" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.0 + }, + "layer_22_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.9.dcn.offset.weight", + "backbone.levels.2.blocks.9.dcn.mask.weight", + "backbone.levels.2.blocks.9.dcn.input_proj.weight", + "backbone.levels.2.blocks.9.dcn.output_proj.weight", + "backbone.levels.2.blocks.9.mlp.fc1.weight", + "backbone.levels.2.blocks.9.mlp.fc2.weight" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.05 + }, + "layer_23_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.norm1.0.weight", + "backbone.levels.2.blocks.10.norm1.0.bias", + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.bias", 
+ "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.10.dcn.offset.bias", + "backbone.levels.2.blocks.10.dcn.mask.bias", + "backbone.levels.2.blocks.10.dcn.input_proj.bias", + "backbone.levels.2.blocks.10.dcn.output_proj.bias", + "backbone.levels.2.blocks.10.norm2.0.weight", + "backbone.levels.2.blocks.10.norm2.0.bias", + "backbone.levels.2.blocks.10.mlp.fc1.bias", + "backbone.levels.2.blocks.10.mlp.fc2.bias", + "backbone.levels.2.blocks.10.res_post_norm1.0.weight", + "backbone.levels.2.blocks.10.res_post_norm1.0.bias", + "backbone.levels.2.blocks.10.res_post_norm2.0.weight", + "backbone.levels.2.blocks.10.res_post_norm2.0.bias" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.0 + }, + "layer_23_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.10.dcn.offset.weight", + "backbone.levels.2.blocks.10.dcn.mask.weight", + "backbone.levels.2.blocks.10.dcn.input_proj.weight", + "backbone.levels.2.blocks.10.dcn.output_proj.weight", + "backbone.levels.2.blocks.10.mlp.fc1.weight", + "backbone.levels.2.blocks.10.mlp.fc2.weight" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.05 + }, + "layer_24_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.norm1.0.weight", + "backbone.levels.2.blocks.11.norm1.0.bias", + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.11.dcn.offset.bias", + "backbone.levels.2.blocks.11.dcn.mask.bias", + "backbone.levels.2.blocks.11.dcn.input_proj.bias", + "backbone.levels.2.blocks.11.dcn.output_proj.bias", + "backbone.levels.2.blocks.11.norm2.0.weight", + "backbone.levels.2.blocks.11.norm2.0.bias", + "backbone.levels.2.blocks.11.mlp.fc1.bias", + "backbone.levels.2.blocks.11.mlp.fc2.bias", + "backbone.levels.2.blocks.11.res_post_norm1.0.weight", + "backbone.levels.2.blocks.11.res_post_norm1.0.bias", + "backbone.levels.2.blocks.11.res_post_norm2.0.weight", + "backbone.levels.2.blocks.11.res_post_norm2.0.bias" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.0 + }, + "layer_24_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.11.dcn.offset.weight", + "backbone.levels.2.blocks.11.dcn.mask.weight", + "backbone.levels.2.blocks.11.dcn.input_proj.weight", + "backbone.levels.2.blocks.11.dcn.output_proj.weight", + "backbone.levels.2.blocks.11.mlp.fc1.weight", + "backbone.levels.2.blocks.11.mlp.fc2.weight" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.05 + }, + "layer_25_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.norm1.0.weight", + "backbone.levels.2.blocks.12.norm1.0.bias", + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.12.dcn.offset.bias", + "backbone.levels.2.blocks.12.dcn.mask.bias", + "backbone.levels.2.blocks.12.dcn.input_proj.bias", 
+ "backbone.levels.2.blocks.12.dcn.output_proj.bias", + "backbone.levels.2.blocks.12.norm2.0.weight", + "backbone.levels.2.blocks.12.norm2.0.bias", + "backbone.levels.2.blocks.12.mlp.fc1.bias", + "backbone.levels.2.blocks.12.mlp.fc2.bias", + "backbone.levels.2.blocks.12.res_post_norm1.0.weight", + "backbone.levels.2.blocks.12.res_post_norm1.0.bias", + "backbone.levels.2.blocks.12.res_post_norm2.0.weight", + "backbone.levels.2.blocks.12.res_post_norm2.0.bias" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.0 + }, + "layer_25_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.12.dcn.offset.weight", + "backbone.levels.2.blocks.12.dcn.mask.weight", + "backbone.levels.2.blocks.12.dcn.input_proj.weight", + "backbone.levels.2.blocks.12.dcn.output_proj.weight", + "backbone.levels.2.blocks.12.mlp.fc1.weight", + "backbone.levels.2.blocks.12.mlp.fc2.weight" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.05 + }, + "layer_26_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.norm1.0.weight", + "backbone.levels.2.blocks.13.norm1.0.bias", + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.13.dcn.offset.bias", + "backbone.levels.2.blocks.13.dcn.mask.bias", + "backbone.levels.2.blocks.13.dcn.input_proj.bias", + "backbone.levels.2.blocks.13.dcn.output_proj.bias", + "backbone.levels.2.blocks.13.norm2.0.weight", + "backbone.levels.2.blocks.13.norm2.0.bias", + "backbone.levels.2.blocks.13.mlp.fc1.bias", + "backbone.levels.2.blocks.13.mlp.fc2.bias", + "backbone.levels.2.blocks.13.res_post_norm1.0.weight", + "backbone.levels.2.blocks.13.res_post_norm1.0.bias", + "backbone.levels.2.blocks.13.res_post_norm2.0.weight", + "backbone.levels.2.blocks.13.res_post_norm2.0.bias" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.0 + }, + "layer_26_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.13.dcn.offset.weight", + "backbone.levels.2.blocks.13.dcn.mask.weight", + "backbone.levels.2.blocks.13.dcn.input_proj.weight", + "backbone.levels.2.blocks.13.dcn.output_proj.weight", + "backbone.levels.2.blocks.13.mlp.fc1.weight", + "backbone.levels.2.blocks.13.mlp.fc2.weight" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.05 + }, + "layer_27_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.norm1.0.weight", + "backbone.levels.2.blocks.14.norm1.0.bias", + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.14.dcn.offset.bias", + "backbone.levels.2.blocks.14.dcn.mask.bias", + "backbone.levels.2.blocks.14.dcn.input_proj.bias", + "backbone.levels.2.blocks.14.dcn.output_proj.bias", + "backbone.levels.2.blocks.14.norm2.0.weight", + "backbone.levels.2.blocks.14.norm2.0.bias", + "backbone.levels.2.blocks.14.mlp.fc1.bias", + "backbone.levels.2.blocks.14.mlp.fc2.bias", + 
"backbone.levels.2.blocks.14.res_post_norm1.0.weight", + "backbone.levels.2.blocks.14.res_post_norm1.0.bias", + "backbone.levels.2.blocks.14.res_post_norm2.0.weight", + "backbone.levels.2.blocks.14.res_post_norm2.0.bias" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.0 + }, + "layer_27_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.14.dcn.offset.weight", + "backbone.levels.2.blocks.14.dcn.mask.weight", + "backbone.levels.2.blocks.14.dcn.input_proj.weight", + "backbone.levels.2.blocks.14.dcn.output_proj.weight", + "backbone.levels.2.blocks.14.mlp.fc1.weight", + "backbone.levels.2.blocks.14.mlp.fc2.weight" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.05 + }, + "layer_28_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.norm1.0.weight", + "backbone.levels.2.blocks.15.norm1.0.bias", + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.15.dcn.offset.bias", + "backbone.levels.2.blocks.15.dcn.mask.bias", + "backbone.levels.2.blocks.15.dcn.input_proj.bias", + "backbone.levels.2.blocks.15.dcn.output_proj.bias", + "backbone.levels.2.blocks.15.norm2.0.weight", + "backbone.levels.2.blocks.15.norm2.0.bias", + "backbone.levels.2.blocks.15.mlp.fc1.bias", + "backbone.levels.2.blocks.15.mlp.fc2.bias", + "backbone.levels.2.blocks.15.res_post_norm1.0.weight", + "backbone.levels.2.blocks.15.res_post_norm1.0.bias", + "backbone.levels.2.blocks.15.res_post_norm2.0.weight", + "backbone.levels.2.blocks.15.res_post_norm2.0.bias" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.0 + }, + "layer_28_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.15.dcn.offset.weight", + "backbone.levels.2.blocks.15.dcn.mask.weight", + "backbone.levels.2.blocks.15.dcn.input_proj.weight", + "backbone.levels.2.blocks.15.dcn.output_proj.weight", + "backbone.levels.2.blocks.15.mlp.fc1.weight", + "backbone.levels.2.blocks.15.mlp.fc2.weight" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.05 + }, + "layer_29_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.norm1.0.weight", + "backbone.levels.2.blocks.16.norm1.0.bias", + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.16.dcn.offset.bias", + "backbone.levels.2.blocks.16.dcn.mask.bias", + "backbone.levels.2.blocks.16.dcn.input_proj.bias", + "backbone.levels.2.blocks.16.dcn.output_proj.bias", + "backbone.levels.2.blocks.16.norm2.0.weight", + "backbone.levels.2.blocks.16.norm2.0.bias", + "backbone.levels.2.blocks.16.mlp.fc1.bias", + "backbone.levels.2.blocks.16.mlp.fc2.bias", + "backbone.levels.2.blocks.16.res_post_norm1.0.weight", + "backbone.levels.2.blocks.16.res_post_norm1.0.bias", + "backbone.levels.2.blocks.16.res_post_norm2.0.weight", + "backbone.levels.2.blocks.16.res_post_norm2.0.bias" + ], + "lr_scale": 0.323533544973709, + 
"lr": 3.2353354497370905e-06, + "weight_decay": 0.0 + }, + "layer_29_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.16.dcn.offset.weight", + "backbone.levels.2.blocks.16.dcn.mask.weight", + "backbone.levels.2.blocks.16.dcn.input_proj.weight", + "backbone.levels.2.blocks.16.dcn.output_proj.weight", + "backbone.levels.2.blocks.16.mlp.fc1.weight", + "backbone.levels.2.blocks.16.mlp.fc2.weight" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.05 + }, + "layer_30_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.norm1.0.weight", + "backbone.levels.2.blocks.17.norm1.0.bias", + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.17.dcn.offset.bias", + "backbone.levels.2.blocks.17.dcn.mask.bias", + "backbone.levels.2.blocks.17.dcn.input_proj.bias", + "backbone.levels.2.blocks.17.dcn.output_proj.bias", + "backbone.levels.2.blocks.17.norm2.0.weight", + "backbone.levels.2.blocks.17.norm2.0.bias", + "backbone.levels.2.blocks.17.mlp.fc1.bias", + "backbone.levels.2.blocks.17.mlp.fc2.bias", + "backbone.levels.2.blocks.17.res_post_norm1.0.weight", + "backbone.levels.2.blocks.17.res_post_norm1.0.bias", + "backbone.levels.2.blocks.17.res_post_norm2.0.weight", + "backbone.levels.2.blocks.17.res_post_norm2.0.bias" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.0 + }, + "layer_30_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.17.dcn.offset.weight", + "backbone.levels.2.blocks.17.dcn.mask.weight", + "backbone.levels.2.blocks.17.dcn.input_proj.weight", + "backbone.levels.2.blocks.17.dcn.output_proj.weight", + "backbone.levels.2.blocks.17.mlp.fc1.weight", + "backbone.levels.2.blocks.17.mlp.fc2.weight" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.05 + }, + "layer_31_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.norm1.0.weight", + "backbone.levels.2.blocks.18.norm1.0.bias", + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.18.dcn.offset.bias", + "backbone.levels.2.blocks.18.dcn.mask.bias", + "backbone.levels.2.blocks.18.dcn.input_proj.bias", + "backbone.levels.2.blocks.18.dcn.output_proj.bias", + "backbone.levels.2.blocks.18.norm2.0.weight", + "backbone.levels.2.blocks.18.norm2.0.bias", + "backbone.levels.2.blocks.18.mlp.fc1.bias", + "backbone.levels.2.blocks.18.mlp.fc2.bias", + "backbone.levels.2.blocks.18.res_post_norm1.0.weight", + "backbone.levels.2.blocks.18.res_post_norm1.0.bias", + "backbone.levels.2.blocks.18.res_post_norm2.0.weight", + "backbone.levels.2.blocks.18.res_post_norm2.0.bias" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.0 + }, + "layer_31_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.weight", + 
"backbone.levels.2.blocks.18.dcn.offset.weight", + "backbone.levels.2.blocks.18.dcn.mask.weight", + "backbone.levels.2.blocks.18.dcn.input_proj.weight", + "backbone.levels.2.blocks.18.dcn.output_proj.weight", + "backbone.levels.2.blocks.18.mlp.fc1.weight", + "backbone.levels.2.blocks.18.mlp.fc2.weight" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.05 + }, + "layer_32_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.norm1.0.weight", + "backbone.levels.2.blocks.19.norm1.0.bias", + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.19.dcn.offset.bias", + "backbone.levels.2.blocks.19.dcn.mask.bias", + "backbone.levels.2.blocks.19.dcn.input_proj.bias", + "backbone.levels.2.blocks.19.dcn.output_proj.bias", + "backbone.levels.2.blocks.19.norm2.0.weight", + "backbone.levels.2.blocks.19.norm2.0.bias", + "backbone.levels.2.blocks.19.mlp.fc1.bias", + "backbone.levels.2.blocks.19.mlp.fc2.bias", + "backbone.levels.2.blocks.19.res_post_norm1.0.weight", + "backbone.levels.2.blocks.19.res_post_norm1.0.bias", + "backbone.levels.2.blocks.19.res_post_norm2.0.weight", + "backbone.levels.2.blocks.19.res_post_norm2.0.bias" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.0 + }, + "layer_32_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.19.dcn.offset.weight", + "backbone.levels.2.blocks.19.dcn.mask.weight", + "backbone.levels.2.blocks.19.dcn.input_proj.weight", + "backbone.levels.2.blocks.19.dcn.output_proj.weight", + "backbone.levels.2.blocks.19.mlp.fc1.weight", + "backbone.levels.2.blocks.19.mlp.fc2.weight" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.05 + }, + "layer_33_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.norm1.0.weight", + "backbone.levels.2.blocks.20.norm1.0.bias", + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.20.dcn.offset.bias", + "backbone.levels.2.blocks.20.dcn.mask.bias", + "backbone.levels.2.blocks.20.dcn.input_proj.bias", + "backbone.levels.2.blocks.20.dcn.output_proj.bias", + "backbone.levels.2.blocks.20.norm2.0.weight", + "backbone.levels.2.blocks.20.norm2.0.bias", + "backbone.levels.2.blocks.20.mlp.fc1.bias", + "backbone.levels.2.blocks.20.mlp.fc2.bias", + "backbone.levels.2.blocks.20.res_post_norm1.0.weight", + "backbone.levels.2.blocks.20.res_post_norm1.0.bias", + "backbone.levels.2.blocks.20.res_post_norm2.0.weight", + "backbone.levels.2.blocks.20.res_post_norm2.0.bias" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.0 + }, + "layer_33_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.20.dcn.offset.weight", + "backbone.levels.2.blocks.20.dcn.mask.weight", + "backbone.levels.2.blocks.20.dcn.input_proj.weight", + "backbone.levels.2.blocks.20.dcn.output_proj.weight", + "backbone.levels.2.blocks.20.mlp.fc1.weight", + 
"backbone.levels.2.blocks.20.mlp.fc2.weight" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.05 + }, + "layer_34_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.norm1.0.weight", + "backbone.levels.2.blocks.21.norm1.0.bias", + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.21.dcn.offset.bias", + "backbone.levels.2.blocks.21.dcn.mask.bias", + "backbone.levels.2.blocks.21.dcn.input_proj.bias", + "backbone.levels.2.blocks.21.dcn.output_proj.bias", + "backbone.levels.2.blocks.21.norm2.0.weight", + "backbone.levels.2.blocks.21.norm2.0.bias", + "backbone.levels.2.blocks.21.mlp.fc1.bias", + "backbone.levels.2.blocks.21.mlp.fc2.bias", + "backbone.levels.2.blocks.21.res_post_norm1.0.weight", + "backbone.levels.2.blocks.21.res_post_norm1.0.bias", + "backbone.levels.2.blocks.21.res_post_norm2.0.weight", + "backbone.levels.2.blocks.21.res_post_norm2.0.bias" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.0 + }, + "layer_34_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.21.dcn.offset.weight", + "backbone.levels.2.blocks.21.dcn.mask.weight", + "backbone.levels.2.blocks.21.dcn.input_proj.weight", + "backbone.levels.2.blocks.21.dcn.output_proj.weight", + "backbone.levels.2.blocks.21.mlp.fc1.weight", + "backbone.levels.2.blocks.21.mlp.fc2.weight" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.05 + }, + "layer_35_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.norm1.0.weight", + "backbone.levels.2.blocks.22.norm1.0.bias", + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.22.dcn.offset.bias", + "backbone.levels.2.blocks.22.dcn.mask.bias", + "backbone.levels.2.blocks.22.dcn.input_proj.bias", + "backbone.levels.2.blocks.22.dcn.output_proj.bias", + "backbone.levels.2.blocks.22.norm2.0.weight", + "backbone.levels.2.blocks.22.norm2.0.bias", + "backbone.levels.2.blocks.22.mlp.fc1.bias", + "backbone.levels.2.blocks.22.mlp.fc2.bias", + "backbone.levels.2.blocks.22.res_post_norm1.0.weight", + "backbone.levels.2.blocks.22.res_post_norm1.0.bias", + "backbone.levels.2.blocks.22.res_post_norm2.0.weight", + "backbone.levels.2.blocks.22.res_post_norm2.0.bias" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.0 + }, + "layer_35_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.22.dcn.offset.weight", + "backbone.levels.2.blocks.22.dcn.mask.weight", + "backbone.levels.2.blocks.22.dcn.input_proj.weight", + "backbone.levels.2.blocks.22.dcn.output_proj.weight", + "backbone.levels.2.blocks.22.mlp.fc1.weight", + "backbone.levels.2.blocks.22.mlp.fc2.weight" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.05 + }, + "layer_36_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.norm1.0.weight", + 
"backbone.levels.2.blocks.23.norm1.0.bias", + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.23.dcn.offset.bias", + "backbone.levels.2.blocks.23.dcn.mask.bias", + "backbone.levels.2.blocks.23.dcn.input_proj.bias", + "backbone.levels.2.blocks.23.dcn.output_proj.bias", + "backbone.levels.2.blocks.23.norm2.0.weight", + "backbone.levels.2.blocks.23.norm2.0.bias", + "backbone.levels.2.blocks.23.mlp.fc1.bias", + "backbone.levels.2.blocks.23.mlp.fc2.bias", + "backbone.levels.2.blocks.23.res_post_norm1.0.weight", + "backbone.levels.2.blocks.23.res_post_norm1.0.bias", + "backbone.levels.2.blocks.23.res_post_norm2.0.weight", + "backbone.levels.2.blocks.23.res_post_norm2.0.bias" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.0 + }, + "layer_36_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.23.dcn.offset.weight", + "backbone.levels.2.blocks.23.dcn.mask.weight", + "backbone.levels.2.blocks.23.dcn.input_proj.weight", + "backbone.levels.2.blocks.23.dcn.output_proj.weight", + "backbone.levels.2.blocks.23.mlp.fc1.weight", + "backbone.levels.2.blocks.23.mlp.fc2.weight" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.05 + }, + "layer_37_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.norm1.0.weight", + "backbone.levels.2.blocks.24.norm1.0.bias", + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.24.dcn.offset.bias", + "backbone.levels.2.blocks.24.dcn.mask.bias", + "backbone.levels.2.blocks.24.dcn.input_proj.bias", + "backbone.levels.2.blocks.24.dcn.output_proj.bias", + "backbone.levels.2.blocks.24.norm2.0.weight", + "backbone.levels.2.blocks.24.norm2.0.bias", + "backbone.levels.2.blocks.24.mlp.fc1.bias", + "backbone.levels.2.blocks.24.mlp.fc2.bias", + "backbone.levels.2.blocks.24.res_post_norm1.0.weight", + "backbone.levels.2.blocks.24.res_post_norm1.0.bias", + "backbone.levels.2.blocks.24.res_post_norm2.0.weight", + "backbone.levels.2.blocks.24.res_post_norm2.0.bias" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.0 + }, + "layer_37_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.24.dcn.offset.weight", + "backbone.levels.2.blocks.24.dcn.mask.weight", + "backbone.levels.2.blocks.24.dcn.input_proj.weight", + "backbone.levels.2.blocks.24.dcn.output_proj.weight", + "backbone.levels.2.blocks.24.mlp.fc1.weight", + "backbone.levels.2.blocks.24.mlp.fc2.weight" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.05 + }, + "layer_38_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.norm1.0.weight", + "backbone.levels.2.blocks.25.norm1.0.bias", + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight", + 
"backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.25.dcn.offset.bias", + "backbone.levels.2.blocks.25.dcn.mask.bias", + "backbone.levels.2.blocks.25.dcn.input_proj.bias", + "backbone.levels.2.blocks.25.dcn.output_proj.bias", + "backbone.levels.2.blocks.25.norm2.0.weight", + "backbone.levels.2.blocks.25.norm2.0.bias", + "backbone.levels.2.blocks.25.mlp.fc1.bias", + "backbone.levels.2.blocks.25.mlp.fc2.bias", + "backbone.levels.2.blocks.25.res_post_norm1.0.weight", + "backbone.levels.2.blocks.25.res_post_norm1.0.bias", + "backbone.levels.2.blocks.25.res_post_norm2.0.weight", + "backbone.levels.2.blocks.25.res_post_norm2.0.bias" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.0 + }, + "layer_38_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.25.dcn.offset.weight", + "backbone.levels.2.blocks.25.dcn.mask.weight", + "backbone.levels.2.blocks.25.dcn.input_proj.weight", + "backbone.levels.2.blocks.25.dcn.output_proj.weight", + "backbone.levels.2.blocks.25.mlp.fc1.weight", + "backbone.levels.2.blocks.25.mlp.fc2.weight" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.05 + }, + "layer_39_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.norm1.0.weight", + "backbone.levels.2.blocks.26.norm1.0.bias", + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.26.dcn.offset.bias", + "backbone.levels.2.blocks.26.dcn.mask.bias", + "backbone.levels.2.blocks.26.dcn.input_proj.bias", + "backbone.levels.2.blocks.26.dcn.output_proj.bias", + "backbone.levels.2.blocks.26.norm2.0.weight", + "backbone.levels.2.blocks.26.norm2.0.bias", + "backbone.levels.2.blocks.26.mlp.fc1.bias", + "backbone.levels.2.blocks.26.mlp.fc2.bias", + "backbone.levels.2.blocks.26.res_post_norm1.0.weight", + "backbone.levels.2.blocks.26.res_post_norm1.0.bias", + "backbone.levels.2.blocks.26.res_post_norm2.0.weight", + "backbone.levels.2.blocks.26.res_post_norm2.0.bias" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.0 + }, + "layer_39_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.26.dcn.offset.weight", + "backbone.levels.2.blocks.26.dcn.mask.weight", + "backbone.levels.2.blocks.26.dcn.input_proj.weight", + "backbone.levels.2.blocks.26.dcn.output_proj.weight", + "backbone.levels.2.blocks.26.mlp.fc1.weight", + "backbone.levels.2.blocks.26.mlp.fc2.weight" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.05 + }, + "layer_40_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.norm1.0.weight", + "backbone.levels.2.blocks.27.norm1.0.bias", + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.27.dcn.offset.bias", + "backbone.levels.2.blocks.27.dcn.mask.bias", + "backbone.levels.2.blocks.27.dcn.input_proj.bias", + "backbone.levels.2.blocks.27.dcn.output_proj.bias", + 
"backbone.levels.2.blocks.27.norm2.0.weight", + "backbone.levels.2.blocks.27.norm2.0.bias", + "backbone.levels.2.blocks.27.mlp.fc1.bias", + "backbone.levels.2.blocks.27.mlp.fc2.bias", + "backbone.levels.2.blocks.27.res_post_norm1.0.weight", + "backbone.levels.2.blocks.27.res_post_norm1.0.bias", + "backbone.levels.2.blocks.27.res_post_norm2.0.weight", + "backbone.levels.2.blocks.27.res_post_norm2.0.bias" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.0 + }, + "layer_40_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.27.dcn.offset.weight", + "backbone.levels.2.blocks.27.dcn.mask.weight", + "backbone.levels.2.blocks.27.dcn.input_proj.weight", + "backbone.levels.2.blocks.27.dcn.output_proj.weight", + "backbone.levels.2.blocks.27.mlp.fc1.weight", + "backbone.levels.2.blocks.27.mlp.fc2.weight" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.05 + }, + "layer_41_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.norm1.0.weight", + "backbone.levels.2.blocks.28.norm1.0.bias", + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.28.dcn.offset.bias", + "backbone.levels.2.blocks.28.dcn.mask.bias", + "backbone.levels.2.blocks.28.dcn.input_proj.bias", + "backbone.levels.2.blocks.28.dcn.output_proj.bias", + "backbone.levels.2.blocks.28.norm2.0.weight", + "backbone.levels.2.blocks.28.norm2.0.bias", + "backbone.levels.2.blocks.28.mlp.fc1.bias", + "backbone.levels.2.blocks.28.mlp.fc2.bias", + "backbone.levels.2.blocks.28.res_post_norm1.0.weight", + "backbone.levels.2.blocks.28.res_post_norm1.0.bias", + "backbone.levels.2.blocks.28.res_post_norm2.0.weight", + "backbone.levels.2.blocks.28.res_post_norm2.0.bias" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.0 + }, + "layer_41_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.28.dcn.offset.weight", + "backbone.levels.2.blocks.28.dcn.mask.weight", + "backbone.levels.2.blocks.28.dcn.input_proj.weight", + "backbone.levels.2.blocks.28.dcn.output_proj.weight", + "backbone.levels.2.blocks.28.mlp.fc1.weight", + "backbone.levels.2.blocks.28.mlp.fc2.weight" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.05 + }, + "layer_42_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.norm1.0.weight", + "backbone.levels.2.blocks.29.norm1.0.bias", + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.29.dcn.offset.bias", + "backbone.levels.2.blocks.29.dcn.mask.bias", + "backbone.levels.2.blocks.29.dcn.input_proj.bias", + "backbone.levels.2.blocks.29.dcn.output_proj.bias", + "backbone.levels.2.blocks.29.norm2.0.weight", + "backbone.levels.2.blocks.29.norm2.0.bias", + "backbone.levels.2.blocks.29.mlp.fc1.bias", + "backbone.levels.2.blocks.29.mlp.fc2.bias", + "backbone.levels.2.blocks.29.res_post_norm1.0.weight", + 
"backbone.levels.2.blocks.29.res_post_norm1.0.bias", + "backbone.levels.2.blocks.29.res_post_norm2.0.weight", + "backbone.levels.2.blocks.29.res_post_norm2.0.bias" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.0 + }, + "layer_42_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.29.dcn.offset.weight", + "backbone.levels.2.blocks.29.dcn.mask.weight", + "backbone.levels.2.blocks.29.dcn.input_proj.weight", + "backbone.levels.2.blocks.29.dcn.output_proj.weight", + "backbone.levels.2.blocks.29.mlp.fc1.weight", + "backbone.levels.2.blocks.29.mlp.fc2.weight" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.05 + }, + "layer_43_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.norm1.0.weight", + "backbone.levels.2.blocks.30.norm1.0.bias", + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.30.dcn.offset.bias", + "backbone.levels.2.blocks.30.dcn.mask.bias", + "backbone.levels.2.blocks.30.dcn.input_proj.bias", + "backbone.levels.2.blocks.30.dcn.output_proj.bias", + "backbone.levels.2.blocks.30.norm2.0.weight", + "backbone.levels.2.blocks.30.norm2.0.bias", + "backbone.levels.2.blocks.30.mlp.fc1.bias", + "backbone.levels.2.blocks.30.mlp.fc2.bias", + "backbone.levels.2.blocks.30.res_post_norm1.0.weight", + "backbone.levels.2.blocks.30.res_post_norm1.0.bias", + "backbone.levels.2.blocks.30.res_post_norm2.0.weight", + "backbone.levels.2.blocks.30.res_post_norm2.0.bias" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.0 + }, + "layer_43_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.30.dcn.offset.weight", + "backbone.levels.2.blocks.30.dcn.mask.weight", + "backbone.levels.2.blocks.30.dcn.input_proj.weight", + "backbone.levels.2.blocks.30.dcn.output_proj.weight", + "backbone.levels.2.blocks.30.mlp.fc1.weight", + "backbone.levels.2.blocks.30.mlp.fc2.weight" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.05 + }, + "layer_44_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.norm1.0.weight", + "backbone.levels.2.blocks.31.norm1.0.bias", + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.31.dcn.offset.bias", + "backbone.levels.2.blocks.31.dcn.mask.bias", + "backbone.levels.2.blocks.31.dcn.input_proj.bias", + "backbone.levels.2.blocks.31.dcn.output_proj.bias", + "backbone.levels.2.blocks.31.norm2.0.weight", + "backbone.levels.2.blocks.31.norm2.0.bias", + "backbone.levels.2.blocks.31.mlp.fc1.bias", + "backbone.levels.2.blocks.31.mlp.fc2.bias", + "backbone.levels.2.blocks.31.res_post_norm1.0.weight", + "backbone.levels.2.blocks.31.res_post_norm1.0.bias", + "backbone.levels.2.blocks.31.res_post_norm2.0.weight", + "backbone.levels.2.blocks.31.res_post_norm2.0.bias" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.0 + }, + 
"layer_44_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.31.dcn.offset.weight", + "backbone.levels.2.blocks.31.dcn.mask.weight", + "backbone.levels.2.blocks.31.dcn.input_proj.weight", + "backbone.levels.2.blocks.31.dcn.output_proj.weight", + "backbone.levels.2.blocks.31.mlp.fc1.weight", + "backbone.levels.2.blocks.31.mlp.fc2.weight" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.05 + }, + "layer_45_no_decay": { + "param_names": [ + "backbone.levels.2.norm.0.weight", + "backbone.levels.2.norm.0.bias", + "backbone.levels.2.downsample.norm.1.weight", + "backbone.levels.2.downsample.norm.1.bias", + "backbone.levels.3.blocks.0.norm1.0.weight", + "backbone.levels.3.blocks.0.norm1.0.bias", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.0.dcn.offset.bias", + "backbone.levels.3.blocks.0.dcn.mask.bias", + "backbone.levels.3.blocks.0.dcn.input_proj.bias", + "backbone.levels.3.blocks.0.dcn.output_proj.bias", + "backbone.levels.3.blocks.0.norm2.0.weight", + "backbone.levels.3.blocks.0.norm2.0.bias", + "backbone.levels.3.blocks.0.mlp.fc1.bias", + "backbone.levels.3.blocks.0.mlp.fc2.bias", + "backbone.levels.3.blocks.0.res_post_norm1.0.weight", + "backbone.levels.3.blocks.0.res_post_norm1.0.bias", + "backbone.levels.3.blocks.0.res_post_norm2.0.weight", + "backbone.levels.3.blocks.0.res_post_norm2.0.bias", + "backbone.levels.3.norm.0.weight", + "backbone.levels.3.norm.0.bias" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.0 + }, + "layer_45_decay": { + "param_names": [ + "backbone.levels.2.downsample.conv.weight", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.0.dcn.offset.weight", + "backbone.levels.3.blocks.0.dcn.mask.weight", + "backbone.levels.3.blocks.0.dcn.input_proj.weight", + "backbone.levels.3.blocks.0.dcn.output_proj.weight", + "backbone.levels.3.blocks.0.mlp.fc1.weight", + "backbone.levels.3.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.05 + }, + "layer_46_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.norm1.0.weight", + "backbone.levels.3.blocks.1.norm1.0.bias", + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.1.dcn.offset.bias", + "backbone.levels.3.blocks.1.dcn.mask.bias", + "backbone.levels.3.blocks.1.dcn.input_proj.bias", + "backbone.levels.3.blocks.1.dcn.output_proj.bias", + "backbone.levels.3.blocks.1.norm2.0.weight", + "backbone.levels.3.blocks.1.norm2.0.bias", + "backbone.levels.3.blocks.1.mlp.fc1.bias", + "backbone.levels.3.blocks.1.mlp.fc2.bias", + "backbone.levels.3.blocks.1.res_post_norm1.0.weight", + "backbone.levels.3.blocks.1.res_post_norm1.0.bias", + "backbone.levels.3.blocks.1.res_post_norm2.0.weight", + "backbone.levels.3.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.0 + }, + "layer_46_decay": { + 
"param_names": [ + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.1.dcn.offset.weight", + "backbone.levels.3.blocks.1.dcn.mask.weight", + "backbone.levels.3.blocks.1.dcn.input_proj.weight", + "backbone.levels.3.blocks.1.dcn.output_proj.weight", + "backbone.levels.3.blocks.1.mlp.fc1.weight", + "backbone.levels.3.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.05 + }, + "layer_47_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.norm1.0.weight", + "backbone.levels.3.blocks.2.norm1.0.bias", + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.2.dcn.offset.bias", + "backbone.levels.3.blocks.2.dcn.mask.bias", + "backbone.levels.3.blocks.2.dcn.input_proj.bias", + "backbone.levels.3.blocks.2.dcn.output_proj.bias", + "backbone.levels.3.blocks.2.norm2.0.weight", + "backbone.levels.3.blocks.2.norm2.0.bias", + "backbone.levels.3.blocks.2.mlp.fc1.bias", + "backbone.levels.3.blocks.2.mlp.fc2.bias", + "backbone.levels.3.blocks.2.res_post_norm1.0.weight", + "backbone.levels.3.blocks.2.res_post_norm1.0.bias", + "backbone.levels.3.blocks.2.res_post_norm2.0.weight", + "backbone.levels.3.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.0 + }, + "layer_47_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.2.dcn.offset.weight", + "backbone.levels.3.blocks.2.dcn.mask.weight", + "backbone.levels.3.blocks.2.dcn.input_proj.weight", + "backbone.levels.3.blocks.2.dcn.output_proj.weight", + "backbone.levels.3.blocks.2.mlp.fc1.weight", + "backbone.levels.3.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.05 + }, + "layer_48_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.norm1.0.weight", + "backbone.levels.3.blocks.3.norm1.0.bias", + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.3.dcn.offset.bias", + "backbone.levels.3.blocks.3.dcn.mask.bias", + "backbone.levels.3.blocks.3.dcn.input_proj.bias", + "backbone.levels.3.blocks.3.dcn.output_proj.bias", + "backbone.levels.3.blocks.3.norm2.0.weight", + "backbone.levels.3.blocks.3.norm2.0.bias", + "backbone.levels.3.blocks.3.mlp.fc1.bias", + "backbone.levels.3.blocks.3.mlp.fc2.bias", + "backbone.levels.3.blocks.3.res_post_norm1.0.weight", + "backbone.levels.3.blocks.3.res_post_norm1.0.bias", + "backbone.levels.3.blocks.3.res_post_norm2.0.weight", + "backbone.levels.3.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.0 + }, + "layer_48_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.3.dcn.offset.weight", + "backbone.levels.3.blocks.3.dcn.mask.weight", + "backbone.levels.3.blocks.3.dcn.input_proj.weight", + 
"backbone.levels.3.blocks.3.dcn.output_proj.weight", + "backbone.levels.3.blocks.3.mlp.fc1.weight", + "backbone.levels.3.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.05 + }, + "layer_49_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.norm1.0.weight", + "backbone.levels.3.blocks.4.norm1.0.bias", + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.4.dcn.offset.bias", + "backbone.levels.3.blocks.4.dcn.mask.bias", + "backbone.levels.3.blocks.4.dcn.input_proj.bias", + "backbone.levels.3.blocks.4.dcn.output_proj.bias", + "backbone.levels.3.blocks.4.norm2.0.weight", + "backbone.levels.3.blocks.4.norm2.0.bias", + "backbone.levels.3.blocks.4.mlp.fc1.bias", + "backbone.levels.3.blocks.4.mlp.fc2.bias", + "backbone.levels.3.blocks.4.res_post_norm1.0.weight", + "backbone.levels.3.blocks.4.res_post_norm1.0.bias", + "backbone.levels.3.blocks.4.res_post_norm2.0.weight", + "backbone.levels.3.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.0 + }, + "layer_49_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.4.dcn.offset.weight", + "backbone.levels.3.blocks.4.dcn.mask.weight", + "backbone.levels.3.blocks.4.dcn.input_proj.weight", + "backbone.levels.3.blocks.4.dcn.output_proj.weight", + "backbone.levels.3.blocks.4.mlp.fc1.weight", + "backbone.levels.3.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.05 + }, + "layer_50_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.norm1.0.weight", + "backbone.levels.3.blocks.5.norm1.0.bias", + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.5.dcn.offset.bias", + "backbone.levels.3.blocks.5.dcn.mask.bias", + "backbone.levels.3.blocks.5.dcn.input_proj.bias", + "backbone.levels.3.blocks.5.dcn.output_proj.bias", + "backbone.levels.3.blocks.5.norm2.0.weight", + "backbone.levels.3.blocks.5.norm2.0.bias", + "backbone.levels.3.blocks.5.mlp.fc1.bias", + "backbone.levels.3.blocks.5.mlp.fc2.bias", + "backbone.levels.3.blocks.5.res_post_norm1.0.weight", + "backbone.levels.3.blocks.5.res_post_norm1.0.bias", + "backbone.levels.3.blocks.5.res_post_norm2.0.weight", + "backbone.levels.3.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.0 + }, + "layer_50_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.5.dcn.offset.weight", + "backbone.levels.3.blocks.5.dcn.mask.weight", + "backbone.levels.3.blocks.5.dcn.input_proj.weight", + "backbone.levels.3.blocks.5.dcn.output_proj.weight", + "backbone.levels.3.blocks.5.mlp.fc1.weight", + "backbone.levels.3.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.05 + }, + "layer_51_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.weight", + "decode_head.pixel_decoder.input_convs.1.conv.weight", + 
"decode_head.pixel_decoder.input_convs.2.conv.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.level_encoding.weight", + "decode_head.pixel_decoder.lateral_convs.0.conv.weight", + "decode_head.pixel_decoder.output_convs.0.conv.weight", + "decode_head.pixel_decoder.mask_feature.weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight", + 
"decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.05 + }, + "layer_51_no_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.bias", + "decode_head.pixel_decoder.input_convs.0.gn.weight", + "decode_head.pixel_decoder.input_convs.0.gn.bias", + "decode_head.pixel_decoder.input_convs.1.conv.bias", + 
"decode_head.pixel_decoder.input_convs.1.gn.weight", + "decode_head.pixel_decoder.input_convs.1.gn.bias", + "decode_head.pixel_decoder.input_convs.2.conv.bias", + "decode_head.pixel_decoder.input_convs.2.gn.weight", + "decode_head.pixel_decoder.input_convs.2.gn.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight", + 
"decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.bias", + "decode_head.pixel_decoder.lateral_convs.0.gn.weight", + "decode_head.pixel_decoder.lateral_convs.0.gn.bias", + "decode_head.pixel_decoder.output_convs.0.gn.weight", + "decode_head.pixel_decoder.output_convs.0.gn.bias", + "decode_head.pixel_decoder.mask_feature.bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.0.norms.0.weight", + "decode_head.transformer_decoder.layers.0.norms.0.bias", + "decode_head.transformer_decoder.layers.0.norms.1.weight", + "decode_head.transformer_decoder.layers.0.norms.1.bias", + "decode_head.transformer_decoder.layers.0.norms.2.weight", + "decode_head.transformer_decoder.layers.0.norms.2.bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.1.norms.0.weight", + "decode_head.transformer_decoder.layers.1.norms.0.bias", + "decode_head.transformer_decoder.layers.1.norms.1.weight", + "decode_head.transformer_decoder.layers.1.norms.1.bias", + "decode_head.transformer_decoder.layers.1.norms.2.weight", + "decode_head.transformer_decoder.layers.1.norms.2.bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias", + 
"decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.2.norms.0.weight", + "decode_head.transformer_decoder.layers.2.norms.0.bias", + "decode_head.transformer_decoder.layers.2.norms.1.weight", + "decode_head.transformer_decoder.layers.2.norms.1.bias", + "decode_head.transformer_decoder.layers.2.norms.2.weight", + "decode_head.transformer_decoder.layers.2.norms.2.bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.3.norms.0.weight", + "decode_head.transformer_decoder.layers.3.norms.0.bias", + "decode_head.transformer_decoder.layers.3.norms.1.weight", + "decode_head.transformer_decoder.layers.3.norms.1.bias", + "decode_head.transformer_decoder.layers.3.norms.2.weight", + "decode_head.transformer_decoder.layers.3.norms.2.bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.4.norms.0.weight", + "decode_head.transformer_decoder.layers.4.norms.0.bias", + "decode_head.transformer_decoder.layers.4.norms.1.weight", + "decode_head.transformer_decoder.layers.4.norms.1.bias", + "decode_head.transformer_decoder.layers.4.norms.2.weight", + "decode_head.transformer_decoder.layers.4.norms.2.bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.5.norms.0.weight", + "decode_head.transformer_decoder.layers.5.norms.0.bias", + "decode_head.transformer_decoder.layers.5.norms.1.weight", + "decode_head.transformer_decoder.layers.5.norms.1.bias", + "decode_head.transformer_decoder.layers.5.norms.2.weight", + "decode_head.transformer_decoder.layers.5.norms.2.bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias", + 
"decode_head.transformer_decoder.layers.6.norms.0.weight", + "decode_head.transformer_decoder.layers.6.norms.0.bias", + "decode_head.transformer_decoder.layers.6.norms.1.weight", + "decode_head.transformer_decoder.layers.6.norms.1.bias", + "decode_head.transformer_decoder.layers.6.norms.2.weight", + "decode_head.transformer_decoder.layers.6.norms.2.bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.7.norms.0.weight", + "decode_head.transformer_decoder.layers.7.norms.0.bias", + "decode_head.transformer_decoder.layers.7.norms.1.weight", + "decode_head.transformer_decoder.layers.7.norms.1.bias", + "decode_head.transformer_decoder.layers.7.norms.2.weight", + "decode_head.transformer_decoder.layers.7.norms.2.bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.8.norms.0.weight", + "decode_head.transformer_decoder.layers.8.norms.0.bias", + "decode_head.transformer_decoder.layers.8.norms.1.weight", + "decode_head.transformer_decoder.layers.8.norms.1.bias", + "decode_head.transformer_decoder.layers.8.norms.2.weight", + "decode_head.transformer_decoder.layers.8.norms.2.bias", + "decode_head.transformer_decoder.post_norm.weight", + "decode_head.transformer_decoder.post_norm.bias" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.0 + } +} +2025-05-28 02:58:11,576 - mmseg - INFO - Loaded 1159 images +2025-05-28 02:58:11,577 - mmseg - INFO - load checkpoint from http path: https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth +2025-05-28 02:58:17,551 - mmseg - INFO - Start running, host: yiming@pasteur2.stanford.edu, work_dir: /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 +2025-05-28 02:58:17,552 - mmseg - INFO - Hooks will be executed in the following order: +before_run: +(VERY_HIGH ) PolyLrUpdaterHook +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_epoch: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_iter: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook + -------------------- +after_train_iter: +(ABOVE_NORMAL) OptimizerHook +(NORMAL ) CheckpointHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_train_epoch: +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_epoch: +(LOW ) 
IterTimerHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_epoch: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_run: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +2025-05-28 02:58:17,552 - mmseg - INFO - workflow: [('train', 1)], max: 80000 iters +2025-05-28 02:58:17,552 - mmseg - INFO - Checkpoints will be saved to /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 by HardDiskBackend. diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025728.log.json b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025728.log.json new file mode 100644 index 0000000000000000000000000000000000000000..296ba2959b301b8f6c71e9145c98c0a1209934c4 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_025728.log.json @@ -0,0 +1 @@ +{"env_info": "sys.platform: linux\nPython: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ]\nCUDA available: True\nGPU 0: NVIDIA TITAN RTX\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 12.2, V12.2.91\nGCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\nPyTorch: 1.11.0+cu113\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n\nTorchVision: 0.12.0+cu113\nOpenCV: 4.11.0\nMMCV: 1.5.0\nMMCV Compiler: GCC 7.3\nMMCV CUDA 
Compiler: 11.3\nMMSegmentation: 0.27.0+12dc934", "seed": 2000367376, "exp_name": "mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py", "mmseg_version": "0.27.0+12dc934", "config": "num_things_classes = 100\nnum_stuff_classes = 50\nnum_classes = 19\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoderMask2Former',\n pretrained=None,\n backbone=dict(\n type='InternImage',\n core_op='DCNv3',\n channels=320,\n depths=[6, 6, 32, 6],\n groups=[10, 20, 40, 80],\n mlp_ratio=4.0,\n drop_path_rate=0.5,\n norm_layer='LN',\n layer_scale=None,\n offset_scale=1.0,\n post_norm=False,\n dw_kernel_size=5,\n res_post_norm=True,\n level2_post_norm=True,\n level2_post_norm_block_ids=[5, 11, 17, 23, 29],\n center_feature_scale=True,\n with_cp=False,\n out_indices=(0, 1, 2, 3),\n init_cfg=None),\n decode_head=dict(\n type='Mask2FormerHead',\n in_channels=[320, 640, 1280, 2560],\n feat_channels=256,\n out_channels=256,\n in_index=[0, 1, 2, 3],\n num_things_classes=100,\n num_stuff_classes=50,\n num_queries=100,\n num_transformer_feat_level=3,\n pixel_decoder=dict(\n type='MSDeformAttnPixelDecoder',\n num_outs=3,\n norm_cfg=dict(type='GN', num_groups=32),\n act_cfg=dict(type='ReLU'),\n encoder=dict(\n type='DetrTransformerEncoder',\n num_layers=6,\n transformerlayers=dict(\n type='BaseTransformerLayer',\n attn_cfgs=dict(\n type='MultiScaleDeformableAttention',\n embed_dims=256,\n num_heads=8,\n num_levels=3,\n num_points=4,\n im2col_step=64,\n dropout=0.0,\n batch_first=False,\n norm_cfg=None,\n init_cfg=None),\n ffn_cfgs=dict(\n type='FFN',\n embed_dims=256,\n feedforward_channels=1024,\n num_fcs=2,\n ffn_drop=0.0,\n act_cfg=dict(type='ReLU', inplace=True),\n with_cp=False),\n operation_order=('self_attn', 'norm', 'ffn', 'norm')),\n init_cfg=None),\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n init_cfg=None),\n enforce_decoder_input_project=False,\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n transformer_decoder=dict(\n type='DetrTransformerDecoder',\n return_intermediate=True,\n num_layers=9,\n transformerlayers=dict(\n type='DetrTransformerDecoderLayer',\n attn_cfgs=dict(\n type='MultiheadAttention',\n embed_dims=256,\n num_heads=8,\n attn_drop=0.0,\n proj_drop=0.0,\n dropout_layer=None,\n batch_first=False),\n ffn_cfgs=dict(\n embed_dims=256,\n feedforward_channels=2048,\n num_fcs=2,\n act_cfg=dict(type='ReLU', inplace=True),\n ffn_drop=0.0,\n dropout_layer=None,\n add_identity=True,\n with_cp=False),\n feedforward_channels=2048,\n operation_order=('cross_attn', 'norm', 'self_attn', 'norm',\n 'ffn', 'norm')),\n init_cfg=None),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=2.0,\n reduction='mean',\n class_weight=[\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1\n ]),\n loss_mask=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=5.0),\n loss_dice=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n reduction='mean',\n naive_dice=True,\n eps=1.0,\n loss_weight=5.0),\n num_classes=19,\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n 
sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512))),\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512)),\n init_cfg=None)\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncrop_size = (1024, 1024)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(\n type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(\n type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n ],\n split='splits/fold_1_train_split.txt'),\n val=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', 
keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ],\n split='splits/fold_1_val_split.txt'),\n test=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/val/',\n ann_dir='gtFine/val/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n dict(type='TensorboardLoggerHook')\n ])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth'\nresume_from = None\nworkflow = [('train', 1)]\ncudnn_benchmark = True\noptimizer = dict(\n type='AdamW',\n lr=1e-05,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n constructor='CustomLayerDecayOptimizerConstructor',\n paramwise_cfg=dict(\n num_layers=50,\n layer_decay_rate=0.95,\n depths=[6, 6, 32, 6],\n offset_lr_scale=1.0))\noptimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))\nlr_config = dict(\n policy='poly',\n warmup='linear',\n warmup_iters=1500,\n warmup_ratio=1e-06,\n power=1.0,\n min_lr=0.0,\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=80000)\ncheckpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1)\nevaluation = dict(\n interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU')\nwork_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1'\ngpu_ids = range(0, 1)\nauto_resume = False\ndevice = 'cuda'\nseed = 2000367376\n", "CLASSES": ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"], "PALETTE": [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]], "hook_msgs": {}} diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031222.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031222.log new file mode 100644 index 0000000000000000000000000000000000000000..1cc0abfdf67ebd08731fca3f17393f3458b4efca --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031222.log @@ -0,0 +1,7658 @@ +2025-05-28 03:12:22,098 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 03:12:22,099 - mmseg - INFO - OpenCV num_threads is `8 +2025-05-28 03:12:22,137 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0: NVIDIA TITAN RTX +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 12.2, V12.2.91 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) 
Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 03:12:22,138 - mmseg - INFO - Distributed training: True +2025-05-28 03:12:22,570 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + 
batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + 
dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 1) +auto_resume = False + +2025-05-28 03:12:22,571 - mmseg - INFO - Set random seed to 1121623199, deterministic: False +2025-05-28 03:12:22,571 - mmseg - INFO - using core type: DCNv3 +2025-05-28 03:12:22,571 - mmseg - INFO - using 
activation layer: GELU +2025-05-28 03:12:22,572 - mmseg - INFO - using main norm layer: LN +2025-05-28 03:12:22,572 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 03:12:22,572 - mmseg - INFO - level2_post_norm: True +2025-05-28 03:12:22,572 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 03:12:22,572 - mmseg - INFO - res_post_norm: True +2025-05-28 03:12:22,572 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 03:12:39,262 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined 
`init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and 
after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.bias - torch.Size([640]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.weight - 
torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.1.blocks.3.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.bias - 
torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.weight - torch.Size([1280]): 
+The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.2.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.3.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.input_proj.bias - torch.Size([1280]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.bias - torch.Size([1280]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.8.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.9.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.output_proj.weight - 
torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.mask.weight - torch.Size([360, 1280]): 
+Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - 
torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined 
`init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.28.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.29.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - 
torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.bias - torch.Size([2560]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.input_proj.weight - 
torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.conv.weight - torch.Size([256, 2560, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.0.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.conv.weight - torch.Size([256, 1280, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.1.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.input_convs.1.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.conv.weight - torch.Size([256, 640, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.2.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.level_encoding.weight - torch.Size([3, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.conv.weight - torch.Size([256, 320, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.lateral_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.conv.weight - torch.Size([256, 256, 3, 3]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.output_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.mask_feature.weight - torch.Size([256, 256, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.mask_feature.bias - torch.Size([256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - 
torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - 
torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former +2025-05-28 03:12:53,983 - mmseg - INFO - Loaded 2316 images +2025-05-28 03:12:53,983 - mmseg - INFO - Built 
training dataset from config: {'type': 'CityscapesDataset', 'data_root': '/pasteur/u/yiming/homework4/cityscapes', 'img_dir': 'leftImg8bit/', 'ann_dir': 'gtFine/', 'pipeline': [{'type': 'LoadImageFromFile'}, {'type': 'LoadAnnotations'}, {'type': 'Resize', 'img_scale': (2048, 1024), 'ratio_range': (0.5, 2.0)}, {'type': 'RandomCrop', 'crop_size': (1024, 1024), 'cat_max_ratio': 0.75}, {'type': 'RandomFlip', 'prob': 0.5}, {'type': 'PhotoMetricDistortion'}, {'type': 'Normalize', 'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375], 'to_rgb': True}, {'type': 'Pad', 'size': (1024, 1024), 'pad_val': 0, 'seg_pad_val': 255}, {'type': 'ToMask'}, {'type': 'DefaultFormatBundle'}, {'type': 'Collect', 'keys': ['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']}], 'split': 'splits/fold_1_train_split.txt'} +2025-05-28 03:12:53,984 - mmseg - INFO - Number of samples in training dataset: 2316 +2025-05-28 03:12:59,722 - mmseg - INFO - {'num_layers': 50, 'layer_decay_rate': 0.95, 'depths': [6, 6, 32, 6], 'offset_lr_scale': 1.0} +2025-05-28 03:12:59,722 - mmseg - INFO - Build CustomLayerDecayOptimizerConstructor 0.950000 - 52 +2025-05-28 03:12:59,733 - mmseg - INFO - Param groups = { + "layer_0_decay": { + "param_names": [ + "backbone.patch_embed.conv1.weight", + "backbone.patch_embed.conv2.weight", + "decode_head.query_embed.weight", + "decode_head.query_feat.weight", + "decode_head.level_embed.weight", + "decode_head.cls_embed.weight", + "decode_head.mask_embed.0.weight", + "decode_head.mask_embed.2.weight", + "decode_head.mask_embed.4.weight" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.05 + }, + "layer_0_no_decay": { + "param_names": [ + "backbone.patch_embed.conv1.bias", + "backbone.patch_embed.norm1.1.weight", + "backbone.patch_embed.norm1.1.bias", + "backbone.patch_embed.conv2.bias", + "backbone.patch_embed.norm2.1.weight", + "backbone.patch_embed.norm2.1.bias", + "decode_head.cls_embed.bias", + "decode_head.mask_embed.0.bias", + "decode_head.mask_embed.2.bias", + "decode_head.mask_embed.4.bias" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.0 + }, + "layer_1_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.norm1.0.weight", + "backbone.levels.0.blocks.0.norm1.0.bias", + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.0.dcn.offset.bias", + "backbone.levels.0.blocks.0.dcn.mask.bias", + "backbone.levels.0.blocks.0.dcn.input_proj.bias", + "backbone.levels.0.blocks.0.dcn.output_proj.bias", + "backbone.levels.0.blocks.0.norm2.0.weight", + "backbone.levels.0.blocks.0.norm2.0.bias", + "backbone.levels.0.blocks.0.mlp.fc1.bias", + "backbone.levels.0.blocks.0.mlp.fc2.bias", + "backbone.levels.0.blocks.0.res_post_norm1.0.weight", + "backbone.levels.0.blocks.0.res_post_norm1.0.bias", + "backbone.levels.0.blocks.0.res_post_norm2.0.weight", + "backbone.levels.0.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.0 + }, + "layer_1_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.0.dcn.offset.weight", + "backbone.levels.0.blocks.0.dcn.mask.weight", + "backbone.levels.0.blocks.0.dcn.input_proj.weight", + 
"backbone.levels.0.blocks.0.dcn.output_proj.weight", + "backbone.levels.0.blocks.0.mlp.fc1.weight", + "backbone.levels.0.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.05 + }, + "layer_2_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.norm1.0.weight", + "backbone.levels.0.blocks.1.norm1.0.bias", + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.1.dcn.offset.bias", + "backbone.levels.0.blocks.1.dcn.mask.bias", + "backbone.levels.0.blocks.1.dcn.input_proj.bias", + "backbone.levels.0.blocks.1.dcn.output_proj.bias", + "backbone.levels.0.blocks.1.norm2.0.weight", + "backbone.levels.0.blocks.1.norm2.0.bias", + "backbone.levels.0.blocks.1.mlp.fc1.bias", + "backbone.levels.0.blocks.1.mlp.fc2.bias", + "backbone.levels.0.blocks.1.res_post_norm1.0.weight", + "backbone.levels.0.blocks.1.res_post_norm1.0.bias", + "backbone.levels.0.blocks.1.res_post_norm2.0.weight", + "backbone.levels.0.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.0 + }, + "layer_2_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.1.dcn.offset.weight", + "backbone.levels.0.blocks.1.dcn.mask.weight", + "backbone.levels.0.blocks.1.dcn.input_proj.weight", + "backbone.levels.0.blocks.1.dcn.output_proj.weight", + "backbone.levels.0.blocks.1.mlp.fc1.weight", + "backbone.levels.0.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.05 + }, + "layer_3_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.norm1.0.weight", + "backbone.levels.0.blocks.2.norm1.0.bias", + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.2.dcn.offset.bias", + "backbone.levels.0.blocks.2.dcn.mask.bias", + "backbone.levels.0.blocks.2.dcn.input_proj.bias", + "backbone.levels.0.blocks.2.dcn.output_proj.bias", + "backbone.levels.0.blocks.2.norm2.0.weight", + "backbone.levels.0.blocks.2.norm2.0.bias", + "backbone.levels.0.blocks.2.mlp.fc1.bias", + "backbone.levels.0.blocks.2.mlp.fc2.bias", + "backbone.levels.0.blocks.2.res_post_norm1.0.weight", + "backbone.levels.0.blocks.2.res_post_norm1.0.bias", + "backbone.levels.0.blocks.2.res_post_norm2.0.weight", + "backbone.levels.0.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.0 + }, + "layer_3_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.2.dcn.offset.weight", + "backbone.levels.0.blocks.2.dcn.mask.weight", + "backbone.levels.0.blocks.2.dcn.input_proj.weight", + "backbone.levels.0.blocks.2.dcn.output_proj.weight", + "backbone.levels.0.blocks.2.mlp.fc1.weight", + "backbone.levels.0.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.05 + }, + "layer_4_no_decay": { + "param_names": [ + 
"backbone.levels.0.blocks.3.norm1.0.weight", + "backbone.levels.0.blocks.3.norm1.0.bias", + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.3.dcn.offset.bias", + "backbone.levels.0.blocks.3.dcn.mask.bias", + "backbone.levels.0.blocks.3.dcn.input_proj.bias", + "backbone.levels.0.blocks.3.dcn.output_proj.bias", + "backbone.levels.0.blocks.3.norm2.0.weight", + "backbone.levels.0.blocks.3.norm2.0.bias", + "backbone.levels.0.blocks.3.mlp.fc1.bias", + "backbone.levels.0.blocks.3.mlp.fc2.bias", + "backbone.levels.0.blocks.3.res_post_norm1.0.weight", + "backbone.levels.0.blocks.3.res_post_norm1.0.bias", + "backbone.levels.0.blocks.3.res_post_norm2.0.weight", + "backbone.levels.0.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.0 + }, + "layer_4_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.3.dcn.offset.weight", + "backbone.levels.0.blocks.3.dcn.mask.weight", + "backbone.levels.0.blocks.3.dcn.input_proj.weight", + "backbone.levels.0.blocks.3.dcn.output_proj.weight", + "backbone.levels.0.blocks.3.mlp.fc1.weight", + "backbone.levels.0.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.05 + }, + "layer_5_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.norm1.0.weight", + "backbone.levels.0.blocks.4.norm1.0.bias", + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.4.dcn.offset.bias", + "backbone.levels.0.blocks.4.dcn.mask.bias", + "backbone.levels.0.blocks.4.dcn.input_proj.bias", + "backbone.levels.0.blocks.4.dcn.output_proj.bias", + "backbone.levels.0.blocks.4.norm2.0.weight", + "backbone.levels.0.blocks.4.norm2.0.bias", + "backbone.levels.0.blocks.4.mlp.fc1.bias", + "backbone.levels.0.blocks.4.mlp.fc2.bias", + "backbone.levels.0.blocks.4.res_post_norm1.0.weight", + "backbone.levels.0.blocks.4.res_post_norm1.0.bias", + "backbone.levels.0.blocks.4.res_post_norm2.0.weight", + "backbone.levels.0.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.0 + }, + "layer_5_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.4.dcn.offset.weight", + "backbone.levels.0.blocks.4.dcn.mask.weight", + "backbone.levels.0.blocks.4.dcn.input_proj.weight", + "backbone.levels.0.blocks.4.dcn.output_proj.weight", + "backbone.levels.0.blocks.4.mlp.fc1.weight", + "backbone.levels.0.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.05 + }, + "layer_6_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.norm1.0.weight", + "backbone.levels.0.blocks.5.norm1.0.bias", + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight", + 
"backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.5.dcn.offset.bias", + "backbone.levels.0.blocks.5.dcn.mask.bias", + "backbone.levels.0.blocks.5.dcn.input_proj.bias", + "backbone.levels.0.blocks.5.dcn.output_proj.bias", + "backbone.levels.0.blocks.5.norm2.0.weight", + "backbone.levels.0.blocks.5.norm2.0.bias", + "backbone.levels.0.blocks.5.mlp.fc1.bias", + "backbone.levels.0.blocks.5.mlp.fc2.bias", + "backbone.levels.0.blocks.5.res_post_norm1.0.weight", + "backbone.levels.0.blocks.5.res_post_norm1.0.bias", + "backbone.levels.0.blocks.5.res_post_norm2.0.weight", + "backbone.levels.0.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.0 + }, + "layer_6_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.5.dcn.offset.weight", + "backbone.levels.0.blocks.5.dcn.mask.weight", + "backbone.levels.0.blocks.5.dcn.input_proj.weight", + "backbone.levels.0.blocks.5.dcn.output_proj.weight", + "backbone.levels.0.blocks.5.mlp.fc1.weight", + "backbone.levels.0.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.05 + }, + "layer_7_no_decay": { + "param_names": [ + "backbone.levels.0.norm.0.weight", + "backbone.levels.0.norm.0.bias", + "backbone.levels.0.downsample.norm.1.weight", + "backbone.levels.0.downsample.norm.1.bias", + "backbone.levels.1.blocks.0.norm1.0.weight", + "backbone.levels.1.blocks.0.norm1.0.bias", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.0.dcn.offset.bias", + "backbone.levels.1.blocks.0.dcn.mask.bias", + "backbone.levels.1.blocks.0.dcn.input_proj.bias", + "backbone.levels.1.blocks.0.dcn.output_proj.bias", + "backbone.levels.1.blocks.0.norm2.0.weight", + "backbone.levels.1.blocks.0.norm2.0.bias", + "backbone.levels.1.blocks.0.mlp.fc1.bias", + "backbone.levels.1.blocks.0.mlp.fc2.bias", + "backbone.levels.1.blocks.0.res_post_norm1.0.weight", + "backbone.levels.1.blocks.0.res_post_norm1.0.bias", + "backbone.levels.1.blocks.0.res_post_norm2.0.weight", + "backbone.levels.1.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.0 + }, + "layer_7_decay": { + "param_names": [ + "backbone.levels.0.downsample.conv.weight", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.0.dcn.offset.weight", + "backbone.levels.1.blocks.0.dcn.mask.weight", + "backbone.levels.1.blocks.0.dcn.input_proj.weight", + "backbone.levels.1.blocks.0.dcn.output_proj.weight", + "backbone.levels.1.blocks.0.mlp.fc1.weight", + "backbone.levels.1.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.05 + }, + "layer_8_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.norm1.0.weight", + "backbone.levels.1.blocks.1.norm1.0.bias", + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.1.dcn.offset.bias", + 
"backbone.levels.1.blocks.1.dcn.mask.bias", + "backbone.levels.1.blocks.1.dcn.input_proj.bias", + "backbone.levels.1.blocks.1.dcn.output_proj.bias", + "backbone.levels.1.blocks.1.norm2.0.weight", + "backbone.levels.1.blocks.1.norm2.0.bias", + "backbone.levels.1.blocks.1.mlp.fc1.bias", + "backbone.levels.1.blocks.1.mlp.fc2.bias", + "backbone.levels.1.blocks.1.res_post_norm1.0.weight", + "backbone.levels.1.blocks.1.res_post_norm1.0.bias", + "backbone.levels.1.blocks.1.res_post_norm2.0.weight", + "backbone.levels.1.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.0 + }, + "layer_8_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.1.dcn.offset.weight", + "backbone.levels.1.blocks.1.dcn.mask.weight", + "backbone.levels.1.blocks.1.dcn.input_proj.weight", + "backbone.levels.1.blocks.1.dcn.output_proj.weight", + "backbone.levels.1.blocks.1.mlp.fc1.weight", + "backbone.levels.1.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.05 + }, + "layer_9_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.norm1.0.weight", + "backbone.levels.1.blocks.2.norm1.0.bias", + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.2.dcn.offset.bias", + "backbone.levels.1.blocks.2.dcn.mask.bias", + "backbone.levels.1.blocks.2.dcn.input_proj.bias", + "backbone.levels.1.blocks.2.dcn.output_proj.bias", + "backbone.levels.1.blocks.2.norm2.0.weight", + "backbone.levels.1.blocks.2.norm2.0.bias", + "backbone.levels.1.blocks.2.mlp.fc1.bias", + "backbone.levels.1.blocks.2.mlp.fc2.bias", + "backbone.levels.1.blocks.2.res_post_norm1.0.weight", + "backbone.levels.1.blocks.2.res_post_norm1.0.bias", + "backbone.levels.1.blocks.2.res_post_norm2.0.weight", + "backbone.levels.1.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.0 + }, + "layer_9_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.2.dcn.offset.weight", + "backbone.levels.1.blocks.2.dcn.mask.weight", + "backbone.levels.1.blocks.2.dcn.input_proj.weight", + "backbone.levels.1.blocks.2.dcn.output_proj.weight", + "backbone.levels.1.blocks.2.mlp.fc1.weight", + "backbone.levels.1.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.05 + }, + "layer_10_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.norm1.0.weight", + "backbone.levels.1.blocks.3.norm1.0.bias", + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.3.dcn.offset.bias", + "backbone.levels.1.blocks.3.dcn.mask.bias", + "backbone.levels.1.blocks.3.dcn.input_proj.bias", + "backbone.levels.1.blocks.3.dcn.output_proj.bias", + "backbone.levels.1.blocks.3.norm2.0.weight", + "backbone.levels.1.blocks.3.norm2.0.bias", + "backbone.levels.1.blocks.3.mlp.fc1.bias", + 
"backbone.levels.1.blocks.3.mlp.fc2.bias", + "backbone.levels.1.blocks.3.res_post_norm1.0.weight", + "backbone.levels.1.blocks.3.res_post_norm1.0.bias", + "backbone.levels.1.blocks.3.res_post_norm2.0.weight", + "backbone.levels.1.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.0 + }, + "layer_10_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.3.dcn.offset.weight", + "backbone.levels.1.blocks.3.dcn.mask.weight", + "backbone.levels.1.blocks.3.dcn.input_proj.weight", + "backbone.levels.1.blocks.3.dcn.output_proj.weight", + "backbone.levels.1.blocks.3.mlp.fc1.weight", + "backbone.levels.1.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.05 + }, + "layer_11_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.norm1.0.weight", + "backbone.levels.1.blocks.4.norm1.0.bias", + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.4.dcn.offset.bias", + "backbone.levels.1.blocks.4.dcn.mask.bias", + "backbone.levels.1.blocks.4.dcn.input_proj.bias", + "backbone.levels.1.blocks.4.dcn.output_proj.bias", + "backbone.levels.1.blocks.4.norm2.0.weight", + "backbone.levels.1.blocks.4.norm2.0.bias", + "backbone.levels.1.blocks.4.mlp.fc1.bias", + "backbone.levels.1.blocks.4.mlp.fc2.bias", + "backbone.levels.1.blocks.4.res_post_norm1.0.weight", + "backbone.levels.1.blocks.4.res_post_norm1.0.bias", + "backbone.levels.1.blocks.4.res_post_norm2.0.weight", + "backbone.levels.1.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.0 + }, + "layer_11_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.4.dcn.offset.weight", + "backbone.levels.1.blocks.4.dcn.mask.weight", + "backbone.levels.1.blocks.4.dcn.input_proj.weight", + "backbone.levels.1.blocks.4.dcn.output_proj.weight", + "backbone.levels.1.blocks.4.mlp.fc1.weight", + "backbone.levels.1.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.05 + }, + "layer_12_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.norm1.0.weight", + "backbone.levels.1.blocks.5.norm1.0.bias", + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.5.dcn.offset.bias", + "backbone.levels.1.blocks.5.dcn.mask.bias", + "backbone.levels.1.blocks.5.dcn.input_proj.bias", + "backbone.levels.1.blocks.5.dcn.output_proj.bias", + "backbone.levels.1.blocks.5.norm2.0.weight", + "backbone.levels.1.blocks.5.norm2.0.bias", + "backbone.levels.1.blocks.5.mlp.fc1.bias", + "backbone.levels.1.blocks.5.mlp.fc2.bias", + "backbone.levels.1.blocks.5.res_post_norm1.0.weight", + "backbone.levels.1.blocks.5.res_post_norm1.0.bias", + "backbone.levels.1.blocks.5.res_post_norm2.0.weight", + "backbone.levels.1.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.13527595427905592, + 
"lr": 1.3527595427905593e-06, + "weight_decay": 0.0 + }, + "layer_12_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.5.dcn.offset.weight", + "backbone.levels.1.blocks.5.dcn.mask.weight", + "backbone.levels.1.blocks.5.dcn.input_proj.weight", + "backbone.levels.1.blocks.5.dcn.output_proj.weight", + "backbone.levels.1.blocks.5.mlp.fc1.weight", + "backbone.levels.1.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.05 + }, + "layer_13_no_decay": { + "param_names": [ + "backbone.levels.1.norm.0.weight", + "backbone.levels.1.norm.0.bias", + "backbone.levels.1.downsample.norm.1.weight", + "backbone.levels.1.downsample.norm.1.bias", + "backbone.levels.2.blocks.0.norm1.0.weight", + "backbone.levels.2.blocks.0.norm1.0.bias", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.0.dcn.offset.bias", + "backbone.levels.2.blocks.0.dcn.mask.bias", + "backbone.levels.2.blocks.0.dcn.input_proj.bias", + "backbone.levels.2.blocks.0.dcn.output_proj.bias", + "backbone.levels.2.blocks.0.norm2.0.weight", + "backbone.levels.2.blocks.0.norm2.0.bias", + "backbone.levels.2.blocks.0.mlp.fc1.bias", + "backbone.levels.2.blocks.0.mlp.fc2.bias", + "backbone.levels.2.blocks.0.res_post_norm1.0.weight", + "backbone.levels.2.blocks.0.res_post_norm1.0.bias", + "backbone.levels.2.blocks.0.res_post_norm2.0.weight", + "backbone.levels.2.blocks.0.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.0.0.weight", + "backbone.levels.2.post_norms.0.0.bias" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.0 + }, + "layer_13_decay": { + "param_names": [ + "backbone.levels.1.downsample.conv.weight", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.0.dcn.offset.weight", + "backbone.levels.2.blocks.0.dcn.mask.weight", + "backbone.levels.2.blocks.0.dcn.input_proj.weight", + "backbone.levels.2.blocks.0.dcn.output_proj.weight", + "backbone.levels.2.blocks.0.mlp.fc1.weight", + "backbone.levels.2.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.05 + }, + "layer_14_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.norm1.0.weight", + "backbone.levels.2.blocks.1.norm1.0.bias", + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.1.dcn.offset.bias", + "backbone.levels.2.blocks.1.dcn.mask.bias", + "backbone.levels.2.blocks.1.dcn.input_proj.bias", + "backbone.levels.2.blocks.1.dcn.output_proj.bias", + "backbone.levels.2.blocks.1.norm2.0.weight", + "backbone.levels.2.blocks.1.norm2.0.bias", + "backbone.levels.2.blocks.1.mlp.fc1.bias", + "backbone.levels.2.blocks.1.mlp.fc2.bias", + "backbone.levels.2.blocks.1.res_post_norm1.0.weight", + "backbone.levels.2.blocks.1.res_post_norm1.0.bias", + "backbone.levels.2.blocks.1.res_post_norm2.0.weight", + "backbone.levels.2.blocks.1.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.1.0.weight", + 
"backbone.levels.2.post_norms.1.0.bias" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.0 + }, + "layer_14_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.1.dcn.offset.weight", + "backbone.levels.2.blocks.1.dcn.mask.weight", + "backbone.levels.2.blocks.1.dcn.input_proj.weight", + "backbone.levels.2.blocks.1.dcn.output_proj.weight", + "backbone.levels.2.blocks.1.mlp.fc1.weight", + "backbone.levels.2.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.05 + }, + "layer_15_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.norm1.0.weight", + "backbone.levels.2.blocks.2.norm1.0.bias", + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.2.dcn.offset.bias", + "backbone.levels.2.blocks.2.dcn.mask.bias", + "backbone.levels.2.blocks.2.dcn.input_proj.bias", + "backbone.levels.2.blocks.2.dcn.output_proj.bias", + "backbone.levels.2.blocks.2.norm2.0.weight", + "backbone.levels.2.blocks.2.norm2.0.bias", + "backbone.levels.2.blocks.2.mlp.fc1.bias", + "backbone.levels.2.blocks.2.mlp.fc2.bias", + "backbone.levels.2.blocks.2.res_post_norm1.0.weight", + "backbone.levels.2.blocks.2.res_post_norm1.0.bias", + "backbone.levels.2.blocks.2.res_post_norm2.0.weight", + "backbone.levels.2.blocks.2.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.2.0.weight", + "backbone.levels.2.post_norms.2.0.bias" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.0 + }, + "layer_15_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.2.dcn.offset.weight", + "backbone.levels.2.blocks.2.dcn.mask.weight", + "backbone.levels.2.blocks.2.dcn.input_proj.weight", + "backbone.levels.2.blocks.2.dcn.output_proj.weight", + "backbone.levels.2.blocks.2.mlp.fc1.weight", + "backbone.levels.2.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.05 + }, + "layer_16_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.norm1.0.weight", + "backbone.levels.2.blocks.3.norm1.0.bias", + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.3.dcn.offset.bias", + "backbone.levels.2.blocks.3.dcn.mask.bias", + "backbone.levels.2.blocks.3.dcn.input_proj.bias", + "backbone.levels.2.blocks.3.dcn.output_proj.bias", + "backbone.levels.2.blocks.3.norm2.0.weight", + "backbone.levels.2.blocks.3.norm2.0.bias", + "backbone.levels.2.blocks.3.mlp.fc1.bias", + "backbone.levels.2.blocks.3.mlp.fc2.bias", + "backbone.levels.2.blocks.3.res_post_norm1.0.weight", + "backbone.levels.2.blocks.3.res_post_norm1.0.bias", + "backbone.levels.2.blocks.3.res_post_norm2.0.weight", + "backbone.levels.2.blocks.3.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.3.0.weight", + "backbone.levels.2.post_norms.3.0.bias" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.0 
+ }, + "layer_16_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.3.dcn.offset.weight", + "backbone.levels.2.blocks.3.dcn.mask.weight", + "backbone.levels.2.blocks.3.dcn.input_proj.weight", + "backbone.levels.2.blocks.3.dcn.output_proj.weight", + "backbone.levels.2.blocks.3.mlp.fc1.weight", + "backbone.levels.2.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.05 + }, + "layer_17_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.norm1.0.weight", + "backbone.levels.2.blocks.4.norm1.0.bias", + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.4.dcn.offset.bias", + "backbone.levels.2.blocks.4.dcn.mask.bias", + "backbone.levels.2.blocks.4.dcn.input_proj.bias", + "backbone.levels.2.blocks.4.dcn.output_proj.bias", + "backbone.levels.2.blocks.4.norm2.0.weight", + "backbone.levels.2.blocks.4.norm2.0.bias", + "backbone.levels.2.blocks.4.mlp.fc1.bias", + "backbone.levels.2.blocks.4.mlp.fc2.bias", + "backbone.levels.2.blocks.4.res_post_norm1.0.weight", + "backbone.levels.2.blocks.4.res_post_norm1.0.bias", + "backbone.levels.2.blocks.4.res_post_norm2.0.weight", + "backbone.levels.2.blocks.4.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.4.0.weight", + "backbone.levels.2.post_norms.4.0.bias" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.0 + }, + "layer_17_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.4.dcn.offset.weight", + "backbone.levels.2.blocks.4.dcn.mask.weight", + "backbone.levels.2.blocks.4.dcn.input_proj.weight", + "backbone.levels.2.blocks.4.dcn.output_proj.weight", + "backbone.levels.2.blocks.4.mlp.fc1.weight", + "backbone.levels.2.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.05 + }, + "layer_18_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.norm1.0.weight", + "backbone.levels.2.blocks.5.norm1.0.bias", + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.5.dcn.offset.bias", + "backbone.levels.2.blocks.5.dcn.mask.bias", + "backbone.levels.2.blocks.5.dcn.input_proj.bias", + "backbone.levels.2.blocks.5.dcn.output_proj.bias", + "backbone.levels.2.blocks.5.norm2.0.weight", + "backbone.levels.2.blocks.5.norm2.0.bias", + "backbone.levels.2.blocks.5.mlp.fc1.bias", + "backbone.levels.2.blocks.5.mlp.fc2.bias", + "backbone.levels.2.blocks.5.res_post_norm1.0.weight", + "backbone.levels.2.blocks.5.res_post_norm1.0.bias", + "backbone.levels.2.blocks.5.res_post_norm2.0.weight", + "backbone.levels.2.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.0 + }, + "layer_18_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.5.dcn.offset.weight", + 
"backbone.levels.2.blocks.5.dcn.mask.weight", + "backbone.levels.2.blocks.5.dcn.input_proj.weight", + "backbone.levels.2.blocks.5.dcn.output_proj.weight", + "backbone.levels.2.blocks.5.mlp.fc1.weight", + "backbone.levels.2.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.05 + }, + "layer_19_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.norm1.0.weight", + "backbone.levels.2.blocks.6.norm1.0.bias", + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.6.dcn.offset.bias", + "backbone.levels.2.blocks.6.dcn.mask.bias", + "backbone.levels.2.blocks.6.dcn.input_proj.bias", + "backbone.levels.2.blocks.6.dcn.output_proj.bias", + "backbone.levels.2.blocks.6.norm2.0.weight", + "backbone.levels.2.blocks.6.norm2.0.bias", + "backbone.levels.2.blocks.6.mlp.fc1.bias", + "backbone.levels.2.blocks.6.mlp.fc2.bias", + "backbone.levels.2.blocks.6.res_post_norm1.0.weight", + "backbone.levels.2.blocks.6.res_post_norm1.0.bias", + "backbone.levels.2.blocks.6.res_post_norm2.0.weight", + "backbone.levels.2.blocks.6.res_post_norm2.0.bias" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.0 + }, + "layer_19_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.6.dcn.offset.weight", + "backbone.levels.2.blocks.6.dcn.mask.weight", + "backbone.levels.2.blocks.6.dcn.input_proj.weight", + "backbone.levels.2.blocks.6.dcn.output_proj.weight", + "backbone.levels.2.blocks.6.mlp.fc1.weight", + "backbone.levels.2.blocks.6.mlp.fc2.weight" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.05 + }, + "layer_20_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.norm1.0.weight", + "backbone.levels.2.blocks.7.norm1.0.bias", + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.7.dcn.offset.bias", + "backbone.levels.2.blocks.7.dcn.mask.bias", + "backbone.levels.2.blocks.7.dcn.input_proj.bias", + "backbone.levels.2.blocks.7.dcn.output_proj.bias", + "backbone.levels.2.blocks.7.norm2.0.weight", + "backbone.levels.2.blocks.7.norm2.0.bias", + "backbone.levels.2.blocks.7.mlp.fc1.bias", + "backbone.levels.2.blocks.7.mlp.fc2.bias", + "backbone.levels.2.blocks.7.res_post_norm1.0.weight", + "backbone.levels.2.blocks.7.res_post_norm1.0.bias", + "backbone.levels.2.blocks.7.res_post_norm2.0.weight", + "backbone.levels.2.blocks.7.res_post_norm2.0.bias" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.0 + }, + "layer_20_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.7.dcn.offset.weight", + "backbone.levels.2.blocks.7.dcn.mask.weight", + "backbone.levels.2.blocks.7.dcn.input_proj.weight", + "backbone.levels.2.blocks.7.dcn.output_proj.weight", + "backbone.levels.2.blocks.7.mlp.fc1.weight", + "backbone.levels.2.blocks.7.mlp.fc2.weight" + ], + "lr_scale": 0.2039068257457904, + "lr": 
2.039068257457904e-06, + "weight_decay": 0.05 + }, + "layer_21_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.norm1.0.weight", + "backbone.levels.2.blocks.8.norm1.0.bias", + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.8.dcn.offset.bias", + "backbone.levels.2.blocks.8.dcn.mask.bias", + "backbone.levels.2.blocks.8.dcn.input_proj.bias", + "backbone.levels.2.blocks.8.dcn.output_proj.bias", + "backbone.levels.2.blocks.8.norm2.0.weight", + "backbone.levels.2.blocks.8.norm2.0.bias", + "backbone.levels.2.blocks.8.mlp.fc1.bias", + "backbone.levels.2.blocks.8.mlp.fc2.bias", + "backbone.levels.2.blocks.8.res_post_norm1.0.weight", + "backbone.levels.2.blocks.8.res_post_norm1.0.bias", + "backbone.levels.2.blocks.8.res_post_norm2.0.weight", + "backbone.levels.2.blocks.8.res_post_norm2.0.bias" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.0 + }, + "layer_21_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.8.dcn.offset.weight", + "backbone.levels.2.blocks.8.dcn.mask.weight", + "backbone.levels.2.blocks.8.dcn.input_proj.weight", + "backbone.levels.2.blocks.8.dcn.output_proj.weight", + "backbone.levels.2.blocks.8.mlp.fc1.weight", + "backbone.levels.2.blocks.8.mlp.fc2.weight" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.05 + }, + "layer_22_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.norm1.0.weight", + "backbone.levels.2.blocks.9.norm1.0.bias", + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.9.dcn.offset.bias", + "backbone.levels.2.blocks.9.dcn.mask.bias", + "backbone.levels.2.blocks.9.dcn.input_proj.bias", + "backbone.levels.2.blocks.9.dcn.output_proj.bias", + "backbone.levels.2.blocks.9.norm2.0.weight", + "backbone.levels.2.blocks.9.norm2.0.bias", + "backbone.levels.2.blocks.9.mlp.fc1.bias", + "backbone.levels.2.blocks.9.mlp.fc2.bias", + "backbone.levels.2.blocks.9.res_post_norm1.0.weight", + "backbone.levels.2.blocks.9.res_post_norm1.0.bias", + "backbone.levels.2.blocks.9.res_post_norm2.0.weight", + "backbone.levels.2.blocks.9.res_post_norm2.0.bias" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.0 + }, + "layer_22_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.9.dcn.offset.weight", + "backbone.levels.2.blocks.9.dcn.mask.weight", + "backbone.levels.2.blocks.9.dcn.input_proj.weight", + "backbone.levels.2.blocks.9.dcn.output_proj.weight", + "backbone.levels.2.blocks.9.mlp.fc1.weight", + "backbone.levels.2.blocks.9.mlp.fc2.weight" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.05 + }, + "layer_23_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.norm1.0.weight", + "backbone.levels.2.blocks.10.norm1.0.bias", + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.bias", 
+ "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.10.dcn.offset.bias", + "backbone.levels.2.blocks.10.dcn.mask.bias", + "backbone.levels.2.blocks.10.dcn.input_proj.bias", + "backbone.levels.2.blocks.10.dcn.output_proj.bias", + "backbone.levels.2.blocks.10.norm2.0.weight", + "backbone.levels.2.blocks.10.norm2.0.bias", + "backbone.levels.2.blocks.10.mlp.fc1.bias", + "backbone.levels.2.blocks.10.mlp.fc2.bias", + "backbone.levels.2.blocks.10.res_post_norm1.0.weight", + "backbone.levels.2.blocks.10.res_post_norm1.0.bias", + "backbone.levels.2.blocks.10.res_post_norm2.0.weight", + "backbone.levels.2.blocks.10.res_post_norm2.0.bias" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.0 + }, + "layer_23_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.10.dcn.offset.weight", + "backbone.levels.2.blocks.10.dcn.mask.weight", + "backbone.levels.2.blocks.10.dcn.input_proj.weight", + "backbone.levels.2.blocks.10.dcn.output_proj.weight", + "backbone.levels.2.blocks.10.mlp.fc1.weight", + "backbone.levels.2.blocks.10.mlp.fc2.weight" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.05 + }, + "layer_24_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.norm1.0.weight", + "backbone.levels.2.blocks.11.norm1.0.bias", + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.11.dcn.offset.bias", + "backbone.levels.2.blocks.11.dcn.mask.bias", + "backbone.levels.2.blocks.11.dcn.input_proj.bias", + "backbone.levels.2.blocks.11.dcn.output_proj.bias", + "backbone.levels.2.blocks.11.norm2.0.weight", + "backbone.levels.2.blocks.11.norm2.0.bias", + "backbone.levels.2.blocks.11.mlp.fc1.bias", + "backbone.levels.2.blocks.11.mlp.fc2.bias", + "backbone.levels.2.blocks.11.res_post_norm1.0.weight", + "backbone.levels.2.blocks.11.res_post_norm1.0.bias", + "backbone.levels.2.blocks.11.res_post_norm2.0.weight", + "backbone.levels.2.blocks.11.res_post_norm2.0.bias" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.0 + }, + "layer_24_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.11.dcn.offset.weight", + "backbone.levels.2.blocks.11.dcn.mask.weight", + "backbone.levels.2.blocks.11.dcn.input_proj.weight", + "backbone.levels.2.blocks.11.dcn.output_proj.weight", + "backbone.levels.2.blocks.11.mlp.fc1.weight", + "backbone.levels.2.blocks.11.mlp.fc2.weight" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.05 + }, + "layer_25_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.norm1.0.weight", + "backbone.levels.2.blocks.12.norm1.0.bias", + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.12.dcn.offset.bias", + "backbone.levels.2.blocks.12.dcn.mask.bias", + "backbone.levels.2.blocks.12.dcn.input_proj.bias", 
+ "backbone.levels.2.blocks.12.dcn.output_proj.bias", + "backbone.levels.2.blocks.12.norm2.0.weight", + "backbone.levels.2.blocks.12.norm2.0.bias", + "backbone.levels.2.blocks.12.mlp.fc1.bias", + "backbone.levels.2.blocks.12.mlp.fc2.bias", + "backbone.levels.2.blocks.12.res_post_norm1.0.weight", + "backbone.levels.2.blocks.12.res_post_norm1.0.bias", + "backbone.levels.2.blocks.12.res_post_norm2.0.weight", + "backbone.levels.2.blocks.12.res_post_norm2.0.bias" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.0 + }, + "layer_25_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.12.dcn.offset.weight", + "backbone.levels.2.blocks.12.dcn.mask.weight", + "backbone.levels.2.blocks.12.dcn.input_proj.weight", + "backbone.levels.2.blocks.12.dcn.output_proj.weight", + "backbone.levels.2.blocks.12.mlp.fc1.weight", + "backbone.levels.2.blocks.12.mlp.fc2.weight" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.05 + }, + "layer_26_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.norm1.0.weight", + "backbone.levels.2.blocks.13.norm1.0.bias", + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.13.dcn.offset.bias", + "backbone.levels.2.blocks.13.dcn.mask.bias", + "backbone.levels.2.blocks.13.dcn.input_proj.bias", + "backbone.levels.2.blocks.13.dcn.output_proj.bias", + "backbone.levels.2.blocks.13.norm2.0.weight", + "backbone.levels.2.blocks.13.norm2.0.bias", + "backbone.levels.2.blocks.13.mlp.fc1.bias", + "backbone.levels.2.blocks.13.mlp.fc2.bias", + "backbone.levels.2.blocks.13.res_post_norm1.0.weight", + "backbone.levels.2.blocks.13.res_post_norm1.0.bias", + "backbone.levels.2.blocks.13.res_post_norm2.0.weight", + "backbone.levels.2.blocks.13.res_post_norm2.0.bias" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.0 + }, + "layer_26_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.13.dcn.offset.weight", + "backbone.levels.2.blocks.13.dcn.mask.weight", + "backbone.levels.2.blocks.13.dcn.input_proj.weight", + "backbone.levels.2.blocks.13.dcn.output_proj.weight", + "backbone.levels.2.blocks.13.mlp.fc1.weight", + "backbone.levels.2.blocks.13.mlp.fc2.weight" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.05 + }, + "layer_27_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.norm1.0.weight", + "backbone.levels.2.blocks.14.norm1.0.bias", + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.14.dcn.offset.bias", + "backbone.levels.2.blocks.14.dcn.mask.bias", + "backbone.levels.2.blocks.14.dcn.input_proj.bias", + "backbone.levels.2.blocks.14.dcn.output_proj.bias", + "backbone.levels.2.blocks.14.norm2.0.weight", + "backbone.levels.2.blocks.14.norm2.0.bias", + "backbone.levels.2.blocks.14.mlp.fc1.bias", + "backbone.levels.2.blocks.14.mlp.fc2.bias", + 
"backbone.levels.2.blocks.14.res_post_norm1.0.weight", + "backbone.levels.2.blocks.14.res_post_norm1.0.bias", + "backbone.levels.2.blocks.14.res_post_norm2.0.weight", + "backbone.levels.2.blocks.14.res_post_norm2.0.bias" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.0 + }, + "layer_27_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.14.dcn.offset.weight", + "backbone.levels.2.blocks.14.dcn.mask.weight", + "backbone.levels.2.blocks.14.dcn.input_proj.weight", + "backbone.levels.2.blocks.14.dcn.output_proj.weight", + "backbone.levels.2.blocks.14.mlp.fc1.weight", + "backbone.levels.2.blocks.14.mlp.fc2.weight" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.05 + }, + "layer_28_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.norm1.0.weight", + "backbone.levels.2.blocks.15.norm1.0.bias", + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.15.dcn.offset.bias", + "backbone.levels.2.blocks.15.dcn.mask.bias", + "backbone.levels.2.blocks.15.dcn.input_proj.bias", + "backbone.levels.2.blocks.15.dcn.output_proj.bias", + "backbone.levels.2.blocks.15.norm2.0.weight", + "backbone.levels.2.blocks.15.norm2.0.bias", + "backbone.levels.2.blocks.15.mlp.fc1.bias", + "backbone.levels.2.blocks.15.mlp.fc2.bias", + "backbone.levels.2.blocks.15.res_post_norm1.0.weight", + "backbone.levels.2.blocks.15.res_post_norm1.0.bias", + "backbone.levels.2.blocks.15.res_post_norm2.0.weight", + "backbone.levels.2.blocks.15.res_post_norm2.0.bias" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.0 + }, + "layer_28_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.15.dcn.offset.weight", + "backbone.levels.2.blocks.15.dcn.mask.weight", + "backbone.levels.2.blocks.15.dcn.input_proj.weight", + "backbone.levels.2.blocks.15.dcn.output_proj.weight", + "backbone.levels.2.blocks.15.mlp.fc1.weight", + "backbone.levels.2.blocks.15.mlp.fc2.weight" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.05 + }, + "layer_29_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.norm1.0.weight", + "backbone.levels.2.blocks.16.norm1.0.bias", + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.16.dcn.offset.bias", + "backbone.levels.2.blocks.16.dcn.mask.bias", + "backbone.levels.2.blocks.16.dcn.input_proj.bias", + "backbone.levels.2.blocks.16.dcn.output_proj.bias", + "backbone.levels.2.blocks.16.norm2.0.weight", + "backbone.levels.2.blocks.16.norm2.0.bias", + "backbone.levels.2.blocks.16.mlp.fc1.bias", + "backbone.levels.2.blocks.16.mlp.fc2.bias", + "backbone.levels.2.blocks.16.res_post_norm1.0.weight", + "backbone.levels.2.blocks.16.res_post_norm1.0.bias", + "backbone.levels.2.blocks.16.res_post_norm2.0.weight", + "backbone.levels.2.blocks.16.res_post_norm2.0.bias" + ], + "lr_scale": 0.323533544973709, + 
"lr": 3.2353354497370905e-06, + "weight_decay": 0.0 + }, + "layer_29_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.16.dcn.offset.weight", + "backbone.levels.2.blocks.16.dcn.mask.weight", + "backbone.levels.2.blocks.16.dcn.input_proj.weight", + "backbone.levels.2.blocks.16.dcn.output_proj.weight", + "backbone.levels.2.blocks.16.mlp.fc1.weight", + "backbone.levels.2.blocks.16.mlp.fc2.weight" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.05 + }, + "layer_30_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.norm1.0.weight", + "backbone.levels.2.blocks.17.norm1.0.bias", + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.17.dcn.offset.bias", + "backbone.levels.2.blocks.17.dcn.mask.bias", + "backbone.levels.2.blocks.17.dcn.input_proj.bias", + "backbone.levels.2.blocks.17.dcn.output_proj.bias", + "backbone.levels.2.blocks.17.norm2.0.weight", + "backbone.levels.2.blocks.17.norm2.0.bias", + "backbone.levels.2.blocks.17.mlp.fc1.bias", + "backbone.levels.2.blocks.17.mlp.fc2.bias", + "backbone.levels.2.blocks.17.res_post_norm1.0.weight", + "backbone.levels.2.blocks.17.res_post_norm1.0.bias", + "backbone.levels.2.blocks.17.res_post_norm2.0.weight", + "backbone.levels.2.blocks.17.res_post_norm2.0.bias" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.0 + }, + "layer_30_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.17.dcn.offset.weight", + "backbone.levels.2.blocks.17.dcn.mask.weight", + "backbone.levels.2.blocks.17.dcn.input_proj.weight", + "backbone.levels.2.blocks.17.dcn.output_proj.weight", + "backbone.levels.2.blocks.17.mlp.fc1.weight", + "backbone.levels.2.blocks.17.mlp.fc2.weight" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.05 + }, + "layer_31_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.norm1.0.weight", + "backbone.levels.2.blocks.18.norm1.0.bias", + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.18.dcn.offset.bias", + "backbone.levels.2.blocks.18.dcn.mask.bias", + "backbone.levels.2.blocks.18.dcn.input_proj.bias", + "backbone.levels.2.blocks.18.dcn.output_proj.bias", + "backbone.levels.2.blocks.18.norm2.0.weight", + "backbone.levels.2.blocks.18.norm2.0.bias", + "backbone.levels.2.blocks.18.mlp.fc1.bias", + "backbone.levels.2.blocks.18.mlp.fc2.bias", + "backbone.levels.2.blocks.18.res_post_norm1.0.weight", + "backbone.levels.2.blocks.18.res_post_norm1.0.bias", + "backbone.levels.2.blocks.18.res_post_norm2.0.weight", + "backbone.levels.2.blocks.18.res_post_norm2.0.bias" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.0 + }, + "layer_31_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.weight", + 
"backbone.levels.2.blocks.18.dcn.offset.weight", + "backbone.levels.2.blocks.18.dcn.mask.weight", + "backbone.levels.2.blocks.18.dcn.input_proj.weight", + "backbone.levels.2.blocks.18.dcn.output_proj.weight", + "backbone.levels.2.blocks.18.mlp.fc1.weight", + "backbone.levels.2.blocks.18.mlp.fc2.weight" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.05 + }, + "layer_32_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.norm1.0.weight", + "backbone.levels.2.blocks.19.norm1.0.bias", + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.19.dcn.offset.bias", + "backbone.levels.2.blocks.19.dcn.mask.bias", + "backbone.levels.2.blocks.19.dcn.input_proj.bias", + "backbone.levels.2.blocks.19.dcn.output_proj.bias", + "backbone.levels.2.blocks.19.norm2.0.weight", + "backbone.levels.2.blocks.19.norm2.0.bias", + "backbone.levels.2.blocks.19.mlp.fc1.bias", + "backbone.levels.2.blocks.19.mlp.fc2.bias", + "backbone.levels.2.blocks.19.res_post_norm1.0.weight", + "backbone.levels.2.blocks.19.res_post_norm1.0.bias", + "backbone.levels.2.blocks.19.res_post_norm2.0.weight", + "backbone.levels.2.blocks.19.res_post_norm2.0.bias" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.0 + }, + "layer_32_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.19.dcn.offset.weight", + "backbone.levels.2.blocks.19.dcn.mask.weight", + "backbone.levels.2.blocks.19.dcn.input_proj.weight", + "backbone.levels.2.blocks.19.dcn.output_proj.weight", + "backbone.levels.2.blocks.19.mlp.fc1.weight", + "backbone.levels.2.blocks.19.mlp.fc2.weight" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.05 + }, + "layer_33_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.norm1.0.weight", + "backbone.levels.2.blocks.20.norm1.0.bias", + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.20.dcn.offset.bias", + "backbone.levels.2.blocks.20.dcn.mask.bias", + "backbone.levels.2.blocks.20.dcn.input_proj.bias", + "backbone.levels.2.blocks.20.dcn.output_proj.bias", + "backbone.levels.2.blocks.20.norm2.0.weight", + "backbone.levels.2.blocks.20.norm2.0.bias", + "backbone.levels.2.blocks.20.mlp.fc1.bias", + "backbone.levels.2.blocks.20.mlp.fc2.bias", + "backbone.levels.2.blocks.20.res_post_norm1.0.weight", + "backbone.levels.2.blocks.20.res_post_norm1.0.bias", + "backbone.levels.2.blocks.20.res_post_norm2.0.weight", + "backbone.levels.2.blocks.20.res_post_norm2.0.bias" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.0 + }, + "layer_33_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.20.dcn.offset.weight", + "backbone.levels.2.blocks.20.dcn.mask.weight", + "backbone.levels.2.blocks.20.dcn.input_proj.weight", + "backbone.levels.2.blocks.20.dcn.output_proj.weight", + "backbone.levels.2.blocks.20.mlp.fc1.weight", + 
"backbone.levels.2.blocks.20.mlp.fc2.weight" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.05 + }, + "layer_34_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.norm1.0.weight", + "backbone.levels.2.blocks.21.norm1.0.bias", + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.21.dcn.offset.bias", + "backbone.levels.2.blocks.21.dcn.mask.bias", + "backbone.levels.2.blocks.21.dcn.input_proj.bias", + "backbone.levels.2.blocks.21.dcn.output_proj.bias", + "backbone.levels.2.blocks.21.norm2.0.weight", + "backbone.levels.2.blocks.21.norm2.0.bias", + "backbone.levels.2.blocks.21.mlp.fc1.bias", + "backbone.levels.2.blocks.21.mlp.fc2.bias", + "backbone.levels.2.blocks.21.res_post_norm1.0.weight", + "backbone.levels.2.blocks.21.res_post_norm1.0.bias", + "backbone.levels.2.blocks.21.res_post_norm2.0.weight", + "backbone.levels.2.blocks.21.res_post_norm2.0.bias" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.0 + }, + "layer_34_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.21.dcn.offset.weight", + "backbone.levels.2.blocks.21.dcn.mask.weight", + "backbone.levels.2.blocks.21.dcn.input_proj.weight", + "backbone.levels.2.blocks.21.dcn.output_proj.weight", + "backbone.levels.2.blocks.21.mlp.fc1.weight", + "backbone.levels.2.blocks.21.mlp.fc2.weight" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.05 + }, + "layer_35_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.norm1.0.weight", + "backbone.levels.2.blocks.22.norm1.0.bias", + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.22.dcn.offset.bias", + "backbone.levels.2.blocks.22.dcn.mask.bias", + "backbone.levels.2.blocks.22.dcn.input_proj.bias", + "backbone.levels.2.blocks.22.dcn.output_proj.bias", + "backbone.levels.2.blocks.22.norm2.0.weight", + "backbone.levels.2.blocks.22.norm2.0.bias", + "backbone.levels.2.blocks.22.mlp.fc1.bias", + "backbone.levels.2.blocks.22.mlp.fc2.bias", + "backbone.levels.2.blocks.22.res_post_norm1.0.weight", + "backbone.levels.2.blocks.22.res_post_norm1.0.bias", + "backbone.levels.2.blocks.22.res_post_norm2.0.weight", + "backbone.levels.2.blocks.22.res_post_norm2.0.bias" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.0 + }, + "layer_35_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.22.dcn.offset.weight", + "backbone.levels.2.blocks.22.dcn.mask.weight", + "backbone.levels.2.blocks.22.dcn.input_proj.weight", + "backbone.levels.2.blocks.22.dcn.output_proj.weight", + "backbone.levels.2.blocks.22.mlp.fc1.weight", + "backbone.levels.2.blocks.22.mlp.fc2.weight" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.05 + }, + "layer_36_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.norm1.0.weight", + 
"backbone.levels.2.blocks.23.norm1.0.bias", + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.23.dcn.offset.bias", + "backbone.levels.2.blocks.23.dcn.mask.bias", + "backbone.levels.2.blocks.23.dcn.input_proj.bias", + "backbone.levels.2.blocks.23.dcn.output_proj.bias", + "backbone.levels.2.blocks.23.norm2.0.weight", + "backbone.levels.2.blocks.23.norm2.0.bias", + "backbone.levels.2.blocks.23.mlp.fc1.bias", + "backbone.levels.2.blocks.23.mlp.fc2.bias", + "backbone.levels.2.blocks.23.res_post_norm1.0.weight", + "backbone.levels.2.blocks.23.res_post_norm1.0.bias", + "backbone.levels.2.blocks.23.res_post_norm2.0.weight", + "backbone.levels.2.blocks.23.res_post_norm2.0.bias" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.0 + }, + "layer_36_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.23.dcn.offset.weight", + "backbone.levels.2.blocks.23.dcn.mask.weight", + "backbone.levels.2.blocks.23.dcn.input_proj.weight", + "backbone.levels.2.blocks.23.dcn.output_proj.weight", + "backbone.levels.2.blocks.23.mlp.fc1.weight", + "backbone.levels.2.blocks.23.mlp.fc2.weight" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.05 + }, + "layer_37_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.norm1.0.weight", + "backbone.levels.2.blocks.24.norm1.0.bias", + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.24.dcn.offset.bias", + "backbone.levels.2.blocks.24.dcn.mask.bias", + "backbone.levels.2.blocks.24.dcn.input_proj.bias", + "backbone.levels.2.blocks.24.dcn.output_proj.bias", + "backbone.levels.2.blocks.24.norm2.0.weight", + "backbone.levels.2.blocks.24.norm2.0.bias", + "backbone.levels.2.blocks.24.mlp.fc1.bias", + "backbone.levels.2.blocks.24.mlp.fc2.bias", + "backbone.levels.2.blocks.24.res_post_norm1.0.weight", + "backbone.levels.2.blocks.24.res_post_norm1.0.bias", + "backbone.levels.2.blocks.24.res_post_norm2.0.weight", + "backbone.levels.2.blocks.24.res_post_norm2.0.bias" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.0 + }, + "layer_37_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.24.dcn.offset.weight", + "backbone.levels.2.blocks.24.dcn.mask.weight", + "backbone.levels.2.blocks.24.dcn.input_proj.weight", + "backbone.levels.2.blocks.24.dcn.output_proj.weight", + "backbone.levels.2.blocks.24.mlp.fc1.weight", + "backbone.levels.2.blocks.24.mlp.fc2.weight" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.05 + }, + "layer_38_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.norm1.0.weight", + "backbone.levels.2.blocks.25.norm1.0.bias", + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight", + 
"backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.25.dcn.offset.bias", + "backbone.levels.2.blocks.25.dcn.mask.bias", + "backbone.levels.2.blocks.25.dcn.input_proj.bias", + "backbone.levels.2.blocks.25.dcn.output_proj.bias", + "backbone.levels.2.blocks.25.norm2.0.weight", + "backbone.levels.2.blocks.25.norm2.0.bias", + "backbone.levels.2.blocks.25.mlp.fc1.bias", + "backbone.levels.2.blocks.25.mlp.fc2.bias", + "backbone.levels.2.blocks.25.res_post_norm1.0.weight", + "backbone.levels.2.blocks.25.res_post_norm1.0.bias", + "backbone.levels.2.blocks.25.res_post_norm2.0.weight", + "backbone.levels.2.blocks.25.res_post_norm2.0.bias" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.0 + }, + "layer_38_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.25.dcn.offset.weight", + "backbone.levels.2.blocks.25.dcn.mask.weight", + "backbone.levels.2.blocks.25.dcn.input_proj.weight", + "backbone.levels.2.blocks.25.dcn.output_proj.weight", + "backbone.levels.2.blocks.25.mlp.fc1.weight", + "backbone.levels.2.blocks.25.mlp.fc2.weight" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.05 + }, + "layer_39_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.norm1.0.weight", + "backbone.levels.2.blocks.26.norm1.0.bias", + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.26.dcn.offset.bias", + "backbone.levels.2.blocks.26.dcn.mask.bias", + "backbone.levels.2.blocks.26.dcn.input_proj.bias", + "backbone.levels.2.blocks.26.dcn.output_proj.bias", + "backbone.levels.2.blocks.26.norm2.0.weight", + "backbone.levels.2.blocks.26.norm2.0.bias", + "backbone.levels.2.blocks.26.mlp.fc1.bias", + "backbone.levels.2.blocks.26.mlp.fc2.bias", + "backbone.levels.2.blocks.26.res_post_norm1.0.weight", + "backbone.levels.2.blocks.26.res_post_norm1.0.bias", + "backbone.levels.2.blocks.26.res_post_norm2.0.weight", + "backbone.levels.2.blocks.26.res_post_norm2.0.bias" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.0 + }, + "layer_39_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.26.dcn.offset.weight", + "backbone.levels.2.blocks.26.dcn.mask.weight", + "backbone.levels.2.blocks.26.dcn.input_proj.weight", + "backbone.levels.2.blocks.26.dcn.output_proj.weight", + "backbone.levels.2.blocks.26.mlp.fc1.weight", + "backbone.levels.2.blocks.26.mlp.fc2.weight" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.05 + }, + "layer_40_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.norm1.0.weight", + "backbone.levels.2.blocks.27.norm1.0.bias", + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.27.dcn.offset.bias", + "backbone.levels.2.blocks.27.dcn.mask.bias", + "backbone.levels.2.blocks.27.dcn.input_proj.bias", + "backbone.levels.2.blocks.27.dcn.output_proj.bias", + 
"backbone.levels.2.blocks.27.norm2.0.weight", + "backbone.levels.2.blocks.27.norm2.0.bias", + "backbone.levels.2.blocks.27.mlp.fc1.bias", + "backbone.levels.2.blocks.27.mlp.fc2.bias", + "backbone.levels.2.blocks.27.res_post_norm1.0.weight", + "backbone.levels.2.blocks.27.res_post_norm1.0.bias", + "backbone.levels.2.blocks.27.res_post_norm2.0.weight", + "backbone.levels.2.blocks.27.res_post_norm2.0.bias" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.0 + }, + "layer_40_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.27.dcn.offset.weight", + "backbone.levels.2.blocks.27.dcn.mask.weight", + "backbone.levels.2.blocks.27.dcn.input_proj.weight", + "backbone.levels.2.blocks.27.dcn.output_proj.weight", + "backbone.levels.2.blocks.27.mlp.fc1.weight", + "backbone.levels.2.blocks.27.mlp.fc2.weight" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.05 + }, + "layer_41_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.norm1.0.weight", + "backbone.levels.2.blocks.28.norm1.0.bias", + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.28.dcn.offset.bias", + "backbone.levels.2.blocks.28.dcn.mask.bias", + "backbone.levels.2.blocks.28.dcn.input_proj.bias", + "backbone.levels.2.blocks.28.dcn.output_proj.bias", + "backbone.levels.2.blocks.28.norm2.0.weight", + "backbone.levels.2.blocks.28.norm2.0.bias", + "backbone.levels.2.blocks.28.mlp.fc1.bias", + "backbone.levels.2.blocks.28.mlp.fc2.bias", + "backbone.levels.2.blocks.28.res_post_norm1.0.weight", + "backbone.levels.2.blocks.28.res_post_norm1.0.bias", + "backbone.levels.2.blocks.28.res_post_norm2.0.weight", + "backbone.levels.2.blocks.28.res_post_norm2.0.bias" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.0 + }, + "layer_41_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.28.dcn.offset.weight", + "backbone.levels.2.blocks.28.dcn.mask.weight", + "backbone.levels.2.blocks.28.dcn.input_proj.weight", + "backbone.levels.2.blocks.28.dcn.output_proj.weight", + "backbone.levels.2.blocks.28.mlp.fc1.weight", + "backbone.levels.2.blocks.28.mlp.fc2.weight" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.05 + }, + "layer_42_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.norm1.0.weight", + "backbone.levels.2.blocks.29.norm1.0.bias", + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.29.dcn.offset.bias", + "backbone.levels.2.blocks.29.dcn.mask.bias", + "backbone.levels.2.blocks.29.dcn.input_proj.bias", + "backbone.levels.2.blocks.29.dcn.output_proj.bias", + "backbone.levels.2.blocks.29.norm2.0.weight", + "backbone.levels.2.blocks.29.norm2.0.bias", + "backbone.levels.2.blocks.29.mlp.fc1.bias", + "backbone.levels.2.blocks.29.mlp.fc2.bias", + "backbone.levels.2.blocks.29.res_post_norm1.0.weight", + 
"backbone.levels.2.blocks.29.res_post_norm1.0.bias", + "backbone.levels.2.blocks.29.res_post_norm2.0.weight", + "backbone.levels.2.blocks.29.res_post_norm2.0.bias" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.0 + }, + "layer_42_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.29.dcn.offset.weight", + "backbone.levels.2.blocks.29.dcn.mask.weight", + "backbone.levels.2.blocks.29.dcn.input_proj.weight", + "backbone.levels.2.blocks.29.dcn.output_proj.weight", + "backbone.levels.2.blocks.29.mlp.fc1.weight", + "backbone.levels.2.blocks.29.mlp.fc2.weight" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.05 + }, + "layer_43_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.norm1.0.weight", + "backbone.levels.2.blocks.30.norm1.0.bias", + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.30.dcn.offset.bias", + "backbone.levels.2.blocks.30.dcn.mask.bias", + "backbone.levels.2.blocks.30.dcn.input_proj.bias", + "backbone.levels.2.blocks.30.dcn.output_proj.bias", + "backbone.levels.2.blocks.30.norm2.0.weight", + "backbone.levels.2.blocks.30.norm2.0.bias", + "backbone.levels.2.blocks.30.mlp.fc1.bias", + "backbone.levels.2.blocks.30.mlp.fc2.bias", + "backbone.levels.2.blocks.30.res_post_norm1.0.weight", + "backbone.levels.2.blocks.30.res_post_norm1.0.bias", + "backbone.levels.2.blocks.30.res_post_norm2.0.weight", + "backbone.levels.2.blocks.30.res_post_norm2.0.bias" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.0 + }, + "layer_43_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.30.dcn.offset.weight", + "backbone.levels.2.blocks.30.dcn.mask.weight", + "backbone.levels.2.blocks.30.dcn.input_proj.weight", + "backbone.levels.2.blocks.30.dcn.output_proj.weight", + "backbone.levels.2.blocks.30.mlp.fc1.weight", + "backbone.levels.2.blocks.30.mlp.fc2.weight" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.05 + }, + "layer_44_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.norm1.0.weight", + "backbone.levels.2.blocks.31.norm1.0.bias", + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.31.dcn.offset.bias", + "backbone.levels.2.blocks.31.dcn.mask.bias", + "backbone.levels.2.blocks.31.dcn.input_proj.bias", + "backbone.levels.2.blocks.31.dcn.output_proj.bias", + "backbone.levels.2.blocks.31.norm2.0.weight", + "backbone.levels.2.blocks.31.norm2.0.bias", + "backbone.levels.2.blocks.31.mlp.fc1.bias", + "backbone.levels.2.blocks.31.mlp.fc2.bias", + "backbone.levels.2.blocks.31.res_post_norm1.0.weight", + "backbone.levels.2.blocks.31.res_post_norm1.0.bias", + "backbone.levels.2.blocks.31.res_post_norm2.0.weight", + "backbone.levels.2.blocks.31.res_post_norm2.0.bias" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.0 + }, + 
"layer_44_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.31.dcn.offset.weight", + "backbone.levels.2.blocks.31.dcn.mask.weight", + "backbone.levels.2.blocks.31.dcn.input_proj.weight", + "backbone.levels.2.blocks.31.dcn.output_proj.weight", + "backbone.levels.2.blocks.31.mlp.fc1.weight", + "backbone.levels.2.blocks.31.mlp.fc2.weight" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.05 + }, + "layer_45_no_decay": { + "param_names": [ + "backbone.levels.2.norm.0.weight", + "backbone.levels.2.norm.0.bias", + "backbone.levels.2.downsample.norm.1.weight", + "backbone.levels.2.downsample.norm.1.bias", + "backbone.levels.3.blocks.0.norm1.0.weight", + "backbone.levels.3.blocks.0.norm1.0.bias", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.0.dcn.offset.bias", + "backbone.levels.3.blocks.0.dcn.mask.bias", + "backbone.levels.3.blocks.0.dcn.input_proj.bias", + "backbone.levels.3.blocks.0.dcn.output_proj.bias", + "backbone.levels.3.blocks.0.norm2.0.weight", + "backbone.levels.3.blocks.0.norm2.0.bias", + "backbone.levels.3.blocks.0.mlp.fc1.bias", + "backbone.levels.3.blocks.0.mlp.fc2.bias", + "backbone.levels.3.blocks.0.res_post_norm1.0.weight", + "backbone.levels.3.blocks.0.res_post_norm1.0.bias", + "backbone.levels.3.blocks.0.res_post_norm2.0.weight", + "backbone.levels.3.blocks.0.res_post_norm2.0.bias", + "backbone.levels.3.norm.0.weight", + "backbone.levels.3.norm.0.bias" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.0 + }, + "layer_45_decay": { + "param_names": [ + "backbone.levels.2.downsample.conv.weight", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.0.dcn.offset.weight", + "backbone.levels.3.blocks.0.dcn.mask.weight", + "backbone.levels.3.blocks.0.dcn.input_proj.weight", + "backbone.levels.3.blocks.0.dcn.output_proj.weight", + "backbone.levels.3.blocks.0.mlp.fc1.weight", + "backbone.levels.3.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.05 + }, + "layer_46_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.norm1.0.weight", + "backbone.levels.3.blocks.1.norm1.0.bias", + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.1.dcn.offset.bias", + "backbone.levels.3.blocks.1.dcn.mask.bias", + "backbone.levels.3.blocks.1.dcn.input_proj.bias", + "backbone.levels.3.blocks.1.dcn.output_proj.bias", + "backbone.levels.3.blocks.1.norm2.0.weight", + "backbone.levels.3.blocks.1.norm2.0.bias", + "backbone.levels.3.blocks.1.mlp.fc1.bias", + "backbone.levels.3.blocks.1.mlp.fc2.bias", + "backbone.levels.3.blocks.1.res_post_norm1.0.weight", + "backbone.levels.3.blocks.1.res_post_norm1.0.bias", + "backbone.levels.3.blocks.1.res_post_norm2.0.weight", + "backbone.levels.3.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.0 + }, + "layer_46_decay": { + 
"param_names": [ + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.1.dcn.offset.weight", + "backbone.levels.3.blocks.1.dcn.mask.weight", + "backbone.levels.3.blocks.1.dcn.input_proj.weight", + "backbone.levels.3.blocks.1.dcn.output_proj.weight", + "backbone.levels.3.blocks.1.mlp.fc1.weight", + "backbone.levels.3.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.05 + }, + "layer_47_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.norm1.0.weight", + "backbone.levels.3.blocks.2.norm1.0.bias", + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.2.dcn.offset.bias", + "backbone.levels.3.blocks.2.dcn.mask.bias", + "backbone.levels.3.blocks.2.dcn.input_proj.bias", + "backbone.levels.3.blocks.2.dcn.output_proj.bias", + "backbone.levels.3.blocks.2.norm2.0.weight", + "backbone.levels.3.blocks.2.norm2.0.bias", + "backbone.levels.3.blocks.2.mlp.fc1.bias", + "backbone.levels.3.blocks.2.mlp.fc2.bias", + "backbone.levels.3.blocks.2.res_post_norm1.0.weight", + "backbone.levels.3.blocks.2.res_post_norm1.0.bias", + "backbone.levels.3.blocks.2.res_post_norm2.0.weight", + "backbone.levels.3.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.0 + }, + "layer_47_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.2.dcn.offset.weight", + "backbone.levels.3.blocks.2.dcn.mask.weight", + "backbone.levels.3.blocks.2.dcn.input_proj.weight", + "backbone.levels.3.blocks.2.dcn.output_proj.weight", + "backbone.levels.3.blocks.2.mlp.fc1.weight", + "backbone.levels.3.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.05 + }, + "layer_48_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.norm1.0.weight", + "backbone.levels.3.blocks.3.norm1.0.bias", + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.3.dcn.offset.bias", + "backbone.levels.3.blocks.3.dcn.mask.bias", + "backbone.levels.3.blocks.3.dcn.input_proj.bias", + "backbone.levels.3.blocks.3.dcn.output_proj.bias", + "backbone.levels.3.blocks.3.norm2.0.weight", + "backbone.levels.3.blocks.3.norm2.0.bias", + "backbone.levels.3.blocks.3.mlp.fc1.bias", + "backbone.levels.3.blocks.3.mlp.fc2.bias", + "backbone.levels.3.blocks.3.res_post_norm1.0.weight", + "backbone.levels.3.blocks.3.res_post_norm1.0.bias", + "backbone.levels.3.blocks.3.res_post_norm2.0.weight", + "backbone.levels.3.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.0 + }, + "layer_48_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.3.dcn.offset.weight", + "backbone.levels.3.blocks.3.dcn.mask.weight", + "backbone.levels.3.blocks.3.dcn.input_proj.weight", + 
"backbone.levels.3.blocks.3.dcn.output_proj.weight", + "backbone.levels.3.blocks.3.mlp.fc1.weight", + "backbone.levels.3.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.05 + }, + "layer_49_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.norm1.0.weight", + "backbone.levels.3.blocks.4.norm1.0.bias", + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.4.dcn.offset.bias", + "backbone.levels.3.blocks.4.dcn.mask.bias", + "backbone.levels.3.blocks.4.dcn.input_proj.bias", + "backbone.levels.3.blocks.4.dcn.output_proj.bias", + "backbone.levels.3.blocks.4.norm2.0.weight", + "backbone.levels.3.blocks.4.norm2.0.bias", + "backbone.levels.3.blocks.4.mlp.fc1.bias", + "backbone.levels.3.blocks.4.mlp.fc2.bias", + "backbone.levels.3.blocks.4.res_post_norm1.0.weight", + "backbone.levels.3.blocks.4.res_post_norm1.0.bias", + "backbone.levels.3.blocks.4.res_post_norm2.0.weight", + "backbone.levels.3.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.0 + }, + "layer_49_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.4.dcn.offset.weight", + "backbone.levels.3.blocks.4.dcn.mask.weight", + "backbone.levels.3.blocks.4.dcn.input_proj.weight", + "backbone.levels.3.blocks.4.dcn.output_proj.weight", + "backbone.levels.3.blocks.4.mlp.fc1.weight", + "backbone.levels.3.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.05 + }, + "layer_50_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.norm1.0.weight", + "backbone.levels.3.blocks.5.norm1.0.bias", + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.5.dcn.offset.bias", + "backbone.levels.3.blocks.5.dcn.mask.bias", + "backbone.levels.3.blocks.5.dcn.input_proj.bias", + "backbone.levels.3.blocks.5.dcn.output_proj.bias", + "backbone.levels.3.blocks.5.norm2.0.weight", + "backbone.levels.3.blocks.5.norm2.0.bias", + "backbone.levels.3.blocks.5.mlp.fc1.bias", + "backbone.levels.3.blocks.5.mlp.fc2.bias", + "backbone.levels.3.blocks.5.res_post_norm1.0.weight", + "backbone.levels.3.blocks.5.res_post_norm1.0.bias", + "backbone.levels.3.blocks.5.res_post_norm2.0.weight", + "backbone.levels.3.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.0 + }, + "layer_50_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.5.dcn.offset.weight", + "backbone.levels.3.blocks.5.dcn.mask.weight", + "backbone.levels.3.blocks.5.dcn.input_proj.weight", + "backbone.levels.3.blocks.5.dcn.output_proj.weight", + "backbone.levels.3.blocks.5.mlp.fc1.weight", + "backbone.levels.3.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.05 + }, + "layer_51_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.weight", + "decode_head.pixel_decoder.input_convs.1.conv.weight", + 
"decode_head.pixel_decoder.input_convs.2.conv.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.level_encoding.weight", + "decode_head.pixel_decoder.lateral_convs.0.conv.weight", + "decode_head.pixel_decoder.output_convs.0.conv.weight", + "decode_head.pixel_decoder.mask_feature.weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight", + 
"decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.05 + }, + "layer_51_no_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.bias", + "decode_head.pixel_decoder.input_convs.0.gn.weight", + "decode_head.pixel_decoder.input_convs.0.gn.bias", + "decode_head.pixel_decoder.input_convs.1.conv.bias", + 
"decode_head.pixel_decoder.input_convs.1.gn.weight", + "decode_head.pixel_decoder.input_convs.1.gn.bias", + "decode_head.pixel_decoder.input_convs.2.conv.bias", + "decode_head.pixel_decoder.input_convs.2.gn.weight", + "decode_head.pixel_decoder.input_convs.2.gn.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight", + 
"decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.bias", + "decode_head.pixel_decoder.lateral_convs.0.gn.weight", + "decode_head.pixel_decoder.lateral_convs.0.gn.bias", + "decode_head.pixel_decoder.output_convs.0.gn.weight", + "decode_head.pixel_decoder.output_convs.0.gn.bias", + "decode_head.pixel_decoder.mask_feature.bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.0.norms.0.weight", + "decode_head.transformer_decoder.layers.0.norms.0.bias", + "decode_head.transformer_decoder.layers.0.norms.1.weight", + "decode_head.transformer_decoder.layers.0.norms.1.bias", + "decode_head.transformer_decoder.layers.0.norms.2.weight", + "decode_head.transformer_decoder.layers.0.norms.2.bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.1.norms.0.weight", + "decode_head.transformer_decoder.layers.1.norms.0.bias", + "decode_head.transformer_decoder.layers.1.norms.1.weight", + "decode_head.transformer_decoder.layers.1.norms.1.bias", + "decode_head.transformer_decoder.layers.1.norms.2.weight", + "decode_head.transformer_decoder.layers.1.norms.2.bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias", + 
"decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.2.norms.0.weight", + "decode_head.transformer_decoder.layers.2.norms.0.bias", + "decode_head.transformer_decoder.layers.2.norms.1.weight", + "decode_head.transformer_decoder.layers.2.norms.1.bias", + "decode_head.transformer_decoder.layers.2.norms.2.weight", + "decode_head.transformer_decoder.layers.2.norms.2.bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.3.norms.0.weight", + "decode_head.transformer_decoder.layers.3.norms.0.bias", + "decode_head.transformer_decoder.layers.3.norms.1.weight", + "decode_head.transformer_decoder.layers.3.norms.1.bias", + "decode_head.transformer_decoder.layers.3.norms.2.weight", + "decode_head.transformer_decoder.layers.3.norms.2.bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.4.norms.0.weight", + "decode_head.transformer_decoder.layers.4.norms.0.bias", + "decode_head.transformer_decoder.layers.4.norms.1.weight", + "decode_head.transformer_decoder.layers.4.norms.1.bias", + "decode_head.transformer_decoder.layers.4.norms.2.weight", + "decode_head.transformer_decoder.layers.4.norms.2.bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.5.norms.0.weight", + "decode_head.transformer_decoder.layers.5.norms.0.bias", + "decode_head.transformer_decoder.layers.5.norms.1.weight", + "decode_head.transformer_decoder.layers.5.norms.1.bias", + "decode_head.transformer_decoder.layers.5.norms.2.weight", + "decode_head.transformer_decoder.layers.5.norms.2.bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias", + 
"decode_head.transformer_decoder.layers.6.norms.0.weight", + "decode_head.transformer_decoder.layers.6.norms.0.bias", + "decode_head.transformer_decoder.layers.6.norms.1.weight", + "decode_head.transformer_decoder.layers.6.norms.1.bias", + "decode_head.transformer_decoder.layers.6.norms.2.weight", + "decode_head.transformer_decoder.layers.6.norms.2.bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.7.norms.0.weight", + "decode_head.transformer_decoder.layers.7.norms.0.bias", + "decode_head.transformer_decoder.layers.7.norms.1.weight", + "decode_head.transformer_decoder.layers.7.norms.1.bias", + "decode_head.transformer_decoder.layers.7.norms.2.weight", + "decode_head.transformer_decoder.layers.7.norms.2.bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.8.norms.0.weight", + "decode_head.transformer_decoder.layers.8.norms.0.bias", + "decode_head.transformer_decoder.layers.8.norms.1.weight", + "decode_head.transformer_decoder.layers.8.norms.1.bias", + "decode_head.transformer_decoder.layers.8.norms.2.weight", + "decode_head.transformer_decoder.layers.8.norms.2.bias", + "decode_head.transformer_decoder.post_norm.weight", + "decode_head.transformer_decoder.post_norm.bias" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.0 + } +} +2025-05-28 03:12:59,772 - mmseg - INFO - Loaded 1159 images +2025-05-28 03:12:59,773 - mmseg - INFO - load checkpoint from http path: https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth +2025-05-28 03:13:03,629 - mmseg - INFO - Start running, host: yiming@pasteur2.stanford.edu, work_dir: /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 +2025-05-28 03:13:03,629 - mmseg - INFO - Hooks will be executed in the following order: +before_run: +(VERY_HIGH ) PolyLrUpdaterHook +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_epoch: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_iter: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook + -------------------- +after_train_iter: +(ABOVE_NORMAL) OptimizerHook +(NORMAL ) CheckpointHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_train_epoch: +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_epoch: +(LOW ) 
IterTimerHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_epoch: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_run: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +2025-05-28 03:13:03,630 - mmseg - INFO - workflow: [('train', 1)], max: 80000 iters +2025-05-28 03:13:03,630 - mmseg - INFO - Checkpoints will be saved to /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 by HardDiskBackend. diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031222.log.json b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031222.log.json new file mode 100644 index 0000000000000000000000000000000000000000..029d5303e29b088d37565b6dac4945702c4a3401 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031222.log.json @@ -0,0 +1 @@ +{"env_info": "sys.platform: linux\nPython: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ]\nCUDA available: True\nGPU 0: NVIDIA TITAN RTX\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 12.2, V12.2.91\nGCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0\nPyTorch: 1.11.0+cu113\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n\nTorchVision: 0.12.0+cu113\nOpenCV: 4.11.0\nMMCV: 1.5.0\nMMCV Compiler: GCC 7.3\nMMCV CUDA 
Compiler: 11.3\nMMSegmentation: 0.27.0+12dc934", "seed": 1121623199, "exp_name": "mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py", "mmseg_version": "0.27.0+12dc934", "config": "num_things_classes = 100\nnum_stuff_classes = 50\nnum_classes = 19\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoderMask2Former',\n pretrained=None,\n backbone=dict(\n type='InternImage',\n core_op='DCNv3',\n channels=320,\n depths=[6, 6, 32, 6],\n groups=[10, 20, 40, 80],\n mlp_ratio=4.0,\n drop_path_rate=0.5,\n norm_layer='LN',\n layer_scale=None,\n offset_scale=1.0,\n post_norm=False,\n dw_kernel_size=5,\n res_post_norm=True,\n level2_post_norm=True,\n level2_post_norm_block_ids=[5, 11, 17, 23, 29],\n center_feature_scale=True,\n with_cp=False,\n out_indices=(0, 1, 2, 3),\n init_cfg=None),\n decode_head=dict(\n type='Mask2FormerHead',\n in_channels=[320, 640, 1280, 2560],\n feat_channels=256,\n out_channels=256,\n in_index=[0, 1, 2, 3],\n num_things_classes=100,\n num_stuff_classes=50,\n num_queries=100,\n num_transformer_feat_level=3,\n pixel_decoder=dict(\n type='MSDeformAttnPixelDecoder',\n num_outs=3,\n norm_cfg=dict(type='GN', num_groups=32),\n act_cfg=dict(type='ReLU'),\n encoder=dict(\n type='DetrTransformerEncoder',\n num_layers=6,\n transformerlayers=dict(\n type='BaseTransformerLayer',\n attn_cfgs=dict(\n type='MultiScaleDeformableAttention',\n embed_dims=256,\n num_heads=8,\n num_levels=3,\n num_points=4,\n im2col_step=64,\n dropout=0.0,\n batch_first=False,\n norm_cfg=None,\n init_cfg=None),\n ffn_cfgs=dict(\n type='FFN',\n embed_dims=256,\n feedforward_channels=1024,\n num_fcs=2,\n ffn_drop=0.0,\n act_cfg=dict(type='ReLU', inplace=True),\n with_cp=False),\n operation_order=('self_attn', 'norm', 'ffn', 'norm')),\n init_cfg=None),\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n init_cfg=None),\n enforce_decoder_input_project=False,\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n transformer_decoder=dict(\n type='DetrTransformerDecoder',\n return_intermediate=True,\n num_layers=9,\n transformerlayers=dict(\n type='DetrTransformerDecoderLayer',\n attn_cfgs=dict(\n type='MultiheadAttention',\n embed_dims=256,\n num_heads=8,\n attn_drop=0.0,\n proj_drop=0.0,\n dropout_layer=None,\n batch_first=False),\n ffn_cfgs=dict(\n embed_dims=256,\n feedforward_channels=2048,\n num_fcs=2,\n act_cfg=dict(type='ReLU', inplace=True),\n ffn_drop=0.0,\n dropout_layer=None,\n add_identity=True,\n with_cp=False),\n feedforward_channels=2048,\n operation_order=('cross_attn', 'norm', 'self_attn', 'norm',\n 'ffn', 'norm')),\n init_cfg=None),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=2.0,\n reduction='mean',\n class_weight=[\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1\n ]),\n loss_mask=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=5.0),\n loss_dice=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n reduction='mean',\n naive_dice=True,\n eps=1.0,\n loss_weight=5.0),\n num_classes=19,\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n 
sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512))),\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512)),\n init_cfg=None)\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncrop_size = (1024, 1024)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(\n type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(\n type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n ],\n split='splits/fold_1_train_split.txt'),\n val=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', 
keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ],\n split='splits/fold_1_val_split.txt'),\n test=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/val/',\n ann_dir='gtFine/val/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n dict(type='TensorboardLoggerHook')\n ])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth'\nresume_from = None\nworkflow = [('train', 1)]\ncudnn_benchmark = True\noptimizer = dict(\n type='AdamW',\n lr=1e-05,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n constructor='CustomLayerDecayOptimizerConstructor',\n paramwise_cfg=dict(\n num_layers=50,\n layer_decay_rate=0.95,\n depths=[6, 6, 32, 6],\n offset_lr_scale=1.0))\noptimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))\nlr_config = dict(\n policy='poly',\n warmup='linear',\n warmup_iters=1500,\n warmup_ratio=1e-06,\n power=1.0,\n min_lr=0.0,\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=80000)\ncheckpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1)\nevaluation = dict(\n interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU')\nwork_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1'\ngpu_ids = range(0, 1)\nauto_resume = False\ndevice = 'cuda'\nseed = 1121623199\n", "CLASSES": ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"], "PALETTE": [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]], "hook_msgs": {}} diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031351.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031351.log new file mode 100644 index 0000000000000000000000000000000000000000..549959d5e3c46dc6c27e035afe594a4d0689b26d --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031351.log @@ -0,0 +1,7659 @@ +2025-05-28 03:13:51,628 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 03:13:51,638 - mmseg - INFO - OpenCV num_threads is `32 +2025-05-28 03:13:51,638 - mmseg - INFO - OMP num threads is 1 +2025-05-28 03:13:51,685 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0,1,2,3,4,5,6,7: NVIDIA A100-SXM4-80GB +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 11.7, V11.7.64 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch 
compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 03:13:51,686 - mmseg - INFO - Distributed training: True +2025-05-28 03:13:52,090 - mmseg - INFO - Config: +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + 
num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 
57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 8) +auto_resume = False + +2025-05-28 03:14:05,205 - mmseg - INFO - Set random seed to 1985235357, deterministic: False +2025-05-28 03:14:05,207 - mmseg - INFO - 
using core type: DCNv3 +2025-05-28 03:14:05,207 - mmseg - INFO - using activation layer: GELU +2025-05-28 03:14:05,207 - mmseg - INFO - using main norm layer: LN +2025-05-28 03:14:05,207 - mmseg - INFO - using dpr: linear, 0.5 +2025-05-28 03:14:05,207 - mmseg - INFO - level2_post_norm: True +2025-05-28 03:14:05,207 - mmseg - INFO - level2_post_norm_block_ids: [5, 11, 17, 23, 29] +2025-05-28 03:14:05,207 - mmseg - INFO - res_post_norm: True +2025-05-28 03:14:05,207 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 03:14:19,244 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([160, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.weight - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm1.1.bias - torch.Size([160]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.weight - torch.Size([320, 160, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.conv2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.patch_embed.norm2.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.0.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.0.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.0.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.1.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.1.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value 
is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.2.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.2.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.weight - torch.Size([320]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.3.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.3.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.3.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc1.bias - torch.Size([1280]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.4.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.4.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([10, 320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([10]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.weight - torch.Size([320, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.offset.weight - torch.Size([180, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.offset.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.mask.weight - torch.Size([90, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.mask.bias - torch.Size([90]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.input_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.input_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.dcn.output_proj.weight - torch.Size([320, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.dcn.output_proj.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.0.blocks.5.norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc1.weight - torch.Size([1280, 320]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.mlp.fc2.weight - torch.Size([320, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.0.blocks.5.mlp.fc2.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm1.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.blocks.5.res_post_norm2.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.weight - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.norm.0.bias - torch.Size([320]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.conv.weight - torch.Size([640, 320, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.0.downsample.norm.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.0.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.0.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.1.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.1.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.2.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.2.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.1.blocks.3.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.3.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.3.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.weight - torch.Size([640]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.4.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.4.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([20, 640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.weight - torch.Size([640, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.offset.weight - torch.Size([360, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.offset.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.mask.weight - torch.Size([180, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.mask.bias - torch.Size([180]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.input_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.1.blocks.5.dcn.input_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.dcn.output_proj.weight - torch.Size([640, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.dcn.output_proj.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc1.weight - torch.Size([2560, 640]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.mlp.fc2.weight - torch.Size([640, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.1.blocks.5.mlp.fc2.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm1.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.blocks.5.res_post_norm2.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.weight - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.norm.0.bias - torch.Size([640]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.conv.weight - torch.Size([1280, 640, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.1.downsample.norm.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 
5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.0.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.0.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm1.0.bias - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.1.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.1.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.mlp.fc2.weight - 
torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.2.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.2.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.weight - torch.Size([1280]): +The value is the same before and after 
calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.3.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.3.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.4.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.4.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.4.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.5.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.5.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.5.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.6.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.6.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight - 
torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.7.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.7.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.7.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.8.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.8.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.9.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.9.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.10.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.10.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.10.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.11.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.11.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.11.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.12.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.12.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.13.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.14.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.14.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.15.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.dcn.output_proj.weight - 
torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.16.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([360, 1280]): 
+Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.17.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.18.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias - 
torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.19.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.20.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same 
before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.20.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined 
`init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.21.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.22.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.23.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.24.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.24.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.25.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.25.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.26.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.26.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.27.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.27.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.27.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.28.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.28.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.28.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + 
+backbone.levels.2.blocks.29.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.29.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.29.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.30.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.30.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight - torch.Size([40, 1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias - torch.Size([40]): +The value is the same before 
and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.weight - torch.Size([1280, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.offset.weight - torch.Size([720, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.offset.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.mask.weight - torch.Size([360, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.mask.bias - torch.Size([360]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.input_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.input_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.dcn.output_proj.weight - torch.Size([1280, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.dcn.output_proj.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc1.weight - torch.Size([5120, 1280]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc1.bias - torch.Size([5120]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.mlp.fc2.weight - torch.Size([1280, 5120]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.31.mlp.fc2.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.blocks.31.res_post_norm2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.weight - 
torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.norm.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.0.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.1.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.2.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.3.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.weight - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.post_norms.4.0.bias - torch.Size([1280]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.conv.weight - torch.Size([2560, 1280, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.2.downsample.norm.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - 
torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.0.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.1.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` 
of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm1.0.bias - torch.Size([2560]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.2.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.3.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.input_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.4.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight - torch.Size([80, 2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias - torch.Size([80]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.weight - torch.Size([2560, 1, 5, 5]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.offset.weight - torch.Size([1440, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.offset.bias - torch.Size([1440]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.mask.weight - torch.Size([720, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.mask.bias - torch.Size([720]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.input_proj.weight - 
torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.input_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.dcn.output_proj.weight - torch.Size([2560, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.dcn.output_proj.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc1.weight - torch.Size([10240, 2560]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc1.bias - torch.Size([10240]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.mlp.fc2.weight - torch.Size([2560, 10240]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.5.mlp.fc2.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm1.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.blocks.5.res_post_norm2.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.weight - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +backbone.levels.3.norm.0.bias - torch.Size([2560]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.conv.weight - torch.Size([256, 2560, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.0.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.conv.weight - torch.Size([256, 1280, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.1.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.1.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.input_convs.1.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.conv.weight - torch.Size([256, 640, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.input_convs.2.conv.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.input_convs.2.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The 
value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of 
EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight - torch.Size([192, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight - torch.Size([96, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([1024, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([1024]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 1024]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling 
`init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.encoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.level_encoding.weight - torch.Size([3, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.conv.weight - torch.Size([256, 320, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.lateral_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.lateral_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.conv.weight - torch.Size([256, 256, 3, 3]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.output_convs.0.gn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.output_convs.0.gn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.pixel_decoder.mask_feature.weight - torch.Size([256, 256, 1, 1]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.pixel_decoder.mask_feature.bias - torch.Size([256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.0.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.weight - torch.Size([256]): +The value is the 
same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.1.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.2.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.2.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.3.norms.2.bias - torch.Size([256]): +The value is 
the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.4.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + 
+decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.5.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight - 
torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.6.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in 
Mask2FormerHead + +decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias - torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.7.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight - torch.Size([768, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight - torch.Size([256, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight - torch.Size([2048, 256]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias - 
torch.Size([2048]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight - torch.Size([256, 2048]): +Initialized by user-defined `init_weights` in Mask2FormerHead + +decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.1.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.layers.8.norms.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.transformer_decoder.post_norm.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_embed.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.query_feat.weight - torch.Size([100, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.level_embed.weight - torch.Size([3, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.weight - torch.Size([20, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.cls_embed.bias - torch.Size([20]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.0.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.2.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.weight - torch.Size([256, 256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former + +decode_head.mask_embed.4.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoderMask2Former +2025-05-28 03:14:26,835 - mmseg - INFO - Loaded 2316 images +2025-05-28 03:14:26,836 - mmseg - INFO - Built 
training dataset from config: {'type': 'CityscapesDataset', 'data_root': '/pasteur/u/yiming/homework4/cityscapes', 'img_dir': 'leftImg8bit/', 'ann_dir': 'gtFine/', 'pipeline': [{'type': 'LoadImageFromFile'}, {'type': 'LoadAnnotations'}, {'type': 'Resize', 'img_scale': (2048, 1024), 'ratio_range': (0.5, 2.0)}, {'type': 'RandomCrop', 'crop_size': (1024, 1024), 'cat_max_ratio': 0.75}, {'type': 'RandomFlip', 'prob': 0.5}, {'type': 'PhotoMetricDistortion'}, {'type': 'Normalize', 'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375], 'to_rgb': True}, {'type': 'Pad', 'size': (1024, 1024), 'pad_val': 0, 'seg_pad_val': 255}, {'type': 'ToMask'}, {'type': 'DefaultFormatBundle'}, {'type': 'Collect', 'keys': ['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']}], 'split': 'splits/fold_1_train_split.txt'} +2025-05-28 03:14:26,836 - mmseg - INFO - Number of samples in training dataset: 2316 +2025-05-28 03:14:28,690 - mmseg - INFO - {'num_layers': 50, 'layer_decay_rate': 0.95, 'depths': [6, 6, 32, 6], 'offset_lr_scale': 1.0} +2025-05-28 03:14:28,690 - mmseg - INFO - Build CustomLayerDecayOptimizerConstructor 0.950000 - 52 +2025-05-28 03:14:28,699 - mmseg - INFO - Param groups = { + "layer_0_decay": { + "param_names": [ + "backbone.patch_embed.conv1.weight", + "backbone.patch_embed.conv2.weight", + "decode_head.query_embed.weight", + "decode_head.query_feat.weight", + "decode_head.level_embed.weight", + "decode_head.cls_embed.weight", + "decode_head.mask_embed.0.weight", + "decode_head.mask_embed.2.weight", + "decode_head.mask_embed.4.weight" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.05 + }, + "layer_0_no_decay": { + "param_names": [ + "backbone.patch_embed.conv1.bias", + "backbone.patch_embed.norm1.1.weight", + "backbone.patch_embed.norm1.1.bias", + "backbone.patch_embed.conv2.bias", + "backbone.patch_embed.norm2.1.weight", + "backbone.patch_embed.norm2.1.bias", + "decode_head.cls_embed.bias", + "decode_head.mask_embed.0.bias", + "decode_head.mask_embed.2.bias", + "decode_head.mask_embed.4.bias" + ], + "lr_scale": 0.07309772651287749, + "lr": 7.30977265128775e-07, + "weight_decay": 0.0 + }, + "layer_1_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.norm1.0.weight", + "backbone.levels.0.blocks.0.norm1.0.bias", + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.0.dcn.offset.bias", + "backbone.levels.0.blocks.0.dcn.mask.bias", + "backbone.levels.0.blocks.0.dcn.input_proj.bias", + "backbone.levels.0.blocks.0.dcn.output_proj.bias", + "backbone.levels.0.blocks.0.norm2.0.weight", + "backbone.levels.0.blocks.0.norm2.0.bias", + "backbone.levels.0.blocks.0.mlp.fc1.bias", + "backbone.levels.0.blocks.0.mlp.fc2.bias", + "backbone.levels.0.blocks.0.res_post_norm1.0.weight", + "backbone.levels.0.blocks.0.res_post_norm1.0.bias", + "backbone.levels.0.blocks.0.res_post_norm2.0.weight", + "backbone.levels.0.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.0 + }, + "layer_1_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.0.dcn.offset.weight", + "backbone.levels.0.blocks.0.dcn.mask.weight", + "backbone.levels.0.blocks.0.dcn.input_proj.weight", + 
"backbone.levels.0.blocks.0.dcn.output_proj.weight", + "backbone.levels.0.blocks.0.mlp.fc1.weight", + "backbone.levels.0.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.07694497527671315, + "lr": 7.694497527671315e-07, + "weight_decay": 0.05 + }, + "layer_2_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.norm1.0.weight", + "backbone.levels.0.blocks.1.norm1.0.bias", + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.1.dcn.offset.bias", + "backbone.levels.0.blocks.1.dcn.mask.bias", + "backbone.levels.0.blocks.1.dcn.input_proj.bias", + "backbone.levels.0.blocks.1.dcn.output_proj.bias", + "backbone.levels.0.blocks.1.norm2.0.weight", + "backbone.levels.0.blocks.1.norm2.0.bias", + "backbone.levels.0.blocks.1.mlp.fc1.bias", + "backbone.levels.0.blocks.1.mlp.fc2.bias", + "backbone.levels.0.blocks.1.res_post_norm1.0.weight", + "backbone.levels.0.blocks.1.res_post_norm1.0.bias", + "backbone.levels.0.blocks.1.res_post_norm2.0.weight", + "backbone.levels.0.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.0 + }, + "layer_2_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.1.dcn.offset.weight", + "backbone.levels.0.blocks.1.dcn.mask.weight", + "backbone.levels.0.blocks.1.dcn.input_proj.weight", + "backbone.levels.0.blocks.1.dcn.output_proj.weight", + "backbone.levels.0.blocks.1.mlp.fc1.weight", + "backbone.levels.0.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.0809947108175928, + "lr": 8.099471081759281e-07, + "weight_decay": 0.05 + }, + "layer_3_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.norm1.0.weight", + "backbone.levels.0.blocks.2.norm1.0.bias", + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.2.dcn.offset.bias", + "backbone.levels.0.blocks.2.dcn.mask.bias", + "backbone.levels.0.blocks.2.dcn.input_proj.bias", + "backbone.levels.0.blocks.2.dcn.output_proj.bias", + "backbone.levels.0.blocks.2.norm2.0.weight", + "backbone.levels.0.blocks.2.norm2.0.bias", + "backbone.levels.0.blocks.2.mlp.fc1.bias", + "backbone.levels.0.blocks.2.mlp.fc2.bias", + "backbone.levels.0.blocks.2.res_post_norm1.0.weight", + "backbone.levels.0.blocks.2.res_post_norm1.0.bias", + "backbone.levels.0.blocks.2.res_post_norm2.0.weight", + "backbone.levels.0.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.0 + }, + "layer_3_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.2.dcn.offset.weight", + "backbone.levels.0.blocks.2.dcn.mask.weight", + "backbone.levels.0.blocks.2.dcn.input_proj.weight", + "backbone.levels.0.blocks.2.dcn.output_proj.weight", + "backbone.levels.0.blocks.2.mlp.fc1.weight", + "backbone.levels.0.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.0852575903343082, + "lr": 8.525759033430821e-07, + "weight_decay": 0.05 + }, + "layer_4_no_decay": { + "param_names": [ + 
"backbone.levels.0.blocks.3.norm1.0.weight", + "backbone.levels.0.blocks.3.norm1.0.bias", + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.3.dcn.offset.bias", + "backbone.levels.0.blocks.3.dcn.mask.bias", + "backbone.levels.0.blocks.3.dcn.input_proj.bias", + "backbone.levels.0.blocks.3.dcn.output_proj.bias", + "backbone.levels.0.blocks.3.norm2.0.weight", + "backbone.levels.0.blocks.3.norm2.0.bias", + "backbone.levels.0.blocks.3.mlp.fc1.bias", + "backbone.levels.0.blocks.3.mlp.fc2.bias", + "backbone.levels.0.blocks.3.res_post_norm1.0.weight", + "backbone.levels.0.blocks.3.res_post_norm1.0.bias", + "backbone.levels.0.blocks.3.res_post_norm2.0.weight", + "backbone.levels.0.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.0 + }, + "layer_4_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.3.dcn.offset.weight", + "backbone.levels.0.blocks.3.dcn.mask.weight", + "backbone.levels.0.blocks.3.dcn.input_proj.weight", + "backbone.levels.0.blocks.3.dcn.output_proj.weight", + "backbone.levels.0.blocks.3.mlp.fc1.weight", + "backbone.levels.0.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.08974483193085075, + "lr": 8.974483193085076e-07, + "weight_decay": 0.05 + }, + "layer_5_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.norm1.0.weight", + "backbone.levels.0.blocks.4.norm1.0.bias", + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.4.dcn.offset.bias", + "backbone.levels.0.blocks.4.dcn.mask.bias", + "backbone.levels.0.blocks.4.dcn.input_proj.bias", + "backbone.levels.0.blocks.4.dcn.output_proj.bias", + "backbone.levels.0.blocks.4.norm2.0.weight", + "backbone.levels.0.blocks.4.norm2.0.bias", + "backbone.levels.0.blocks.4.mlp.fc1.bias", + "backbone.levels.0.blocks.4.mlp.fc2.bias", + "backbone.levels.0.blocks.4.res_post_norm1.0.weight", + "backbone.levels.0.blocks.4.res_post_norm1.0.bias", + "backbone.levels.0.blocks.4.res_post_norm2.0.weight", + "backbone.levels.0.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.0 + }, + "layer_5_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.4.dcn.offset.weight", + "backbone.levels.0.blocks.4.dcn.mask.weight", + "backbone.levels.0.blocks.4.dcn.input_proj.weight", + "backbone.levels.0.blocks.4.dcn.output_proj.weight", + "backbone.levels.0.blocks.4.mlp.fc1.weight", + "backbone.levels.0.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.09446824413773763, + "lr": 9.446824413773764e-07, + "weight_decay": 0.05 + }, + "layer_6_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.norm1.0.weight", + "backbone.levels.0.blocks.5.norm1.0.bias", + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.5.dcn.dw_conv.1.1.weight", + 
"backbone.levels.0.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.5.dcn.offset.bias", + "backbone.levels.0.blocks.5.dcn.mask.bias", + "backbone.levels.0.blocks.5.dcn.input_proj.bias", + "backbone.levels.0.blocks.5.dcn.output_proj.bias", + "backbone.levels.0.blocks.5.norm2.0.weight", + "backbone.levels.0.blocks.5.norm2.0.bias", + "backbone.levels.0.blocks.5.mlp.fc1.bias", + "backbone.levels.0.blocks.5.mlp.fc2.bias", + "backbone.levels.0.blocks.5.res_post_norm1.0.weight", + "backbone.levels.0.blocks.5.res_post_norm1.0.bias", + "backbone.levels.0.blocks.5.res_post_norm2.0.weight", + "backbone.levels.0.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.0 + }, + "layer_6_decay": { + "param_names": [ + "backbone.levels.0.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.0.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.5.dcn.offset.weight", + "backbone.levels.0.blocks.5.dcn.mask.weight", + "backbone.levels.0.blocks.5.dcn.input_proj.weight", + "backbone.levels.0.blocks.5.dcn.output_proj.weight", + "backbone.levels.0.blocks.5.mlp.fc1.weight", + "backbone.levels.0.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.09944025698709225, + "lr": 9.944025698709225e-07, + "weight_decay": 0.05 + }, + "layer_7_no_decay": { + "param_names": [ + "backbone.levels.0.norm.0.weight", + "backbone.levels.0.norm.0.bias", + "backbone.levels.0.downsample.norm.1.weight", + "backbone.levels.0.downsample.norm.1.bias", + "backbone.levels.1.blocks.0.norm1.0.weight", + "backbone.levels.1.blocks.0.norm1.0.bias", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.0.dcn.offset.bias", + "backbone.levels.1.blocks.0.dcn.mask.bias", + "backbone.levels.1.blocks.0.dcn.input_proj.bias", + "backbone.levels.1.blocks.0.dcn.output_proj.bias", + "backbone.levels.1.blocks.0.norm2.0.weight", + "backbone.levels.1.blocks.0.norm2.0.bias", + "backbone.levels.1.blocks.0.mlp.fc1.bias", + "backbone.levels.1.blocks.0.mlp.fc2.bias", + "backbone.levels.1.blocks.0.res_post_norm1.0.weight", + "backbone.levels.1.blocks.0.res_post_norm1.0.bias", + "backbone.levels.1.blocks.0.res_post_norm2.0.weight", + "backbone.levels.1.blocks.0.res_post_norm2.0.bias" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.0 + }, + "layer_7_decay": { + "param_names": [ + "backbone.levels.0.downsample.conv.weight", + "backbone.levels.1.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.0.dcn.offset.weight", + "backbone.levels.1.blocks.0.dcn.mask.weight", + "backbone.levels.1.blocks.0.dcn.input_proj.weight", + "backbone.levels.1.blocks.0.dcn.output_proj.weight", + "backbone.levels.1.blocks.0.mlp.fc1.weight", + "backbone.levels.1.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.10467395472325501, + "lr": 1.0467395472325502e-06, + "weight_decay": 0.05 + }, + "layer_8_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.norm1.0.weight", + "backbone.levels.1.blocks.1.norm1.0.bias", + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.1.dcn.offset.bias", + 
"backbone.levels.1.blocks.1.dcn.mask.bias", + "backbone.levels.1.blocks.1.dcn.input_proj.bias", + "backbone.levels.1.blocks.1.dcn.output_proj.bias", + "backbone.levels.1.blocks.1.norm2.0.weight", + "backbone.levels.1.blocks.1.norm2.0.bias", + "backbone.levels.1.blocks.1.mlp.fc1.bias", + "backbone.levels.1.blocks.1.mlp.fc2.bias", + "backbone.levels.1.blocks.1.res_post_norm1.0.weight", + "backbone.levels.1.blocks.1.res_post_norm1.0.bias", + "backbone.levels.1.blocks.1.res_post_norm2.0.weight", + "backbone.levels.1.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.0 + }, + "layer_8_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.1.dcn.offset.weight", + "backbone.levels.1.blocks.1.dcn.mask.weight", + "backbone.levels.1.blocks.1.dcn.input_proj.weight", + "backbone.levels.1.blocks.1.dcn.output_proj.weight", + "backbone.levels.1.blocks.1.mlp.fc1.weight", + "backbone.levels.1.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.11018311023500528, + "lr": 1.1018311023500528e-06, + "weight_decay": 0.05 + }, + "layer_9_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.norm1.0.weight", + "backbone.levels.1.blocks.2.norm1.0.bias", + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.2.dcn.offset.bias", + "backbone.levels.1.blocks.2.dcn.mask.bias", + "backbone.levels.1.blocks.2.dcn.input_proj.bias", + "backbone.levels.1.blocks.2.dcn.output_proj.bias", + "backbone.levels.1.blocks.2.norm2.0.weight", + "backbone.levels.1.blocks.2.norm2.0.bias", + "backbone.levels.1.blocks.2.mlp.fc1.bias", + "backbone.levels.1.blocks.2.mlp.fc2.bias", + "backbone.levels.1.blocks.2.res_post_norm1.0.weight", + "backbone.levels.1.blocks.2.res_post_norm1.0.bias", + "backbone.levels.1.blocks.2.res_post_norm2.0.weight", + "backbone.levels.1.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.0 + }, + "layer_9_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.2.dcn.offset.weight", + "backbone.levels.1.blocks.2.dcn.mask.weight", + "backbone.levels.1.blocks.2.dcn.input_proj.weight", + "backbone.levels.1.blocks.2.dcn.output_proj.weight", + "backbone.levels.1.blocks.2.mlp.fc1.weight", + "backbone.levels.1.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.11598222130000556, + "lr": 1.1598222130000556e-06, + "weight_decay": 0.05 + }, + "layer_10_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.norm1.0.weight", + "backbone.levels.1.blocks.3.norm1.0.bias", + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.3.dcn.offset.bias", + "backbone.levels.1.blocks.3.dcn.mask.bias", + "backbone.levels.1.blocks.3.dcn.input_proj.bias", + "backbone.levels.1.blocks.3.dcn.output_proj.bias", + "backbone.levels.1.blocks.3.norm2.0.weight", + "backbone.levels.1.blocks.3.norm2.0.bias", + "backbone.levels.1.blocks.3.mlp.fc1.bias", + 
"backbone.levels.1.blocks.3.mlp.fc2.bias", + "backbone.levels.1.blocks.3.res_post_norm1.0.weight", + "backbone.levels.1.blocks.3.res_post_norm1.0.bias", + "backbone.levels.1.blocks.3.res_post_norm2.0.weight", + "backbone.levels.1.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.0 + }, + "layer_10_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.3.dcn.offset.weight", + "backbone.levels.1.blocks.3.dcn.mask.weight", + "backbone.levels.1.blocks.3.dcn.input_proj.weight", + "backbone.levels.1.blocks.3.dcn.output_proj.weight", + "backbone.levels.1.blocks.3.mlp.fc1.weight", + "backbone.levels.1.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.12208654873684796, + "lr": 1.2208654873684798e-06, + "weight_decay": 0.05 + }, + "layer_11_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.norm1.0.weight", + "backbone.levels.1.blocks.4.norm1.0.bias", + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.4.dcn.offset.bias", + "backbone.levels.1.blocks.4.dcn.mask.bias", + "backbone.levels.1.blocks.4.dcn.input_proj.bias", + "backbone.levels.1.blocks.4.dcn.output_proj.bias", + "backbone.levels.1.blocks.4.norm2.0.weight", + "backbone.levels.1.blocks.4.norm2.0.bias", + "backbone.levels.1.blocks.4.mlp.fc1.bias", + "backbone.levels.1.blocks.4.mlp.fc2.bias", + "backbone.levels.1.blocks.4.res_post_norm1.0.weight", + "backbone.levels.1.blocks.4.res_post_norm1.0.bias", + "backbone.levels.1.blocks.4.res_post_norm2.0.weight", + "backbone.levels.1.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.0 + }, + "layer_11_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.4.dcn.offset.weight", + "backbone.levels.1.blocks.4.dcn.mask.weight", + "backbone.levels.1.blocks.4.dcn.input_proj.weight", + "backbone.levels.1.blocks.4.dcn.output_proj.weight", + "backbone.levels.1.blocks.4.mlp.fc1.weight", + "backbone.levels.1.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.12851215656510312, + "lr": 1.2851215656510314e-06, + "weight_decay": 0.05 + }, + "layer_12_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.norm1.0.weight", + "backbone.levels.1.blocks.5.norm1.0.bias", + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.5.dcn.offset.bias", + "backbone.levels.1.blocks.5.dcn.mask.bias", + "backbone.levels.1.blocks.5.dcn.input_proj.bias", + "backbone.levels.1.blocks.5.dcn.output_proj.bias", + "backbone.levels.1.blocks.5.norm2.0.weight", + "backbone.levels.1.blocks.5.norm2.0.bias", + "backbone.levels.1.blocks.5.mlp.fc1.bias", + "backbone.levels.1.blocks.5.mlp.fc2.bias", + "backbone.levels.1.blocks.5.res_post_norm1.0.weight", + "backbone.levels.1.blocks.5.res_post_norm1.0.bias", + "backbone.levels.1.blocks.5.res_post_norm2.0.weight", + "backbone.levels.1.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.13527595427905592, + 
"lr": 1.3527595427905593e-06, + "weight_decay": 0.0 + }, + "layer_12_decay": { + "param_names": [ + "backbone.levels.1.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.1.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.5.dcn.offset.weight", + "backbone.levels.1.blocks.5.dcn.mask.weight", + "backbone.levels.1.blocks.5.dcn.input_proj.weight", + "backbone.levels.1.blocks.5.dcn.output_proj.weight", + "backbone.levels.1.blocks.5.mlp.fc1.weight", + "backbone.levels.1.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.13527595427905592, + "lr": 1.3527595427905593e-06, + "weight_decay": 0.05 + }, + "layer_13_no_decay": { + "param_names": [ + "backbone.levels.1.norm.0.weight", + "backbone.levels.1.norm.0.bias", + "backbone.levels.1.downsample.norm.1.weight", + "backbone.levels.1.downsample.norm.1.bias", + "backbone.levels.2.blocks.0.norm1.0.weight", + "backbone.levels.2.blocks.0.norm1.0.bias", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.0.dcn.offset.bias", + "backbone.levels.2.blocks.0.dcn.mask.bias", + "backbone.levels.2.blocks.0.dcn.input_proj.bias", + "backbone.levels.2.blocks.0.dcn.output_proj.bias", + "backbone.levels.2.blocks.0.norm2.0.weight", + "backbone.levels.2.blocks.0.norm2.0.bias", + "backbone.levels.2.blocks.0.mlp.fc1.bias", + "backbone.levels.2.blocks.0.mlp.fc2.bias", + "backbone.levels.2.blocks.0.res_post_norm1.0.weight", + "backbone.levels.2.blocks.0.res_post_norm1.0.bias", + "backbone.levels.2.blocks.0.res_post_norm2.0.weight", + "backbone.levels.2.blocks.0.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.0.0.weight", + "backbone.levels.2.post_norms.0.0.bias" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.0 + }, + "layer_13_decay": { + "param_names": [ + "backbone.levels.1.downsample.conv.weight", + "backbone.levels.2.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.0.dcn.offset.weight", + "backbone.levels.2.blocks.0.dcn.mask.weight", + "backbone.levels.2.blocks.0.dcn.input_proj.weight", + "backbone.levels.2.blocks.0.dcn.output_proj.weight", + "backbone.levels.2.blocks.0.mlp.fc1.weight", + "backbone.levels.2.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.14239574134637467, + "lr": 1.4239574134637468e-06, + "weight_decay": 0.05 + }, + "layer_14_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.norm1.0.weight", + "backbone.levels.2.blocks.1.norm1.0.bias", + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.1.dcn.offset.bias", + "backbone.levels.2.blocks.1.dcn.mask.bias", + "backbone.levels.2.blocks.1.dcn.input_proj.bias", + "backbone.levels.2.blocks.1.dcn.output_proj.bias", + "backbone.levels.2.blocks.1.norm2.0.weight", + "backbone.levels.2.blocks.1.norm2.0.bias", + "backbone.levels.2.blocks.1.mlp.fc1.bias", + "backbone.levels.2.blocks.1.mlp.fc2.bias", + "backbone.levels.2.blocks.1.res_post_norm1.0.weight", + "backbone.levels.2.blocks.1.res_post_norm1.0.bias", + "backbone.levels.2.blocks.1.res_post_norm2.0.weight", + "backbone.levels.2.blocks.1.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.1.0.weight", + 
"backbone.levels.2.post_norms.1.0.bias" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.0 + }, + "layer_14_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.1.dcn.offset.weight", + "backbone.levels.2.blocks.1.dcn.mask.weight", + "backbone.levels.2.blocks.1.dcn.input_proj.weight", + "backbone.levels.2.blocks.1.dcn.output_proj.weight", + "backbone.levels.2.blocks.1.mlp.fc1.weight", + "backbone.levels.2.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.14989025404881545, + "lr": 1.4989025404881547e-06, + "weight_decay": 0.05 + }, + "layer_15_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.norm1.0.weight", + "backbone.levels.2.blocks.2.norm1.0.bias", + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.2.dcn.offset.bias", + "backbone.levels.2.blocks.2.dcn.mask.bias", + "backbone.levels.2.blocks.2.dcn.input_proj.bias", + "backbone.levels.2.blocks.2.dcn.output_proj.bias", + "backbone.levels.2.blocks.2.norm2.0.weight", + "backbone.levels.2.blocks.2.norm2.0.bias", + "backbone.levels.2.blocks.2.mlp.fc1.bias", + "backbone.levels.2.blocks.2.mlp.fc2.bias", + "backbone.levels.2.blocks.2.res_post_norm1.0.weight", + "backbone.levels.2.blocks.2.res_post_norm1.0.bias", + "backbone.levels.2.blocks.2.res_post_norm2.0.weight", + "backbone.levels.2.blocks.2.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.2.0.weight", + "backbone.levels.2.post_norms.2.0.bias" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.0 + }, + "layer_15_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.2.dcn.offset.weight", + "backbone.levels.2.blocks.2.dcn.mask.weight", + "backbone.levels.2.blocks.2.dcn.input_proj.weight", + "backbone.levels.2.blocks.2.dcn.output_proj.weight", + "backbone.levels.2.blocks.2.mlp.fc1.weight", + "backbone.levels.2.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.1577792147882268, + "lr": 1.577792147882268e-06, + "weight_decay": 0.05 + }, + "layer_16_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.norm1.0.weight", + "backbone.levels.2.blocks.3.norm1.0.bias", + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.3.dcn.offset.bias", + "backbone.levels.2.blocks.3.dcn.mask.bias", + "backbone.levels.2.blocks.3.dcn.input_proj.bias", + "backbone.levels.2.blocks.3.dcn.output_proj.bias", + "backbone.levels.2.blocks.3.norm2.0.weight", + "backbone.levels.2.blocks.3.norm2.0.bias", + "backbone.levels.2.blocks.3.mlp.fc1.bias", + "backbone.levels.2.blocks.3.mlp.fc2.bias", + "backbone.levels.2.blocks.3.res_post_norm1.0.weight", + "backbone.levels.2.blocks.3.res_post_norm1.0.bias", + "backbone.levels.2.blocks.3.res_post_norm2.0.weight", + "backbone.levels.2.blocks.3.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.3.0.weight", + "backbone.levels.2.post_norms.3.0.bias" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.0 
+ }, + "layer_16_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.3.dcn.offset.weight", + "backbone.levels.2.blocks.3.dcn.mask.weight", + "backbone.levels.2.blocks.3.dcn.input_proj.weight", + "backbone.levels.2.blocks.3.dcn.output_proj.weight", + "backbone.levels.2.blocks.3.mlp.fc1.weight", + "backbone.levels.2.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.16608338398760716, + "lr": 1.6608338398760719e-06, + "weight_decay": 0.05 + }, + "layer_17_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.norm1.0.weight", + "backbone.levels.2.blocks.4.norm1.0.bias", + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.4.dcn.offset.bias", + "backbone.levels.2.blocks.4.dcn.mask.bias", + "backbone.levels.2.blocks.4.dcn.input_proj.bias", + "backbone.levels.2.blocks.4.dcn.output_proj.bias", + "backbone.levels.2.blocks.4.norm2.0.weight", + "backbone.levels.2.blocks.4.norm2.0.bias", + "backbone.levels.2.blocks.4.mlp.fc1.bias", + "backbone.levels.2.blocks.4.mlp.fc2.bias", + "backbone.levels.2.blocks.4.res_post_norm1.0.weight", + "backbone.levels.2.blocks.4.res_post_norm1.0.bias", + "backbone.levels.2.blocks.4.res_post_norm2.0.weight", + "backbone.levels.2.blocks.4.res_post_norm2.0.bias", + "backbone.levels.2.post_norms.4.0.weight", + "backbone.levels.2.post_norms.4.0.bias" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.0 + }, + "layer_17_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.4.dcn.offset.weight", + "backbone.levels.2.blocks.4.dcn.mask.weight", + "backbone.levels.2.blocks.4.dcn.input_proj.weight", + "backbone.levels.2.blocks.4.dcn.output_proj.weight", + "backbone.levels.2.blocks.4.mlp.fc1.weight", + "backbone.levels.2.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.174824614723797, + "lr": 1.7482461472379704e-06, + "weight_decay": 0.05 + }, + "layer_18_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.norm1.0.weight", + "backbone.levels.2.blocks.5.norm1.0.bias", + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.5.dcn.offset.bias", + "backbone.levels.2.blocks.5.dcn.mask.bias", + "backbone.levels.2.blocks.5.dcn.input_proj.bias", + "backbone.levels.2.blocks.5.dcn.output_proj.bias", + "backbone.levels.2.blocks.5.norm2.0.weight", + "backbone.levels.2.blocks.5.norm2.0.bias", + "backbone.levels.2.blocks.5.mlp.fc1.bias", + "backbone.levels.2.blocks.5.mlp.fc2.bias", + "backbone.levels.2.blocks.5.res_post_norm1.0.weight", + "backbone.levels.2.blocks.5.res_post_norm1.0.bias", + "backbone.levels.2.blocks.5.res_post_norm2.0.weight", + "backbone.levels.2.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.0 + }, + "layer_18_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.5.dcn.offset.weight", + 
"backbone.levels.2.blocks.5.dcn.mask.weight", + "backbone.levels.2.blocks.5.dcn.input_proj.weight", + "backbone.levels.2.blocks.5.dcn.output_proj.weight", + "backbone.levels.2.blocks.5.mlp.fc1.weight", + "backbone.levels.2.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.18402591023557582, + "lr": 1.8402591023557584e-06, + "weight_decay": 0.05 + }, + "layer_19_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.norm1.0.weight", + "backbone.levels.2.blocks.6.norm1.0.bias", + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.6.dcn.offset.bias", + "backbone.levels.2.blocks.6.dcn.mask.bias", + "backbone.levels.2.blocks.6.dcn.input_proj.bias", + "backbone.levels.2.blocks.6.dcn.output_proj.bias", + "backbone.levels.2.blocks.6.norm2.0.weight", + "backbone.levels.2.blocks.6.norm2.0.bias", + "backbone.levels.2.blocks.6.mlp.fc1.bias", + "backbone.levels.2.blocks.6.mlp.fc2.bias", + "backbone.levels.2.blocks.6.res_post_norm1.0.weight", + "backbone.levels.2.blocks.6.res_post_norm1.0.bias", + "backbone.levels.2.blocks.6.res_post_norm2.0.weight", + "backbone.levels.2.blocks.6.res_post_norm2.0.bias" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.0 + }, + "layer_19_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.6.dcn.offset.weight", + "backbone.levels.2.blocks.6.dcn.mask.weight", + "backbone.levels.2.blocks.6.dcn.input_proj.weight", + "backbone.levels.2.blocks.6.dcn.output_proj.weight", + "backbone.levels.2.blocks.6.mlp.fc1.weight", + "backbone.levels.2.blocks.6.mlp.fc2.weight" + ], + "lr_scale": 0.19371148445850087, + "lr": 1.937114844585009e-06, + "weight_decay": 0.05 + }, + "layer_20_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.norm1.0.weight", + "backbone.levels.2.blocks.7.norm1.0.bias", + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.7.dcn.offset.bias", + "backbone.levels.2.blocks.7.dcn.mask.bias", + "backbone.levels.2.blocks.7.dcn.input_proj.bias", + "backbone.levels.2.blocks.7.dcn.output_proj.bias", + "backbone.levels.2.blocks.7.norm2.0.weight", + "backbone.levels.2.blocks.7.norm2.0.bias", + "backbone.levels.2.blocks.7.mlp.fc1.bias", + "backbone.levels.2.blocks.7.mlp.fc2.bias", + "backbone.levels.2.blocks.7.res_post_norm1.0.weight", + "backbone.levels.2.blocks.7.res_post_norm1.0.bias", + "backbone.levels.2.blocks.7.res_post_norm2.0.weight", + "backbone.levels.2.blocks.7.res_post_norm2.0.bias" + ], + "lr_scale": 0.2039068257457904, + "lr": 2.039068257457904e-06, + "weight_decay": 0.0 + }, + "layer_20_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.7.dcn.offset.weight", + "backbone.levels.2.blocks.7.dcn.mask.weight", + "backbone.levels.2.blocks.7.dcn.input_proj.weight", + "backbone.levels.2.blocks.7.dcn.output_proj.weight", + "backbone.levels.2.blocks.7.mlp.fc1.weight", + "backbone.levels.2.blocks.7.mlp.fc2.weight" + ], + "lr_scale": 0.2039068257457904, + "lr": 
2.039068257457904e-06, + "weight_decay": 0.05 + }, + "layer_21_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.norm1.0.weight", + "backbone.levels.2.blocks.8.norm1.0.bias", + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.8.dcn.offset.bias", + "backbone.levels.2.blocks.8.dcn.mask.bias", + "backbone.levels.2.blocks.8.dcn.input_proj.bias", + "backbone.levels.2.blocks.8.dcn.output_proj.bias", + "backbone.levels.2.blocks.8.norm2.0.weight", + "backbone.levels.2.blocks.8.norm2.0.bias", + "backbone.levels.2.blocks.8.mlp.fc1.bias", + "backbone.levels.2.blocks.8.mlp.fc2.bias", + "backbone.levels.2.blocks.8.res_post_norm1.0.weight", + "backbone.levels.2.blocks.8.res_post_norm1.0.bias", + "backbone.levels.2.blocks.8.res_post_norm2.0.weight", + "backbone.levels.2.blocks.8.res_post_norm2.0.bias" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.0 + }, + "layer_21_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.8.dcn.offset.weight", + "backbone.levels.2.blocks.8.dcn.mask.weight", + "backbone.levels.2.blocks.8.dcn.input_proj.weight", + "backbone.levels.2.blocks.8.dcn.output_proj.weight", + "backbone.levels.2.blocks.8.mlp.fc1.weight", + "backbone.levels.2.blocks.8.mlp.fc2.weight" + ], + "lr_scale": 0.21463876394293727, + "lr": 2.146387639429373e-06, + "weight_decay": 0.05 + }, + "layer_22_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.norm1.0.weight", + "backbone.levels.2.blocks.9.norm1.0.bias", + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.9.dcn.offset.bias", + "backbone.levels.2.blocks.9.dcn.mask.bias", + "backbone.levels.2.blocks.9.dcn.input_proj.bias", + "backbone.levels.2.blocks.9.dcn.output_proj.bias", + "backbone.levels.2.blocks.9.norm2.0.weight", + "backbone.levels.2.blocks.9.norm2.0.bias", + "backbone.levels.2.blocks.9.mlp.fc1.bias", + "backbone.levels.2.blocks.9.mlp.fc2.bias", + "backbone.levels.2.blocks.9.res_post_norm1.0.weight", + "backbone.levels.2.blocks.9.res_post_norm1.0.bias", + "backbone.levels.2.blocks.9.res_post_norm2.0.weight", + "backbone.levels.2.blocks.9.res_post_norm2.0.bias" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.0 + }, + "layer_22_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.9.dcn.offset.weight", + "backbone.levels.2.blocks.9.dcn.mask.weight", + "backbone.levels.2.blocks.9.dcn.input_proj.weight", + "backbone.levels.2.blocks.9.dcn.output_proj.weight", + "backbone.levels.2.blocks.9.mlp.fc1.weight", + "backbone.levels.2.blocks.9.mlp.fc2.weight" + ], + "lr_scale": 0.22593554099256555, + "lr": 2.2593554099256557e-06, + "weight_decay": 0.05 + }, + "layer_23_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.norm1.0.weight", + "backbone.levels.2.blocks.10.norm1.0.bias", + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.bias", 
+ "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.10.dcn.offset.bias", + "backbone.levels.2.blocks.10.dcn.mask.bias", + "backbone.levels.2.blocks.10.dcn.input_proj.bias", + "backbone.levels.2.blocks.10.dcn.output_proj.bias", + "backbone.levels.2.blocks.10.norm2.0.weight", + "backbone.levels.2.blocks.10.norm2.0.bias", + "backbone.levels.2.blocks.10.mlp.fc1.bias", + "backbone.levels.2.blocks.10.mlp.fc2.bias", + "backbone.levels.2.blocks.10.res_post_norm1.0.weight", + "backbone.levels.2.blocks.10.res_post_norm1.0.bias", + "backbone.levels.2.blocks.10.res_post_norm2.0.weight", + "backbone.levels.2.blocks.10.res_post_norm2.0.bias" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.0 + }, + "layer_23_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.10.dcn.offset.weight", + "backbone.levels.2.blocks.10.dcn.mask.weight", + "backbone.levels.2.blocks.10.dcn.input_proj.weight", + "backbone.levels.2.blocks.10.dcn.output_proj.weight", + "backbone.levels.2.blocks.10.mlp.fc1.weight", + "backbone.levels.2.blocks.10.mlp.fc2.weight" + ], + "lr_scale": 0.23782688525533216, + "lr": 2.378268852553322e-06, + "weight_decay": 0.05 + }, + "layer_24_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.norm1.0.weight", + "backbone.levels.2.blocks.11.norm1.0.bias", + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.11.dcn.offset.bias", + "backbone.levels.2.blocks.11.dcn.mask.bias", + "backbone.levels.2.blocks.11.dcn.input_proj.bias", + "backbone.levels.2.blocks.11.dcn.output_proj.bias", + "backbone.levels.2.blocks.11.norm2.0.weight", + "backbone.levels.2.blocks.11.norm2.0.bias", + "backbone.levels.2.blocks.11.mlp.fc1.bias", + "backbone.levels.2.blocks.11.mlp.fc2.bias", + "backbone.levels.2.blocks.11.res_post_norm1.0.weight", + "backbone.levels.2.blocks.11.res_post_norm1.0.bias", + "backbone.levels.2.blocks.11.res_post_norm2.0.weight", + "backbone.levels.2.blocks.11.res_post_norm2.0.bias" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.0 + }, + "layer_24_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.11.dcn.offset.weight", + "backbone.levels.2.blocks.11.dcn.mask.weight", + "backbone.levels.2.blocks.11.dcn.input_proj.weight", + "backbone.levels.2.blocks.11.dcn.output_proj.weight", + "backbone.levels.2.blocks.11.mlp.fc1.weight", + "backbone.levels.2.blocks.11.mlp.fc2.weight" + ], + "lr_scale": 0.2503440897424549, + "lr": 2.5034408974245495e-06, + "weight_decay": 0.05 + }, + "layer_25_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.norm1.0.weight", + "backbone.levels.2.blocks.12.norm1.0.bias", + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.12.dcn.offset.bias", + "backbone.levels.2.blocks.12.dcn.mask.bias", + "backbone.levels.2.blocks.12.dcn.input_proj.bias", 
+ "backbone.levels.2.blocks.12.dcn.output_proj.bias", + "backbone.levels.2.blocks.12.norm2.0.weight", + "backbone.levels.2.blocks.12.norm2.0.bias", + "backbone.levels.2.blocks.12.mlp.fc1.bias", + "backbone.levels.2.blocks.12.mlp.fc2.bias", + "backbone.levels.2.blocks.12.res_post_norm1.0.weight", + "backbone.levels.2.blocks.12.res_post_norm1.0.bias", + "backbone.levels.2.blocks.12.res_post_norm2.0.weight", + "backbone.levels.2.blocks.12.res_post_norm2.0.bias" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.0 + }, + "layer_25_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.12.dcn.offset.weight", + "backbone.levels.2.blocks.12.dcn.mask.weight", + "backbone.levels.2.blocks.12.dcn.input_proj.weight", + "backbone.levels.2.blocks.12.dcn.output_proj.weight", + "backbone.levels.2.blocks.12.mlp.fc1.weight", + "backbone.levels.2.blocks.12.mlp.fc2.weight" + ], + "lr_scale": 0.26352009446574204, + "lr": 2.6352009446574206e-06, + "weight_decay": 0.05 + }, + "layer_26_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.norm1.0.weight", + "backbone.levels.2.blocks.13.norm1.0.bias", + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.13.dcn.offset.bias", + "backbone.levels.2.blocks.13.dcn.mask.bias", + "backbone.levels.2.blocks.13.dcn.input_proj.bias", + "backbone.levels.2.blocks.13.dcn.output_proj.bias", + "backbone.levels.2.blocks.13.norm2.0.weight", + "backbone.levels.2.blocks.13.norm2.0.bias", + "backbone.levels.2.blocks.13.mlp.fc1.bias", + "backbone.levels.2.blocks.13.mlp.fc2.bias", + "backbone.levels.2.blocks.13.res_post_norm1.0.weight", + "backbone.levels.2.blocks.13.res_post_norm1.0.bias", + "backbone.levels.2.blocks.13.res_post_norm2.0.weight", + "backbone.levels.2.blocks.13.res_post_norm2.0.bias" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.0 + }, + "layer_26_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.13.dcn.offset.weight", + "backbone.levels.2.blocks.13.dcn.mask.weight", + "backbone.levels.2.blocks.13.dcn.input_proj.weight", + "backbone.levels.2.blocks.13.dcn.output_proj.weight", + "backbone.levels.2.blocks.13.mlp.fc1.weight", + "backbone.levels.2.blocks.13.mlp.fc2.weight" + ], + "lr_scale": 0.27738957312183377, + "lr": 2.7738957312183377e-06, + "weight_decay": 0.05 + }, + "layer_27_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.norm1.0.weight", + "backbone.levels.2.blocks.14.norm1.0.bias", + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.14.dcn.offset.bias", + "backbone.levels.2.blocks.14.dcn.mask.bias", + "backbone.levels.2.blocks.14.dcn.input_proj.bias", + "backbone.levels.2.blocks.14.dcn.output_proj.bias", + "backbone.levels.2.blocks.14.norm2.0.weight", + "backbone.levels.2.blocks.14.norm2.0.bias", + "backbone.levels.2.blocks.14.mlp.fc1.bias", + "backbone.levels.2.blocks.14.mlp.fc2.bias", + 
"backbone.levels.2.blocks.14.res_post_norm1.0.weight", + "backbone.levels.2.blocks.14.res_post_norm1.0.bias", + "backbone.levels.2.blocks.14.res_post_norm2.0.weight", + "backbone.levels.2.blocks.14.res_post_norm2.0.bias" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.0 + }, + "layer_27_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.14.dcn.offset.weight", + "backbone.levels.2.blocks.14.dcn.mask.weight", + "backbone.levels.2.blocks.14.dcn.input_proj.weight", + "backbone.levels.2.blocks.14.dcn.output_proj.weight", + "backbone.levels.2.blocks.14.mlp.fc1.weight", + "backbone.levels.2.blocks.14.mlp.fc2.weight" + ], + "lr_scale": 0.2919890243387724, + "lr": 2.919890243387724e-06, + "weight_decay": 0.05 + }, + "layer_28_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.norm1.0.weight", + "backbone.levels.2.blocks.15.norm1.0.bias", + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.15.dcn.offset.bias", + "backbone.levels.2.blocks.15.dcn.mask.bias", + "backbone.levels.2.blocks.15.dcn.input_proj.bias", + "backbone.levels.2.blocks.15.dcn.output_proj.bias", + "backbone.levels.2.blocks.15.norm2.0.weight", + "backbone.levels.2.blocks.15.norm2.0.bias", + "backbone.levels.2.blocks.15.mlp.fc1.bias", + "backbone.levels.2.blocks.15.mlp.fc2.bias", + "backbone.levels.2.blocks.15.res_post_norm1.0.weight", + "backbone.levels.2.blocks.15.res_post_norm1.0.bias", + "backbone.levels.2.blocks.15.res_post_norm2.0.weight", + "backbone.levels.2.blocks.15.res_post_norm2.0.bias" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.0 + }, + "layer_28_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.15.dcn.offset.weight", + "backbone.levels.2.blocks.15.dcn.mask.weight", + "backbone.levels.2.blocks.15.dcn.input_proj.weight", + "backbone.levels.2.blocks.15.dcn.output_proj.weight", + "backbone.levels.2.blocks.15.mlp.fc1.weight", + "backbone.levels.2.blocks.15.mlp.fc2.weight" + ], + "lr_scale": 0.3073568677250236, + "lr": 3.073568677250236e-06, + "weight_decay": 0.05 + }, + "layer_29_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.norm1.0.weight", + "backbone.levels.2.blocks.16.norm1.0.bias", + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.16.dcn.offset.bias", + "backbone.levels.2.blocks.16.dcn.mask.bias", + "backbone.levels.2.blocks.16.dcn.input_proj.bias", + "backbone.levels.2.blocks.16.dcn.output_proj.bias", + "backbone.levels.2.blocks.16.norm2.0.weight", + "backbone.levels.2.blocks.16.norm2.0.bias", + "backbone.levels.2.blocks.16.mlp.fc1.bias", + "backbone.levels.2.blocks.16.mlp.fc2.bias", + "backbone.levels.2.blocks.16.res_post_norm1.0.weight", + "backbone.levels.2.blocks.16.res_post_norm1.0.bias", + "backbone.levels.2.blocks.16.res_post_norm2.0.weight", + "backbone.levels.2.blocks.16.res_post_norm2.0.bias" + ], + "lr_scale": 0.323533544973709, + 
"lr": 3.2353354497370905e-06, + "weight_decay": 0.0 + }, + "layer_29_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.16.dcn.offset.weight", + "backbone.levels.2.blocks.16.dcn.mask.weight", + "backbone.levels.2.blocks.16.dcn.input_proj.weight", + "backbone.levels.2.blocks.16.dcn.output_proj.weight", + "backbone.levels.2.blocks.16.mlp.fc1.weight", + "backbone.levels.2.blocks.16.mlp.fc2.weight" + ], + "lr_scale": 0.323533544973709, + "lr": 3.2353354497370905e-06, + "weight_decay": 0.05 + }, + "layer_30_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.norm1.0.weight", + "backbone.levels.2.blocks.17.norm1.0.bias", + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.17.dcn.offset.bias", + "backbone.levels.2.blocks.17.dcn.mask.bias", + "backbone.levels.2.blocks.17.dcn.input_proj.bias", + "backbone.levels.2.blocks.17.dcn.output_proj.bias", + "backbone.levels.2.blocks.17.norm2.0.weight", + "backbone.levels.2.blocks.17.norm2.0.bias", + "backbone.levels.2.blocks.17.mlp.fc1.bias", + "backbone.levels.2.blocks.17.mlp.fc2.bias", + "backbone.levels.2.blocks.17.res_post_norm1.0.weight", + "backbone.levels.2.blocks.17.res_post_norm1.0.bias", + "backbone.levels.2.blocks.17.res_post_norm2.0.weight", + "backbone.levels.2.blocks.17.res_post_norm2.0.bias" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.0 + }, + "layer_30_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.17.dcn.offset.weight", + "backbone.levels.2.blocks.17.dcn.mask.weight", + "backbone.levels.2.blocks.17.dcn.input_proj.weight", + "backbone.levels.2.blocks.17.dcn.output_proj.weight", + "backbone.levels.2.blocks.17.mlp.fc1.weight", + "backbone.levels.2.blocks.17.mlp.fc2.weight" + ], + "lr_scale": 0.3405616262881148, + "lr": 3.4056162628811484e-06, + "weight_decay": 0.05 + }, + "layer_31_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.norm1.0.weight", + "backbone.levels.2.blocks.18.norm1.0.bias", + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.18.dcn.offset.bias", + "backbone.levels.2.blocks.18.dcn.mask.bias", + "backbone.levels.2.blocks.18.dcn.input_proj.bias", + "backbone.levels.2.blocks.18.dcn.output_proj.bias", + "backbone.levels.2.blocks.18.norm2.0.weight", + "backbone.levels.2.blocks.18.norm2.0.bias", + "backbone.levels.2.blocks.18.mlp.fc1.bias", + "backbone.levels.2.blocks.18.mlp.fc2.bias", + "backbone.levels.2.blocks.18.res_post_norm1.0.weight", + "backbone.levels.2.blocks.18.res_post_norm1.0.bias", + "backbone.levels.2.blocks.18.res_post_norm2.0.weight", + "backbone.levels.2.blocks.18.res_post_norm2.0.bias" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.0 + }, + "layer_31_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.weight", + 
"backbone.levels.2.blocks.18.dcn.offset.weight", + "backbone.levels.2.blocks.18.dcn.mask.weight", + "backbone.levels.2.blocks.18.dcn.input_proj.weight", + "backbone.levels.2.blocks.18.dcn.output_proj.weight", + "backbone.levels.2.blocks.18.mlp.fc1.weight", + "backbone.levels.2.blocks.18.mlp.fc2.weight" + ], + "lr_scale": 0.3584859224085419, + "lr": 3.584859224085419e-06, + "weight_decay": 0.05 + }, + "layer_32_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.norm1.0.weight", + "backbone.levels.2.blocks.19.norm1.0.bias", + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.19.dcn.offset.bias", + "backbone.levels.2.blocks.19.dcn.mask.bias", + "backbone.levels.2.blocks.19.dcn.input_proj.bias", + "backbone.levels.2.blocks.19.dcn.output_proj.bias", + "backbone.levels.2.blocks.19.norm2.0.weight", + "backbone.levels.2.blocks.19.norm2.0.bias", + "backbone.levels.2.blocks.19.mlp.fc1.bias", + "backbone.levels.2.blocks.19.mlp.fc2.bias", + "backbone.levels.2.blocks.19.res_post_norm1.0.weight", + "backbone.levels.2.blocks.19.res_post_norm1.0.bias", + "backbone.levels.2.blocks.19.res_post_norm2.0.weight", + "backbone.levels.2.blocks.19.res_post_norm2.0.bias" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.0 + }, + "layer_32_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.19.dcn.offset.weight", + "backbone.levels.2.blocks.19.dcn.mask.weight", + "backbone.levels.2.blocks.19.dcn.input_proj.weight", + "backbone.levels.2.blocks.19.dcn.output_proj.weight", + "backbone.levels.2.blocks.19.mlp.fc1.weight", + "backbone.levels.2.blocks.19.mlp.fc2.weight" + ], + "lr_scale": 0.37735360253530725, + "lr": 3.7735360253530726e-06, + "weight_decay": 0.05 + }, + "layer_33_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.norm1.0.weight", + "backbone.levels.2.blocks.20.norm1.0.bias", + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.20.dcn.offset.bias", + "backbone.levels.2.blocks.20.dcn.mask.bias", + "backbone.levels.2.blocks.20.dcn.input_proj.bias", + "backbone.levels.2.blocks.20.dcn.output_proj.bias", + "backbone.levels.2.blocks.20.norm2.0.weight", + "backbone.levels.2.blocks.20.norm2.0.bias", + "backbone.levels.2.blocks.20.mlp.fc1.bias", + "backbone.levels.2.blocks.20.mlp.fc2.bias", + "backbone.levels.2.blocks.20.res_post_norm1.0.weight", + "backbone.levels.2.blocks.20.res_post_norm1.0.bias", + "backbone.levels.2.blocks.20.res_post_norm2.0.weight", + "backbone.levels.2.blocks.20.res_post_norm2.0.bias" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.0 + }, + "layer_33_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.20.dcn.offset.weight", + "backbone.levels.2.blocks.20.dcn.mask.weight", + "backbone.levels.2.blocks.20.dcn.input_proj.weight", + "backbone.levels.2.blocks.20.dcn.output_proj.weight", + "backbone.levels.2.blocks.20.mlp.fc1.weight", + 
"backbone.levels.2.blocks.20.mlp.fc2.weight" + ], + "lr_scale": 0.3972143184582182, + "lr": 3.972143184582182e-06, + "weight_decay": 0.05 + }, + "layer_34_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.norm1.0.weight", + "backbone.levels.2.blocks.21.norm1.0.bias", + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.21.dcn.offset.bias", + "backbone.levels.2.blocks.21.dcn.mask.bias", + "backbone.levels.2.blocks.21.dcn.input_proj.bias", + "backbone.levels.2.blocks.21.dcn.output_proj.bias", + "backbone.levels.2.blocks.21.norm2.0.weight", + "backbone.levels.2.blocks.21.norm2.0.bias", + "backbone.levels.2.blocks.21.mlp.fc1.bias", + "backbone.levels.2.blocks.21.mlp.fc2.bias", + "backbone.levels.2.blocks.21.res_post_norm1.0.weight", + "backbone.levels.2.blocks.21.res_post_norm1.0.bias", + "backbone.levels.2.blocks.21.res_post_norm2.0.weight", + "backbone.levels.2.blocks.21.res_post_norm2.0.bias" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.0 + }, + "layer_34_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.21.dcn.offset.weight", + "backbone.levels.2.blocks.21.dcn.mask.weight", + "backbone.levels.2.blocks.21.dcn.input_proj.weight", + "backbone.levels.2.blocks.21.dcn.output_proj.weight", + "backbone.levels.2.blocks.21.mlp.fc1.weight", + "backbone.levels.2.blocks.21.mlp.fc2.weight" + ], + "lr_scale": 0.4181203352191771, + "lr": 4.181203352191771e-06, + "weight_decay": 0.05 + }, + "layer_35_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.norm1.0.weight", + "backbone.levels.2.blocks.22.norm1.0.bias", + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.22.dcn.offset.bias", + "backbone.levels.2.blocks.22.dcn.mask.bias", + "backbone.levels.2.blocks.22.dcn.input_proj.bias", + "backbone.levels.2.blocks.22.dcn.output_proj.bias", + "backbone.levels.2.blocks.22.norm2.0.weight", + "backbone.levels.2.blocks.22.norm2.0.bias", + "backbone.levels.2.blocks.22.mlp.fc1.bias", + "backbone.levels.2.blocks.22.mlp.fc2.bias", + "backbone.levels.2.blocks.22.res_post_norm1.0.weight", + "backbone.levels.2.blocks.22.res_post_norm1.0.bias", + "backbone.levels.2.blocks.22.res_post_norm2.0.weight", + "backbone.levels.2.blocks.22.res_post_norm2.0.bias" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.0 + }, + "layer_35_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.22.dcn.offset.weight", + "backbone.levels.2.blocks.22.dcn.mask.weight", + "backbone.levels.2.blocks.22.dcn.input_proj.weight", + "backbone.levels.2.blocks.22.dcn.output_proj.weight", + "backbone.levels.2.blocks.22.mlp.fc1.weight", + "backbone.levels.2.blocks.22.mlp.fc2.weight" + ], + "lr_scale": 0.44012666865176536, + "lr": 4.401266686517654e-06, + "weight_decay": 0.05 + }, + "layer_36_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.norm1.0.weight", + 
"backbone.levels.2.blocks.23.norm1.0.bias", + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.23.dcn.offset.bias", + "backbone.levels.2.blocks.23.dcn.mask.bias", + "backbone.levels.2.blocks.23.dcn.input_proj.bias", + "backbone.levels.2.blocks.23.dcn.output_proj.bias", + "backbone.levels.2.blocks.23.norm2.0.weight", + "backbone.levels.2.blocks.23.norm2.0.bias", + "backbone.levels.2.blocks.23.mlp.fc1.bias", + "backbone.levels.2.blocks.23.mlp.fc2.bias", + "backbone.levels.2.blocks.23.res_post_norm1.0.weight", + "backbone.levels.2.blocks.23.res_post_norm1.0.bias", + "backbone.levels.2.blocks.23.res_post_norm2.0.weight", + "backbone.levels.2.blocks.23.res_post_norm2.0.bias" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.0 + }, + "layer_36_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.23.dcn.offset.weight", + "backbone.levels.2.blocks.23.dcn.mask.weight", + "backbone.levels.2.blocks.23.dcn.input_proj.weight", + "backbone.levels.2.blocks.23.dcn.output_proj.weight", + "backbone.levels.2.blocks.23.mlp.fc1.weight", + "backbone.levels.2.blocks.23.mlp.fc2.weight" + ], + "lr_scale": 0.46329123015975304, + "lr": 4.632912301597531e-06, + "weight_decay": 0.05 + }, + "layer_37_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.norm1.0.weight", + "backbone.levels.2.blocks.24.norm1.0.bias", + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.24.dcn.offset.bias", + "backbone.levels.2.blocks.24.dcn.mask.bias", + "backbone.levels.2.blocks.24.dcn.input_proj.bias", + "backbone.levels.2.blocks.24.dcn.output_proj.bias", + "backbone.levels.2.blocks.24.norm2.0.weight", + "backbone.levels.2.blocks.24.norm2.0.bias", + "backbone.levels.2.blocks.24.mlp.fc1.bias", + "backbone.levels.2.blocks.24.mlp.fc2.bias", + "backbone.levels.2.blocks.24.res_post_norm1.0.weight", + "backbone.levels.2.blocks.24.res_post_norm1.0.bias", + "backbone.levels.2.blocks.24.res_post_norm2.0.weight", + "backbone.levels.2.blocks.24.res_post_norm2.0.bias" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.0 + }, + "layer_37_decay": { + "param_names": [ + "backbone.levels.2.blocks.24.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.24.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.24.dcn.offset.weight", + "backbone.levels.2.blocks.24.dcn.mask.weight", + "backbone.levels.2.blocks.24.dcn.input_proj.weight", + "backbone.levels.2.blocks.24.dcn.output_proj.weight", + "backbone.levels.2.blocks.24.mlp.fc1.weight", + "backbone.levels.2.blocks.24.mlp.fc2.weight" + ], + "lr_scale": 0.48767497911552954, + "lr": 4.876749791155296e-06, + "weight_decay": 0.05 + }, + "layer_38_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.norm1.0.weight", + "backbone.levels.2.blocks.25.norm1.0.bias", + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.25.dcn.dw_conv.1.1.weight", + 
"backbone.levels.2.blocks.25.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.25.dcn.offset.bias", + "backbone.levels.2.blocks.25.dcn.mask.bias", + "backbone.levels.2.blocks.25.dcn.input_proj.bias", + "backbone.levels.2.blocks.25.dcn.output_proj.bias", + "backbone.levels.2.blocks.25.norm2.0.weight", + "backbone.levels.2.blocks.25.norm2.0.bias", + "backbone.levels.2.blocks.25.mlp.fc1.bias", + "backbone.levels.2.blocks.25.mlp.fc2.bias", + "backbone.levels.2.blocks.25.res_post_norm1.0.weight", + "backbone.levels.2.blocks.25.res_post_norm1.0.bias", + "backbone.levels.2.blocks.25.res_post_norm2.0.weight", + "backbone.levels.2.blocks.25.res_post_norm2.0.bias" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.0 + }, + "layer_38_decay": { + "param_names": [ + "backbone.levels.2.blocks.25.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.25.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.25.dcn.offset.weight", + "backbone.levels.2.blocks.25.dcn.mask.weight", + "backbone.levels.2.blocks.25.dcn.input_proj.weight", + "backbone.levels.2.blocks.25.dcn.output_proj.weight", + "backbone.levels.2.blocks.25.mlp.fc1.weight", + "backbone.levels.2.blocks.25.mlp.fc2.weight" + ], + "lr_scale": 0.5133420832795048, + "lr": 5.133420832795049e-06, + "weight_decay": 0.05 + }, + "layer_39_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.norm1.0.weight", + "backbone.levels.2.blocks.26.norm1.0.bias", + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.26.dcn.offset.bias", + "backbone.levels.2.blocks.26.dcn.mask.bias", + "backbone.levels.2.blocks.26.dcn.input_proj.bias", + "backbone.levels.2.blocks.26.dcn.output_proj.bias", + "backbone.levels.2.blocks.26.norm2.0.weight", + "backbone.levels.2.blocks.26.norm2.0.bias", + "backbone.levels.2.blocks.26.mlp.fc1.bias", + "backbone.levels.2.blocks.26.mlp.fc2.bias", + "backbone.levels.2.blocks.26.res_post_norm1.0.weight", + "backbone.levels.2.blocks.26.res_post_norm1.0.bias", + "backbone.levels.2.blocks.26.res_post_norm2.0.weight", + "backbone.levels.2.blocks.26.res_post_norm2.0.bias" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.0 + }, + "layer_39_decay": { + "param_names": [ + "backbone.levels.2.blocks.26.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.26.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.26.dcn.offset.weight", + "backbone.levels.2.blocks.26.dcn.mask.weight", + "backbone.levels.2.blocks.26.dcn.input_proj.weight", + "backbone.levels.2.blocks.26.dcn.output_proj.weight", + "backbone.levels.2.blocks.26.mlp.fc1.weight", + "backbone.levels.2.blocks.26.mlp.fc2.weight" + ], + "lr_scale": 0.5403600876626367, + "lr": 5.403600876626367e-06, + "weight_decay": 0.05 + }, + "layer_40_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.norm1.0.weight", + "backbone.levels.2.blocks.27.norm1.0.bias", + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.27.dcn.offset.bias", + "backbone.levels.2.blocks.27.dcn.mask.bias", + "backbone.levels.2.blocks.27.dcn.input_proj.bias", + "backbone.levels.2.blocks.27.dcn.output_proj.bias", + 
"backbone.levels.2.blocks.27.norm2.0.weight", + "backbone.levels.2.blocks.27.norm2.0.bias", + "backbone.levels.2.blocks.27.mlp.fc1.bias", + "backbone.levels.2.blocks.27.mlp.fc2.bias", + "backbone.levels.2.blocks.27.res_post_norm1.0.weight", + "backbone.levels.2.blocks.27.res_post_norm1.0.bias", + "backbone.levels.2.blocks.27.res_post_norm2.0.weight", + "backbone.levels.2.blocks.27.res_post_norm2.0.bias" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.0 + }, + "layer_40_decay": { + "param_names": [ + "backbone.levels.2.blocks.27.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.27.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.27.dcn.offset.weight", + "backbone.levels.2.blocks.27.dcn.mask.weight", + "backbone.levels.2.blocks.27.dcn.input_proj.weight", + "backbone.levels.2.blocks.27.dcn.output_proj.weight", + "backbone.levels.2.blocks.27.mlp.fc1.weight", + "backbone.levels.2.blocks.27.mlp.fc2.weight" + ], + "lr_scale": 0.5688000922764597, + "lr": 5.688000922764597e-06, + "weight_decay": 0.05 + }, + "layer_41_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.norm1.0.weight", + "backbone.levels.2.blocks.28.norm1.0.bias", + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.28.dcn.offset.bias", + "backbone.levels.2.blocks.28.dcn.mask.bias", + "backbone.levels.2.blocks.28.dcn.input_proj.bias", + "backbone.levels.2.blocks.28.dcn.output_proj.bias", + "backbone.levels.2.blocks.28.norm2.0.weight", + "backbone.levels.2.blocks.28.norm2.0.bias", + "backbone.levels.2.blocks.28.mlp.fc1.bias", + "backbone.levels.2.blocks.28.mlp.fc2.bias", + "backbone.levels.2.blocks.28.res_post_norm1.0.weight", + "backbone.levels.2.blocks.28.res_post_norm1.0.bias", + "backbone.levels.2.blocks.28.res_post_norm2.0.weight", + "backbone.levels.2.blocks.28.res_post_norm2.0.bias" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.0 + }, + "layer_41_decay": { + "param_names": [ + "backbone.levels.2.blocks.28.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.28.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.28.dcn.offset.weight", + "backbone.levels.2.blocks.28.dcn.mask.weight", + "backbone.levels.2.blocks.28.dcn.input_proj.weight", + "backbone.levels.2.blocks.28.dcn.output_proj.weight", + "backbone.levels.2.blocks.28.mlp.fc1.weight", + "backbone.levels.2.blocks.28.mlp.fc2.weight" + ], + "lr_scale": 0.5987369392383787, + "lr": 5.987369392383788e-06, + "weight_decay": 0.05 + }, + "layer_42_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.norm1.0.weight", + "backbone.levels.2.blocks.29.norm1.0.bias", + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.29.dcn.offset.bias", + "backbone.levels.2.blocks.29.dcn.mask.bias", + "backbone.levels.2.blocks.29.dcn.input_proj.bias", + "backbone.levels.2.blocks.29.dcn.output_proj.bias", + "backbone.levels.2.blocks.29.norm2.0.weight", + "backbone.levels.2.blocks.29.norm2.0.bias", + "backbone.levels.2.blocks.29.mlp.fc1.bias", + "backbone.levels.2.blocks.29.mlp.fc2.bias", + "backbone.levels.2.blocks.29.res_post_norm1.0.weight", + 
"backbone.levels.2.blocks.29.res_post_norm1.0.bias", + "backbone.levels.2.blocks.29.res_post_norm2.0.weight", + "backbone.levels.2.blocks.29.res_post_norm2.0.bias" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.0 + }, + "layer_42_decay": { + "param_names": [ + "backbone.levels.2.blocks.29.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.29.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.29.dcn.offset.weight", + "backbone.levels.2.blocks.29.dcn.mask.weight", + "backbone.levels.2.blocks.29.dcn.input_proj.weight", + "backbone.levels.2.blocks.29.dcn.output_proj.weight", + "backbone.levels.2.blocks.29.mlp.fc1.weight", + "backbone.levels.2.blocks.29.mlp.fc2.weight" + ], + "lr_scale": 0.6302494097246091, + "lr": 6.302494097246091e-06, + "weight_decay": 0.05 + }, + "layer_43_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.norm1.0.weight", + "backbone.levels.2.blocks.30.norm1.0.bias", + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.30.dcn.offset.bias", + "backbone.levels.2.blocks.30.dcn.mask.bias", + "backbone.levels.2.blocks.30.dcn.input_proj.bias", + "backbone.levels.2.blocks.30.dcn.output_proj.bias", + "backbone.levels.2.blocks.30.norm2.0.weight", + "backbone.levels.2.blocks.30.norm2.0.bias", + "backbone.levels.2.blocks.30.mlp.fc1.bias", + "backbone.levels.2.blocks.30.mlp.fc2.bias", + "backbone.levels.2.blocks.30.res_post_norm1.0.weight", + "backbone.levels.2.blocks.30.res_post_norm1.0.bias", + "backbone.levels.2.blocks.30.res_post_norm2.0.weight", + "backbone.levels.2.blocks.30.res_post_norm2.0.bias" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.0 + }, + "layer_43_decay": { + "param_names": [ + "backbone.levels.2.blocks.30.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.30.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.30.dcn.offset.weight", + "backbone.levels.2.blocks.30.dcn.mask.weight", + "backbone.levels.2.blocks.30.dcn.input_proj.weight", + "backbone.levels.2.blocks.30.dcn.output_proj.weight", + "backbone.levels.2.blocks.30.mlp.fc1.weight", + "backbone.levels.2.blocks.30.mlp.fc2.weight" + ], + "lr_scale": 0.6634204312890623, + "lr": 6.634204312890623e-06, + "weight_decay": 0.05 + }, + "layer_44_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.norm1.0.weight", + "backbone.levels.2.blocks.31.norm1.0.bias", + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.31.dcn.offset.bias", + "backbone.levels.2.blocks.31.dcn.mask.bias", + "backbone.levels.2.blocks.31.dcn.input_proj.bias", + "backbone.levels.2.blocks.31.dcn.output_proj.bias", + "backbone.levels.2.blocks.31.norm2.0.weight", + "backbone.levels.2.blocks.31.norm2.0.bias", + "backbone.levels.2.blocks.31.mlp.fc1.bias", + "backbone.levels.2.blocks.31.mlp.fc2.bias", + "backbone.levels.2.blocks.31.res_post_norm1.0.weight", + "backbone.levels.2.blocks.31.res_post_norm1.0.bias", + "backbone.levels.2.blocks.31.res_post_norm2.0.weight", + "backbone.levels.2.blocks.31.res_post_norm2.0.bias" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.0 + }, + 
"layer_44_decay": { + "param_names": [ + "backbone.levels.2.blocks.31.dcn.center_feature_scale_proj_weight", + "backbone.levels.2.blocks.31.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.31.dcn.offset.weight", + "backbone.levels.2.blocks.31.dcn.mask.weight", + "backbone.levels.2.blocks.31.dcn.input_proj.weight", + "backbone.levels.2.blocks.31.dcn.output_proj.weight", + "backbone.levels.2.blocks.31.mlp.fc1.weight", + "backbone.levels.2.blocks.31.mlp.fc2.weight" + ], + "lr_scale": 0.6983372960937497, + "lr": 6.983372960937498e-06, + "weight_decay": 0.05 + }, + "layer_45_no_decay": { + "param_names": [ + "backbone.levels.2.norm.0.weight", + "backbone.levels.2.norm.0.bias", + "backbone.levels.2.downsample.norm.1.weight", + "backbone.levels.2.downsample.norm.1.bias", + "backbone.levels.3.blocks.0.norm1.0.weight", + "backbone.levels.3.blocks.0.norm1.0.bias", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.0.dcn.offset.bias", + "backbone.levels.3.blocks.0.dcn.mask.bias", + "backbone.levels.3.blocks.0.dcn.input_proj.bias", + "backbone.levels.3.blocks.0.dcn.output_proj.bias", + "backbone.levels.3.blocks.0.norm2.0.weight", + "backbone.levels.3.blocks.0.norm2.0.bias", + "backbone.levels.3.blocks.0.mlp.fc1.bias", + "backbone.levels.3.blocks.0.mlp.fc2.bias", + "backbone.levels.3.blocks.0.res_post_norm1.0.weight", + "backbone.levels.3.blocks.0.res_post_norm1.0.bias", + "backbone.levels.3.blocks.0.res_post_norm2.0.weight", + "backbone.levels.3.blocks.0.res_post_norm2.0.bias", + "backbone.levels.3.norm.0.weight", + "backbone.levels.3.norm.0.bias" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.0 + }, + "layer_45_decay": { + "param_names": [ + "backbone.levels.2.downsample.conv.weight", + "backbone.levels.3.blocks.0.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.0.dcn.offset.weight", + "backbone.levels.3.blocks.0.dcn.mask.weight", + "backbone.levels.3.blocks.0.dcn.input_proj.weight", + "backbone.levels.3.blocks.0.dcn.output_proj.weight", + "backbone.levels.3.blocks.0.mlp.fc1.weight", + "backbone.levels.3.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.7350918906249998, + "lr": 7.350918906249998e-06, + "weight_decay": 0.05 + }, + "layer_46_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.norm1.0.weight", + "backbone.levels.3.blocks.1.norm1.0.bias", + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.1.dcn.offset.bias", + "backbone.levels.3.blocks.1.dcn.mask.bias", + "backbone.levels.3.blocks.1.dcn.input_proj.bias", + "backbone.levels.3.blocks.1.dcn.output_proj.bias", + "backbone.levels.3.blocks.1.norm2.0.weight", + "backbone.levels.3.blocks.1.norm2.0.bias", + "backbone.levels.3.blocks.1.mlp.fc1.bias", + "backbone.levels.3.blocks.1.mlp.fc2.bias", + "backbone.levels.3.blocks.1.res_post_norm1.0.weight", + "backbone.levels.3.blocks.1.res_post_norm1.0.bias", + "backbone.levels.3.blocks.1.res_post_norm2.0.weight", + "backbone.levels.3.blocks.1.res_post_norm2.0.bias" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.0 + }, + "layer_46_decay": { + 
"param_names": [ + "backbone.levels.3.blocks.1.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.1.dcn.offset.weight", + "backbone.levels.3.blocks.1.dcn.mask.weight", + "backbone.levels.3.blocks.1.dcn.input_proj.weight", + "backbone.levels.3.blocks.1.dcn.output_proj.weight", + "backbone.levels.3.blocks.1.mlp.fc1.weight", + "backbone.levels.3.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.7737809374999998, + "lr": 7.737809374999999e-06, + "weight_decay": 0.05 + }, + "layer_47_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.norm1.0.weight", + "backbone.levels.3.blocks.2.norm1.0.bias", + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.2.dcn.offset.bias", + "backbone.levels.3.blocks.2.dcn.mask.bias", + "backbone.levels.3.blocks.2.dcn.input_proj.bias", + "backbone.levels.3.blocks.2.dcn.output_proj.bias", + "backbone.levels.3.blocks.2.norm2.0.weight", + "backbone.levels.3.blocks.2.norm2.0.bias", + "backbone.levels.3.blocks.2.mlp.fc1.bias", + "backbone.levels.3.blocks.2.mlp.fc2.bias", + "backbone.levels.3.blocks.2.res_post_norm1.0.weight", + "backbone.levels.3.blocks.2.res_post_norm1.0.bias", + "backbone.levels.3.blocks.2.res_post_norm2.0.weight", + "backbone.levels.3.blocks.2.res_post_norm2.0.bias" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.0 + }, + "layer_47_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.2.dcn.offset.weight", + "backbone.levels.3.blocks.2.dcn.mask.weight", + "backbone.levels.3.blocks.2.dcn.input_proj.weight", + "backbone.levels.3.blocks.2.dcn.output_proj.weight", + "backbone.levels.3.blocks.2.mlp.fc1.weight", + "backbone.levels.3.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.8145062499999999, + "lr": 8.1450625e-06, + "weight_decay": 0.05 + }, + "layer_48_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.norm1.0.weight", + "backbone.levels.3.blocks.3.norm1.0.bias", + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.3.dcn.offset.bias", + "backbone.levels.3.blocks.3.dcn.mask.bias", + "backbone.levels.3.blocks.3.dcn.input_proj.bias", + "backbone.levels.3.blocks.3.dcn.output_proj.bias", + "backbone.levels.3.blocks.3.norm2.0.weight", + "backbone.levels.3.blocks.3.norm2.0.bias", + "backbone.levels.3.blocks.3.mlp.fc1.bias", + "backbone.levels.3.blocks.3.mlp.fc2.bias", + "backbone.levels.3.blocks.3.res_post_norm1.0.weight", + "backbone.levels.3.blocks.3.res_post_norm1.0.bias", + "backbone.levels.3.blocks.3.res_post_norm2.0.weight", + "backbone.levels.3.blocks.3.res_post_norm2.0.bias" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.0 + }, + "layer_48_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.3.dcn.offset.weight", + "backbone.levels.3.blocks.3.dcn.mask.weight", + "backbone.levels.3.blocks.3.dcn.input_proj.weight", + 
"backbone.levels.3.blocks.3.dcn.output_proj.weight", + "backbone.levels.3.blocks.3.mlp.fc1.weight", + "backbone.levels.3.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.8573749999999999, + "lr": 8.573749999999999e-06, + "weight_decay": 0.05 + }, + "layer_49_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.norm1.0.weight", + "backbone.levels.3.blocks.4.norm1.0.bias", + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.4.dcn.offset.bias", + "backbone.levels.3.blocks.4.dcn.mask.bias", + "backbone.levels.3.blocks.4.dcn.input_proj.bias", + "backbone.levels.3.blocks.4.dcn.output_proj.bias", + "backbone.levels.3.blocks.4.norm2.0.weight", + "backbone.levels.3.blocks.4.norm2.0.bias", + "backbone.levels.3.blocks.4.mlp.fc1.bias", + "backbone.levels.3.blocks.4.mlp.fc2.bias", + "backbone.levels.3.blocks.4.res_post_norm1.0.weight", + "backbone.levels.3.blocks.4.res_post_norm1.0.bias", + "backbone.levels.3.blocks.4.res_post_norm2.0.weight", + "backbone.levels.3.blocks.4.res_post_norm2.0.bias" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.0 + }, + "layer_49_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.4.dcn.offset.weight", + "backbone.levels.3.blocks.4.dcn.mask.weight", + "backbone.levels.3.blocks.4.dcn.input_proj.weight", + "backbone.levels.3.blocks.4.dcn.output_proj.weight", + "backbone.levels.3.blocks.4.mlp.fc1.weight", + "backbone.levels.3.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.9025, + "lr": 9.025e-06, + "weight_decay": 0.05 + }, + "layer_50_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.norm1.0.weight", + "backbone.levels.3.blocks.5.norm1.0.bias", + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.5.dcn.offset.bias", + "backbone.levels.3.blocks.5.dcn.mask.bias", + "backbone.levels.3.blocks.5.dcn.input_proj.bias", + "backbone.levels.3.blocks.5.dcn.output_proj.bias", + "backbone.levels.3.blocks.5.norm2.0.weight", + "backbone.levels.3.blocks.5.norm2.0.bias", + "backbone.levels.3.blocks.5.mlp.fc1.bias", + "backbone.levels.3.blocks.5.mlp.fc2.bias", + "backbone.levels.3.blocks.5.res_post_norm1.0.weight", + "backbone.levels.3.blocks.5.res_post_norm1.0.bias", + "backbone.levels.3.blocks.5.res_post_norm2.0.weight", + "backbone.levels.3.blocks.5.res_post_norm2.0.bias" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.0 + }, + "layer_50_decay": { + "param_names": [ + "backbone.levels.3.blocks.5.dcn.center_feature_scale_proj_weight", + "backbone.levels.3.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.5.dcn.offset.weight", + "backbone.levels.3.blocks.5.dcn.mask.weight", + "backbone.levels.3.blocks.5.dcn.input_proj.weight", + "backbone.levels.3.blocks.5.dcn.output_proj.weight", + "backbone.levels.3.blocks.5.mlp.fc1.weight", + "backbone.levels.3.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.95, + "lr": 9.5e-06, + "weight_decay": 0.05 + }, + "layer_51_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.weight", + "decode_head.pixel_decoder.input_convs.1.conv.weight", + 
"decode_head.pixel_decoder.input_convs.2.conv.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.weight", + "decode_head.pixel_decoder.level_encoding.weight", + "decode_head.pixel_decoder.lateral_convs.0.conv.weight", + "decode_head.pixel_decoder.output_convs.0.conv.weight", + "decode_head.pixel_decoder.mask_feature.weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.weight", + 
"decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_weight", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.weight", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.weight" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.05 + }, + "layer_51_no_decay": { + "param_names": [ + "decode_head.pixel_decoder.input_convs.0.conv.bias", + "decode_head.pixel_decoder.input_convs.0.gn.weight", + "decode_head.pixel_decoder.input_convs.0.gn.bias", + "decode_head.pixel_decoder.input_convs.1.conv.bias", + 
"decode_head.pixel_decoder.input_convs.1.gn.weight", + "decode_head.pixel_decoder.input_convs.1.gn.bias", + "decode_head.pixel_decoder.input_convs.2.conv.bias", + "decode_head.pixel_decoder.input_convs.2.gn.weight", + "decode_head.pixel_decoder.input_convs.2.gn.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.0.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.1.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.2.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.3.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.weight", + 
"decode_head.pixel_decoder.encoder.layers.4.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.4.norms.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.weight", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.sampling_offsets.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.attention_weights.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.value_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.attentions.0.output_proj.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.ffns.0.layers.1.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.0.bias", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.weight", + "decode_head.pixel_decoder.encoder.layers.5.norms.1.bias", + "decode_head.pixel_decoder.lateral_convs.0.gn.weight", + "decode_head.pixel_decoder.lateral_convs.0.gn.bias", + "decode_head.pixel_decoder.output_convs.0.gn.weight", + "decode_head.pixel_decoder.output_convs.0.gn.bias", + "decode_head.pixel_decoder.mask_feature.bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.0.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.0.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.0.norms.0.weight", + "decode_head.transformer_decoder.layers.0.norms.0.bias", + "decode_head.transformer_decoder.layers.0.norms.1.weight", + "decode_head.transformer_decoder.layers.0.norms.1.bias", + "decode_head.transformer_decoder.layers.0.norms.2.weight", + "decode_head.transformer_decoder.layers.0.norms.2.bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.1.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.1.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.1.norms.0.weight", + "decode_head.transformer_decoder.layers.1.norms.0.bias", + "decode_head.transformer_decoder.layers.1.norms.1.weight", + "decode_head.transformer_decoder.layers.1.norms.1.bias", + "decode_head.transformer_decoder.layers.1.norms.2.weight", + "decode_head.transformer_decoder.layers.1.norms.2.bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.0.attn.out_proj.bias", + 
"decode_head.transformer_decoder.layers.2.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.2.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.2.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.2.norms.0.weight", + "decode_head.transformer_decoder.layers.2.norms.0.bias", + "decode_head.transformer_decoder.layers.2.norms.1.weight", + "decode_head.transformer_decoder.layers.2.norms.1.bias", + "decode_head.transformer_decoder.layers.2.norms.2.weight", + "decode_head.transformer_decoder.layers.2.norms.2.bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.3.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.3.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.3.norms.0.weight", + "decode_head.transformer_decoder.layers.3.norms.0.bias", + "decode_head.transformer_decoder.layers.3.norms.1.weight", + "decode_head.transformer_decoder.layers.3.norms.1.bias", + "decode_head.transformer_decoder.layers.3.norms.2.weight", + "decode_head.transformer_decoder.layers.3.norms.2.bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.4.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.4.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.4.norms.0.weight", + "decode_head.transformer_decoder.layers.4.norms.0.bias", + "decode_head.transformer_decoder.layers.4.norms.1.weight", + "decode_head.transformer_decoder.layers.4.norms.1.bias", + "decode_head.transformer_decoder.layers.4.norms.2.weight", + "decode_head.transformer_decoder.layers.4.norms.2.bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.5.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.5.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.5.norms.0.weight", + "decode_head.transformer_decoder.layers.5.norms.0.bias", + "decode_head.transformer_decoder.layers.5.norms.1.weight", + "decode_head.transformer_decoder.layers.5.norms.1.bias", + "decode_head.transformer_decoder.layers.5.norms.2.weight", + "decode_head.transformer_decoder.layers.5.norms.2.bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.6.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.6.ffns.0.layers.1.bias", + 
"decode_head.transformer_decoder.layers.6.norms.0.weight", + "decode_head.transformer_decoder.layers.6.norms.0.bias", + "decode_head.transformer_decoder.layers.6.norms.1.weight", + "decode_head.transformer_decoder.layers.6.norms.1.bias", + "decode_head.transformer_decoder.layers.6.norms.2.weight", + "decode_head.transformer_decoder.layers.6.norms.2.bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.7.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.7.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.7.norms.0.weight", + "decode_head.transformer_decoder.layers.7.norms.0.bias", + "decode_head.transformer_decoder.layers.7.norms.1.weight", + "decode_head.transformer_decoder.layers.7.norms.1.bias", + "decode_head.transformer_decoder.layers.7.norms.2.weight", + "decode_head.transformer_decoder.layers.7.norms.2.bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.0.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.in_proj_bias", + "decode_head.transformer_decoder.layers.8.attentions.1.attn.out_proj.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.0.0.bias", + "decode_head.transformer_decoder.layers.8.ffns.0.layers.1.bias", + "decode_head.transformer_decoder.layers.8.norms.0.weight", + "decode_head.transformer_decoder.layers.8.norms.0.bias", + "decode_head.transformer_decoder.layers.8.norms.1.weight", + "decode_head.transformer_decoder.layers.8.norms.1.bias", + "decode_head.transformer_decoder.layers.8.norms.2.weight", + "decode_head.transformer_decoder.layers.8.norms.2.bias", + "decode_head.transformer_decoder.post_norm.weight", + "decode_head.transformer_decoder.post_norm.bias" + ], + "lr_scale": 1.0, + "lr": 1e-05, + "weight_decay": 0.0 + } +} +2025-05-28 03:14:28,716 - mmseg - INFO - Loaded 1159 images +2025-05-28 03:14:28,717 - mmseg - INFO - load checkpoint from http path: https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth +2025-05-28 03:14:49,210 - mmseg - INFO - Start running, host: yiming@pasteur-hgx-1, work_dir: /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 +2025-05-28 03:14:49,210 - mmseg - INFO - Hooks will be executed in the following order: +before_run: +(VERY_HIGH ) PolyLrUpdaterHook +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_epoch: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_iter: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook + -------------------- +after_train_iter: +(ABOVE_NORMAL) OptimizerHook +(NORMAL ) CheckpointHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_train_epoch: +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_epoch: +(LOW ) IterTimerHook 
+(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_epoch: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_run: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +2025-05-28 03:14:49,211 - mmseg - INFO - workflow: [('train', 1)], max: 80000 iters +2025-05-28 03:14:49,211 - mmseg - INFO - Checkpoints will be saved to /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 by HardDiskBackend. diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031351.log.json b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031351.log.json new file mode 100644 index 0000000000000000000000000000000000000000..b60e8b049f5d69230a2cc68ab86cbbdc05841377 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031351.log.json @@ -0,0 +1 @@ +{"env_info": "sys.platform: linux\nPython: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ]\nCUDA available: True\nGPU 0,1,2,3,4,5,6,7: NVIDIA A100-SXM4-80GB\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 11.7, V11.7.64\nGCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0\nPyTorch: 1.11.0+cu113\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n\nTorchVision: 0.12.0+cu113\nOpenCV: 4.11.0\nMMCV: 1.5.0\nMMCV Compiler: GCC 7.3\nMMCV CUDA 
Compiler: 11.3\nMMSegmentation: 0.27.0+12dc934", "seed": 1985235357, "exp_name": "mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py", "mmseg_version": "0.27.0+12dc934", "config": "num_things_classes = 100\nnum_stuff_classes = 50\nnum_classes = 19\nnorm_cfg = dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoderMask2Former',\n pretrained=None,\n backbone=dict(\n type='InternImage',\n core_op='DCNv3',\n channels=320,\n depths=[6, 6, 32, 6],\n groups=[10, 20, 40, 80],\n mlp_ratio=4.0,\n drop_path_rate=0.5,\n norm_layer='LN',\n layer_scale=None,\n offset_scale=1.0,\n post_norm=False,\n dw_kernel_size=5,\n res_post_norm=True,\n level2_post_norm=True,\n level2_post_norm_block_ids=[5, 11, 17, 23, 29],\n center_feature_scale=True,\n with_cp=False,\n out_indices=(0, 1, 2, 3),\n init_cfg=None),\n decode_head=dict(\n type='Mask2FormerHead',\n in_channels=[320, 640, 1280, 2560],\n feat_channels=256,\n out_channels=256,\n in_index=[0, 1, 2, 3],\n num_things_classes=100,\n num_stuff_classes=50,\n num_queries=100,\n num_transformer_feat_level=3,\n pixel_decoder=dict(\n type='MSDeformAttnPixelDecoder',\n num_outs=3,\n norm_cfg=dict(type='GN', num_groups=32),\n act_cfg=dict(type='ReLU'),\n encoder=dict(\n type='DetrTransformerEncoder',\n num_layers=6,\n transformerlayers=dict(\n type='BaseTransformerLayer',\n attn_cfgs=dict(\n type='MultiScaleDeformableAttention',\n embed_dims=256,\n num_heads=8,\n num_levels=3,\n num_points=4,\n im2col_step=64,\n dropout=0.0,\n batch_first=False,\n norm_cfg=None,\n init_cfg=None),\n ffn_cfgs=dict(\n type='FFN',\n embed_dims=256,\n feedforward_channels=1024,\n num_fcs=2,\n ffn_drop=0.0,\n act_cfg=dict(type='ReLU', inplace=True),\n with_cp=False),\n operation_order=('self_attn', 'norm', 'ffn', 'norm')),\n init_cfg=None),\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n init_cfg=None),\n enforce_decoder_input_project=False,\n positional_encoding=dict(\n type='SinePositionalEncoding', num_feats=128, normalize=True),\n transformer_decoder=dict(\n type='DetrTransformerDecoder',\n return_intermediate=True,\n num_layers=9,\n transformerlayers=dict(\n type='DetrTransformerDecoderLayer',\n attn_cfgs=dict(\n type='MultiheadAttention',\n embed_dims=256,\n num_heads=8,\n attn_drop=0.0,\n proj_drop=0.0,\n dropout_layer=None,\n batch_first=False),\n ffn_cfgs=dict(\n embed_dims=256,\n feedforward_channels=2048,\n num_fcs=2,\n act_cfg=dict(type='ReLU', inplace=True),\n ffn_drop=0.0,\n dropout_layer=None,\n add_identity=True,\n with_cp=False),\n feedforward_channels=2048,\n operation_order=('cross_attn', 'norm', 'self_attn', 'norm',\n 'ffn', 'norm')),\n init_cfg=None),\n loss_cls=dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=2.0,\n reduction='mean',\n class_weight=[\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1\n ]),\n loss_mask=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=5.0),\n loss_dice=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n reduction='mean',\n naive_dice=True,\n eps=1.0,\n loss_weight=5.0),\n num_classes=19,\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n 
sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512))),\n train_cfg=dict(\n num_points=12544,\n oversample_ratio=3.0,\n importance_sample_ratio=0.75,\n assigner=dict(\n type='MaskHungarianAssigner',\n cls_cost=dict(type='ClassificationCost', weight=2.0),\n mask_cost=dict(\n type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),\n dice_cost=dict(\n type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),\n sampler=dict(type='MaskPseudoSampler')),\n test_cfg=dict(\n panoptic_on=True,\n semantic_on=False,\n instance_on=True,\n max_per_image=100,\n iou_thr=0.8,\n filter_low_score=True,\n mode='slide',\n crop_size=(1024, 1024),\n stride=(512, 512)),\n init_cfg=None)\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncrop_size = (1024, 1024)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(\n type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(\n type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255),\n dict(type='ToMask'),\n dict(type='DefaultFormatBundle'),\n dict(\n type='Collect',\n keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels'])\n ],\n split='splits/fold_1_train_split.txt'),\n val=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', 
keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ],\n split='splits/fold_1_val_split.txt'),\n test=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/val/',\n ann_dir='gtFine/val/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='ResizeToMultiple', size_divisor=32),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n dict(type='TensorboardLoggerHook')\n ])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth'\nresume_from = None\nworkflow = [('train', 1)]\ncudnn_benchmark = True\noptimizer = dict(\n type='AdamW',\n lr=1e-05,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n constructor='CustomLayerDecayOptimizerConstructor',\n paramwise_cfg=dict(\n num_layers=50,\n layer_decay_rate=0.95,\n depths=[6, 6, 32, 6],\n offset_lr_scale=1.0))\noptimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))\nlr_config = dict(\n policy='poly',\n warmup='linear',\n warmup_iters=1500,\n warmup_ratio=1e-06,\n power=1.0,\n min_lr=0.0,\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=80000)\ncheckpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1)\nevaluation = dict(\n interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU')\nwork_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1'\ngpu_ids = range(0, 8)\nauto_resume = False\ndevice = 'cuda'\nseed = 1985235357\n", "CLASSES": ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"], "PALETTE": [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]], "hook_msgs": {}} diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031622.log b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031622.log new file mode 100644 index 0000000000000000000000000000000000000000..572934f7da8bf71a96aca7c36e8922a92f9a9b97 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031622.log @@ -0,0 +1,4441 @@ +2025-05-28 03:16:22,743 - mmseg - INFO - Multi-processing start method is `None` +2025-05-28 03:16:22,744 - mmseg - INFO - OpenCV num_threads is `32 +2025-05-28 03:16:22,744 - mmseg - INFO - OMP num threads is 1 +2025-05-28 03:16:22,777 - mmseg - INFO - Environment info: +------------------------------------------------------------ +sys.platform: linux +Python: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ] +CUDA available: True +GPU 0,1,2,3,4,5,6,7: NVIDIA A100-SXM4-80GB +CUDA_HOME: /usr/local/cuda +NVCC: Cuda compilation tools, release 11.7, V11.7.64 +GCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0 +PyTorch: 1.11.0+cu113 +PyTorch 
compiling details: PyTorch built with: + - GCC 7.3 + - C++ Version: 201402 + - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications + - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e) + - OpenMP 201511 (a.k.a. OpenMP 4.5) + - LAPACK is enabled (usually provided by MKL) + - NNPACK is enabled + - CPU capability usage: AVX2 + - CUDA Runtime 11.3 + - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86 + - CuDNN 8.2 + - Magma 2.5.2 + - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, + +TorchVision: 0.12.0+cu113 +OpenCV: 4.11.0 +MMCV: 1.5.0 +MMCV Compiler: GCC 7.3 +MMCV CUDA Compiler: 11.3 +MMSegmentation: 0.27.0+12dc934 +------------------------------------------------------------ + +2025-05-28 03:16:22,777 - mmseg - INFO - Distributed training: True +2025-05-28 03:16:23,005 - mmseg - INFO - Config: +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=192, + depths=[5, 5, 24, 5], + groups=[12, 24, 48, 96], + mlp_ratio=4.0, + drop_path_rate=0.4, + norm_layer='LN', + layer_scale=1.0, + offset_scale=2.0, + post_norm=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='SegformerHead', + in_channels=[192, 384, 768, 1536], + in_index=[0, 1, 2, 3], + channels=256, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=dict(type='SyncBN', requires_grad=True), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + train_cfg=dict(), + test_cfg=dict(mode='whole')) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', 
prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/segformer_internimage_xl_512x1024_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=2e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=39, + layer_decay_rate=0.94, + depths=[5, 5, 24, 5], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = 
dict(type='IterBasedRunner', max_iters=160000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=4000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 8) +auto_resume = False + +2025-05-28 03:16:30,422 - mmseg - INFO - Set random seed to 1742917995, deterministic: False +2025-05-28 03:16:30,423 - mmseg - INFO - using core type: DCNv3 +2025-05-28 03:16:30,423 - mmseg - INFO - using activation layer: GELU +2025-05-28 03:16:30,423 - mmseg - INFO - using main norm layer: LN +2025-05-28 03:16:30,423 - mmseg - INFO - using dpr: linear, 0.4 +2025-05-28 03:16:30,423 - mmseg - INFO - level2_post_norm: False +2025-05-28 03:16:30,423 - mmseg - INFO - level2_post_norm_block_ids: None +2025-05-28 03:16:30,423 - mmseg - INFO - res_post_norm: False +2025-05-28 03:16:30,424 - mmseg - INFO - use_dcn_v4_op: False +2025-05-28 03:16:34,696 - mmseg - WARNING - No pre-trained weights for InternImage, training start from scratch +2025-05-28 03:16:36,977 - mmseg - INFO - initialize SegformerHead with init_cfg {'type': 'Normal', 'std': 0.01, 'override': {'name': 'conv_seg'}} +Name of parameter - Initialization information + +backbone.patch_embed.conv1.weight - torch.Size([96, 3, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.patch_embed.conv1.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.patch_embed.norm1.1.weight - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.patch_embed.norm1.1.bias - torch.Size([96]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.patch_embed.conv2.weight - torch.Size([192, 96, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.patch_embed.conv2.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.patch_embed.norm2.1.weight - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.patch_embed.norm2.1.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.0.blocks.0.gamma1 - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.0.blocks.0.gamma2 - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.0.blocks.0.norm1.0.weight - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.0.blocks.0.norm1.0.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.0.blocks.0.dcn.dw_conv.0.weight - torch.Size([192, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.0.blocks.0.dcn.dw_conv.0.bias - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([192]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([192]): +The value is the same before and after calling 
`init_weights` of EncoderDecoder 
+
+Levels 0-2 of the backbone repeat the same per-block pattern (level 0: 192 channels, blocks 0-4; level 1: 384 channels, blocks 0-4; level 2: 768 channels, blocks 0-12), differing only in tensor shapes:
+
+Initialized by user-defined `init_weights` in InternImage (per block, shapes for level 0 / 1 / 2):
+  dcn.offset.weight      - torch.Size([216, 192]) / torch.Size([432, 384]) / torch.Size([864, 768])
+  dcn.mask.weight        - torch.Size([108, 192]) / torch.Size([216, 384]) / torch.Size([432, 768])
+  dcn.input_proj.weight  - torch.Size([192, 192]) / torch.Size([384, 384]) / torch.Size([768, 768])
+  dcn.output_proj.weight - torch.Size([192, 192]) / torch.Size([384, 384]) / torch.Size([768, 768])
+  mlp.fc1.weight         - torch.Size([768, 192]) / torch.Size([1536, 384]) / torch.Size([3072, 768])
+  mlp.fc2.weight         - torch.Size([192, 768]) / torch.Size([384, 1536]) / torch.Size([768, 3072])
+
+The value is the same before and after calling `init_weights` of EncoderDecoder (per block, shapes for level 0 / 1 / 2):
+  gamma1, gamma2, norm1.0.weight, norm1.0.bias, norm2.0.weight, norm2.0.bias - torch.Size([192]) / torch.Size([384]) / torch.Size([768])
+  dcn.dw_conv.0.weight - torch.Size([192, 1, 3, 3]) / torch.Size([384, 1, 3, 3]) / torch.Size([768, 1, 3, 3])
+  dcn.dw_conv.0.bias, dcn.dw_conv.1.1.weight, dcn.dw_conv.1.1.bias - torch.Size([192]) / torch.Size([384]) / torch.Size([768])
+  dcn.offset.bias - torch.Size([216]) / torch.Size([432]) / torch.Size([864])
+  dcn.mask.bias - torch.Size([108]) / torch.Size([216]) / torch.Size([432])
+  dcn.input_proj.bias, dcn.output_proj.bias - torch.Size([192]) / torch.Size([384]) / torch.Size([768])
+  mlp.fc1.bias - torch.Size([768]) / torch.Size([1536]) / torch.Size([3072])
+  mlp.fc2.bias - torch.Size([192]) / torch.Size([384]) / torch.Size([768])
+
+The value is the same before and after calling `init_weights` of EncoderDecoder (downsample layers):
+  backbone.levels.0.downsample.conv.weight - torch.Size([384, 192, 3, 3])
+  backbone.levels.0.downsample.norm.1.weight, backbone.levels.0.downsample.norm.1.bias - torch.Size([384])
+  backbone.levels.1.downsample.conv.weight - torch.Size([768, 384, 3, 3])
+  backbone.levels.1.downsample.norm.1.weight, backbone.levels.1.downsample.norm.1.bias - torch.Size([768])
+
+backbone.levels.2.blocks.13.gamma1 - torch.Size([768]): 
+The value is the same before and after calling `init_weights` of EncoderDecoder 
+
+backbone.levels.2.blocks.13.gamma2 - torch.Size([768]): 
+The value is the same before and after calling `init_weights` of EncoderDecoder 
+
+backbone.levels.2.blocks.13.norm1.0.weight - torch.Size([768]): 
+The value is the same before and after calling `init_weights` of EncoderDecoder 
+
+backbone.levels.2.blocks.13.norm1.0.bias - torch.Size([768]): 
+The value is the same before and after calling `init_weights` of EncoderDecoder 
+
+backbone.levels.2.blocks.13.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): 
+The value is the same before and after calling `init_weights` of EncoderDecoder 
+
+backbone.levels.2.blocks.13.dcn.dw_conv.0.bias - torch.Size([768]): 
+The value is the same before and after calling `init_weights` of EncoderDecoder 
+
+backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight - torch.Size([768]): 
+The value is the same before and after calling `init_weights` of EncoderDecoder 
+
+backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias - torch.Size([768]): 
+The value is the same before and after calling `init_weights` of EncoderDecoder 
+
+backbone.levels.2.blocks.13.dcn.offset.weight - torch.Size([864, 768]): 
+Initialized by user-defined `init_weights` in InternImage 
+
+backbone.levels.2.blocks.13.dcn.offset.bias - torch.Size([864]): 
+The value is the same before and after calling `init_weights` of EncoderDecoder 
+
+backbone.levels.2.blocks.13.dcn.mask.weight - 
torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.13.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.13.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.dcn.output_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.13.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.13.norm2.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.13.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.13.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.13.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.gamma1 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.gamma2 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.norm1.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.norm1.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.dcn.dw_conv.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.dcn.offset.weight - torch.Size([864, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.offset.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.dcn.mask.weight - torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in 
InternImage + +backbone.levels.2.blocks.14.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.dcn.output_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.norm2.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.14.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.14.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.gamma1 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.gamma2 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.norm1.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.norm1.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.dcn.dw_conv.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.dcn.offset.weight - torch.Size([864, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.offset.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.dcn.mask.weight - torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.dcn.output_proj.bias - 
torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.norm2.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.15.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.15.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.gamma1 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.gamma2 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.norm1.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.norm1.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.dcn.dw_conv.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.dcn.offset.weight - torch.Size([864, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.offset.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.dcn.mask.weight - torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.dcn.output_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.norm2.0.bias - torch.Size([768]): +The value is the same before and after 
calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.16.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.16.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.gamma1 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.gamma2 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.norm1.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.norm1.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.dcn.dw_conv.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.dcn.offset.weight - torch.Size([864, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.offset.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.dcn.mask.weight - torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.dcn.output_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.norm2.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.17.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + 
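A pattern worth noting in this dump: within every block, only the `nn.Linear` weights (`dcn.offset`, `dcn.mask`, `dcn.input_proj`, `dcn.output_proj`, `mlp.fc1`, `mlp.fc2`) are reported as "Initialized by user-defined `init_weights` in InternImage"; the layer-scale `gamma1`/`gamma2`, the norm parameters, the depthwise-conv weights, and every bias keep their constructor values. The log records only that a custom init ran, not what it did; the sketch below shows one initializer that would produce exactly this split — a truncated normal on `nn.Linear` weights, biases left alone — where the `std=0.02` value is an assumption, not something stated in the log.

```python
import torch.nn as nn


def init_linear_weights(module: nn.Module, std: float = 0.02) -> None:
    """Re-initialize only nn.Linear weights; leave everything else untouched.

    This matches the split visible in the log: Linear weights (dcn.offset,
    dcn.mask, *_proj, mlp.fc1/fc2) are re-initialized, while gammas, norms,
    depthwise convs and all biases report "the same before and after".
    The truncated normal with std=0.02 is an assumed choice.
    """
    for m in module.modules():
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=std)
```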
+backbone.levels.2.blocks.17.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.17.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.gamma1 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.gamma2 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.norm1.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.norm1.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.dcn.dw_conv.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.dcn.offset.weight - torch.Size([864, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.offset.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.dcn.mask.weight - torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.dcn.output_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.norm2.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.18.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.18.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.gamma1 - torch.Size([768]): +The value is the same 
before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.gamma2 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.norm1.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.norm1.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.dcn.dw_conv.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.dcn.offset.weight - torch.Size([864, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.offset.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.dcn.mask.weight - torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.dcn.output_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.norm2.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.19.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.19.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.gamma1 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.gamma2 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.norm1.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + 
+backbone.levels.2.blocks.20.norm1.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.dcn.dw_conv.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.dcn.offset.weight - torch.Size([864, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.offset.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.dcn.mask.weight - torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.dcn.output_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.norm2.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.20.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.20.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.gamma1 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.gamma2 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.norm1.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.norm1.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.dcn.dw_conv.0.bias - 
torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.dcn.offset.weight - torch.Size([864, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.offset.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.dcn.mask.weight - torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.dcn.output_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.norm2.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.21.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.21.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.gamma1 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.gamma2 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.norm1.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.norm1.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.dcn.dw_conv.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias - torch.Size([768]): +The value is the same before 
and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.dcn.offset.weight - torch.Size([864, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.offset.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.dcn.mask.weight - torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.dcn.output_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.norm2.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.22.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.22.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.gamma1 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.gamma2 - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.norm1.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.norm1.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.dcn.dw_conv.0.weight - torch.Size([768, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.dcn.dw_conv.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.dcn.offset.weight - torch.Size([864, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.offset.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + 
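The offset and mask shapes in this dump follow from the deformable-convolution bookkeeping: with a 3×3 sampling grid, the offset projection needs 2 · groups · 9 output channels (an x/y pair per sampling point per group) and the mask projection needs groups · 9. The `[864, 768]` / `[432, 768]` shapes at this 768-channel stage are consistent with 48 groups, and the `[1728, 1536]` / `[864, 1536]` shapes that appear further down for the 1536-channel stage with 96 groups; the group counts are inferred from the shapes rather than stated anywhere in this log. A quick check of that arithmetic:

```python
def dcn_proj_channels(groups: int, kernel_size: int = 3) -> tuple[int, int]:
    """Offset/mask output channels for a DCNv3-style block (layout inferred from the log)."""
    points = kernel_size * kernel_size            # sampling points per group
    return 2 * groups * points, groups * points   # (x, y) offset pairs vs. one scalar mask each


# Shapes logged for the 768-channel and 1536-channel stages:
assert dcn_proj_channels(groups=48) == (864, 432)
assert dcn_proj_channels(groups=96) == (1728, 864)
```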
+backbone.levels.2.blocks.23.dcn.mask.weight - torch.Size([432, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.mask.bias - torch.Size([432]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.dcn.input_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.input_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.dcn.output_proj.weight - torch.Size([768, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.dcn.output_proj.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.norm2.0.weight - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.norm2.0.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.mlp.fc1.weight - torch.Size([3072, 768]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc1.bias - torch.Size([3072]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.blocks.23.mlp.fc2.weight - torch.Size([768, 3072]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.2.blocks.23.mlp.fc2.bias - torch.Size([768]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.downsample.conv.weight - torch.Size([1536, 768, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.downsample.norm.1.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.2.downsample.norm.1.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.gamma1 - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.gamma2 - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.norm1.0.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.norm1.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.dcn.dw_conv.0.weight - torch.Size([1536, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.dcn.dw_conv.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.dcn.offset.weight - torch.Size([1728, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.offset.bias - 
torch.Size([1728]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.dcn.mask.weight - torch.Size([864, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.mask.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.dcn.input_proj.weight - torch.Size([1536, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.input_proj.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.dcn.output_proj.weight - torch.Size([1536, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.dcn.output_proj.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.norm2.0.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.norm2.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.mlp.fc1.weight - torch.Size([6144, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc1.bias - torch.Size([6144]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.0.mlp.fc2.weight - torch.Size([1536, 6144]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.0.mlp.fc2.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.gamma1 - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.gamma2 - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.norm1.0.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.norm1.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.dcn.dw_conv.0.weight - torch.Size([1536, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.dcn.dw_conv.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.dcn.offset.weight - torch.Size([1728, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.offset.bias - torch.Size([1728]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.dcn.mask.weight - torch.Size([864, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.mask.bias - torch.Size([864]): +The value is the same before and after calling 
`init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.dcn.input_proj.weight - torch.Size([1536, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.input_proj.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.dcn.output_proj.weight - torch.Size([1536, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.dcn.output_proj.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.norm2.0.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.norm2.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.mlp.fc1.weight - torch.Size([6144, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc1.bias - torch.Size([6144]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.1.mlp.fc2.weight - torch.Size([1536, 6144]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.1.mlp.fc2.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.gamma1 - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.gamma2 - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.norm1.0.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.norm1.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.dcn.dw_conv.0.weight - torch.Size([1536, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.dcn.dw_conv.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.dcn.offset.weight - torch.Size([1728, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.offset.bias - torch.Size([1728]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.dcn.mask.weight - torch.Size([864, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.mask.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.dcn.input_proj.weight - torch.Size([1536, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.input_proj.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + 
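Further down, the `CustomLayerDecayOptimizerConstructor` dump ("0.940000 - 41") assigns each parameter to one of 41 depth buckets (patch_embed, the 39 blocks, and presumably a final bucket with scale 1.0 for everything outside the backbone) and splits each bucket into a `*_decay` group (weight decay 0.05) and a `*_no_decay` group (biases, norm parameters and the gamma layer scales, weight decay 0.0). The logged `lr_scale` values are consistent with `0.94 ** (40 - layer_id)` applied to a base learning rate of 2e-5; both the formula and the base rate are inferred from the printed numbers, not read from a config shown in this excerpt. A sketch that reproduces the first few values:

```python
# Reproduce the lr_scale / lr values printed in the "Param groups" dump below.
# base_lr = 2e-5 and num_buckets = 41 are inferred from the logged numbers.
base_lr = 2e-5
decay_rate = 0.94
num_buckets = 41  # patch_embed + 39 blocks + one bucket for the rest

for layer_id in range(4):
    lr_scale = decay_rate ** (num_buckets - 1 - layer_id)
    print(f"layer_{layer_id}: lr_scale={lr_scale:.8f}, lr={lr_scale * base_lr:.6e}")
# layer_0: lr_scale ~ 0.08416163, lr ~ 1.683233e-06   (matches the dump)
# layer_1: lr_scale ~ 0.08953365, lr ~ 1.790673e-06
```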
+backbone.levels.3.blocks.2.dcn.output_proj.weight - torch.Size([1536, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.dcn.output_proj.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.norm2.0.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.norm2.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.mlp.fc1.weight - torch.Size([6144, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc1.bias - torch.Size([6144]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.2.mlp.fc2.weight - torch.Size([1536, 6144]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.2.mlp.fc2.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.gamma1 - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.gamma2 - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.norm1.0.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.norm1.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.dcn.dw_conv.0.weight - torch.Size([1536, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.dcn.dw_conv.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.dcn.offset.weight - torch.Size([1728, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.offset.bias - torch.Size([1728]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.dcn.mask.weight - torch.Size([864, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.mask.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.dcn.input_proj.weight - torch.Size([1536, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.input_proj.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.dcn.output_proj.weight - torch.Size([1536, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.dcn.output_proj.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.norm2.0.weight - 
torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.norm2.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.mlp.fc1.weight - torch.Size([6144, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc1.bias - torch.Size([6144]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.3.mlp.fc2.weight - torch.Size([1536, 6144]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.3.mlp.fc2.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.gamma1 - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.gamma2 - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.norm1.0.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.norm1.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.dcn.dw_conv.0.weight - torch.Size([1536, 1, 3, 3]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.dcn.dw_conv.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.dcn.offset.weight - torch.Size([1728, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.offset.bias - torch.Size([1728]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.dcn.mask.weight - torch.Size([864, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.mask.bias - torch.Size([864]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.dcn.input_proj.weight - torch.Size([1536, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.input_proj.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.dcn.output_proj.weight - torch.Size([1536, 1536]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.dcn.output_proj.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.norm2.0.weight - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.norm2.0.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.mlp.fc1.weight - torch.Size([6144, 1536]): +Initialized by user-defined 
`init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc1.bias - torch.Size([6144]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +backbone.levels.3.blocks.4.mlp.fc2.weight - torch.Size([1536, 6144]): +Initialized by user-defined `init_weights` in InternImage + +backbone.levels.3.blocks.4.mlp.fc2.bias - torch.Size([1536]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.conv_seg.weight - torch.Size([150, 256, 1, 1]): +NormalInit: mean=0, std=0.01, bias=0 + +decode_head.conv_seg.bias - torch.Size([150]): +NormalInit: mean=0, std=0.01, bias=0 + +decode_head.convs.0.conv.weight - torch.Size([256, 192, 1, 1]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.0.bn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.0.bn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.1.conv.weight - torch.Size([256, 384, 1, 1]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.1.bn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.1.bn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.2.conv.weight - torch.Size([256, 768, 1, 1]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.2.bn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.2.bn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.3.conv.weight - torch.Size([256, 1536, 1, 1]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.3.bn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.convs.3.bn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.fusion_conv.conv.weight - torch.Size([256, 1024, 1, 1]): +Initialized by user-defined `init_weights` in ConvModule + +decode_head.fusion_conv.bn.weight - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoder + +decode_head.fusion_conv.bn.bias - torch.Size([256]): +The value is the same before and after calling `init_weights` of EncoderDecoder +2025-05-28 03:16:36,987 - mmseg - INFO - Loaded 2316 images +2025-05-28 03:16:36,988 - mmseg - INFO - Built training dataset from config: {'type': 'CityscapesDataset', 'data_root': '/pasteur/u/yiming/homework4/cityscapes', 'img_dir': 'leftImg8bit/', 'ann_dir': 'gtFine/', 'pipeline': [{'type': 'LoadImageFromFile'}, {'type': 'LoadAnnotations'}, {'type': 'Resize', 'img_scale': (2048, 1024), 'ratio_range': (0.5, 2.0)}, {'type': 'RandomCrop', 'crop_size': (512, 1024), 'cat_max_ratio': 0.75}, {'type': 'RandomFlip', 'prob': 0.5}, {'type': 'PhotoMetricDistortion'}, {'type': 'Normalize', 'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375], 'to_rgb': True}, {'type': 'Pad', 'size': (512, 1024), 'pad_val': 0, 'seg_pad_val': 255}, {'type': 'DefaultFormatBundle'}, {'type': 'Collect', 'keys': ['img', 'gt_semantic_seg']}], 
'split': 'splits/fold_1_train_split.txt'} +2025-05-28 03:16:36,988 - mmseg - INFO - Number of samples in training dataset: 2316 +2025-05-28 03:16:37,593 - mmseg - INFO - {'num_layers': 39, 'layer_decay_rate': 0.94, 'depths': [5, 5, 24, 5], 'offset_lr_scale': 1.0} +2025-05-28 03:16:37,593 - mmseg - INFO - Build CustomLayerDecayOptimizerConstructor 0.940000 - 41 +2025-05-28 03:16:37,598 - mmseg - INFO - Param groups = { + "layer_0_decay": { + "param_names": [ + "backbone.patch_embed.conv1.weight", + "backbone.patch_embed.conv2.weight" + ], + "lr_scale": 0.08416163114342567, + "lr": 1.6832326228685137e-06, + "weight_decay": 0.05 + }, + "layer_0_no_decay": { + "param_names": [ + "backbone.patch_embed.conv1.bias", + "backbone.patch_embed.norm1.1.weight", + "backbone.patch_embed.norm1.1.bias", + "backbone.patch_embed.conv2.bias", + "backbone.patch_embed.norm2.1.weight", + "backbone.patch_embed.norm2.1.bias" + ], + "lr_scale": 0.08416163114342567, + "lr": 1.6832326228685137e-06, + "weight_decay": 0.0 + }, + "layer_1_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.gamma1", + "backbone.levels.0.blocks.0.gamma2", + "backbone.levels.0.blocks.0.norm1.0.weight", + "backbone.levels.0.blocks.0.norm1.0.bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.0.dcn.offset.bias", + "backbone.levels.0.blocks.0.dcn.mask.bias", + "backbone.levels.0.blocks.0.dcn.input_proj.bias", + "backbone.levels.0.blocks.0.dcn.output_proj.bias", + "backbone.levels.0.blocks.0.norm2.0.weight", + "backbone.levels.0.blocks.0.norm2.0.bias", + "backbone.levels.0.blocks.0.mlp.fc1.bias", + "backbone.levels.0.blocks.0.mlp.fc2.bias" + ], + "lr_scale": 0.08953365015258051, + "lr": 1.7906730030516104e-06, + "weight_decay": 0.0 + }, + "layer_1_decay": { + "param_names": [ + "backbone.levels.0.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.0.dcn.offset.weight", + "backbone.levels.0.blocks.0.dcn.mask.weight", + "backbone.levels.0.blocks.0.dcn.input_proj.weight", + "backbone.levels.0.blocks.0.dcn.output_proj.weight", + "backbone.levels.0.blocks.0.mlp.fc1.weight", + "backbone.levels.0.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.08953365015258051, + "lr": 1.7906730030516104e-06, + "weight_decay": 0.05 + }, + "layer_2_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.gamma1", + "backbone.levels.0.blocks.1.gamma2", + "backbone.levels.0.blocks.1.norm1.0.weight", + "backbone.levels.0.blocks.1.norm1.0.bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.1.dcn.offset.bias", + "backbone.levels.0.blocks.1.dcn.mask.bias", + "backbone.levels.0.blocks.1.dcn.input_proj.bias", + "backbone.levels.0.blocks.1.dcn.output_proj.bias", + "backbone.levels.0.blocks.1.norm2.0.weight", + "backbone.levels.0.blocks.1.norm2.0.bias", + "backbone.levels.0.blocks.1.mlp.fc1.bias", + "backbone.levels.0.blocks.1.mlp.fc2.bias" + ], + "lr_scale": 0.09524856399210693, + "lr": 1.9049712798421389e-06, + "weight_decay": 0.0 + }, + "layer_2_decay": { + "param_names": [ + "backbone.levels.0.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.1.dcn.offset.weight", + "backbone.levels.0.blocks.1.dcn.mask.weight", + "backbone.levels.0.blocks.1.dcn.input_proj.weight", + "backbone.levels.0.blocks.1.dcn.output_proj.weight", + 
"backbone.levels.0.blocks.1.mlp.fc1.weight", + "backbone.levels.0.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.09524856399210693, + "lr": 1.9049712798421389e-06, + "weight_decay": 0.05 + }, + "layer_3_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.gamma1", + "backbone.levels.0.blocks.2.gamma2", + "backbone.levels.0.blocks.2.norm1.0.weight", + "backbone.levels.0.blocks.2.norm1.0.bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.2.dcn.offset.bias", + "backbone.levels.0.blocks.2.dcn.mask.bias", + "backbone.levels.0.blocks.2.dcn.input_proj.bias", + "backbone.levels.0.blocks.2.dcn.output_proj.bias", + "backbone.levels.0.blocks.2.norm2.0.weight", + "backbone.levels.0.blocks.2.norm2.0.bias", + "backbone.levels.0.blocks.2.mlp.fc1.bias", + "backbone.levels.0.blocks.2.mlp.fc2.bias" + ], + "lr_scale": 0.10132825956607122, + "lr": 2.0265651913214247e-06, + "weight_decay": 0.0 + }, + "layer_3_decay": { + "param_names": [ + "backbone.levels.0.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.2.dcn.offset.weight", + "backbone.levels.0.blocks.2.dcn.mask.weight", + "backbone.levels.0.blocks.2.dcn.input_proj.weight", + "backbone.levels.0.blocks.2.dcn.output_proj.weight", + "backbone.levels.0.blocks.2.mlp.fc1.weight", + "backbone.levels.0.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.10132825956607122, + "lr": 2.0265651913214247e-06, + "weight_decay": 0.05 + }, + "layer_4_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.gamma1", + "backbone.levels.0.blocks.3.gamma2", + "backbone.levels.0.blocks.3.norm1.0.weight", + "backbone.levels.0.blocks.3.norm1.0.bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.3.dcn.offset.bias", + "backbone.levels.0.blocks.3.dcn.mask.bias", + "backbone.levels.0.blocks.3.dcn.input_proj.bias", + "backbone.levels.0.blocks.3.dcn.output_proj.bias", + "backbone.levels.0.blocks.3.norm2.0.weight", + "backbone.levels.0.blocks.3.norm2.0.bias", + "backbone.levels.0.blocks.3.mlp.fc1.bias", + "backbone.levels.0.blocks.3.mlp.fc2.bias" + ], + "lr_scale": 0.10779602081496939, + "lr": 2.155920416299388e-06, + "weight_decay": 0.0 + }, + "layer_4_decay": { + "param_names": [ + "backbone.levels.0.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.3.dcn.offset.weight", + "backbone.levels.0.blocks.3.dcn.mask.weight", + "backbone.levels.0.blocks.3.dcn.input_proj.weight", + "backbone.levels.0.blocks.3.dcn.output_proj.weight", + "backbone.levels.0.blocks.3.mlp.fc1.weight", + "backbone.levels.0.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.10779602081496939, + "lr": 2.155920416299388e-06, + "weight_decay": 0.05 + }, + "layer_5_no_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.gamma1", + "backbone.levels.0.blocks.4.gamma2", + "backbone.levels.0.blocks.4.norm1.0.weight", + "backbone.levels.0.blocks.4.norm1.0.bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.0.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.0.blocks.4.dcn.offset.bias", + "backbone.levels.0.blocks.4.dcn.mask.bias", + "backbone.levels.0.blocks.4.dcn.input_proj.bias", + "backbone.levels.0.blocks.4.dcn.output_proj.bias", + "backbone.levels.0.blocks.4.norm2.0.weight", + "backbone.levels.0.blocks.4.norm2.0.bias", + 
"backbone.levels.0.blocks.4.mlp.fc1.bias", + "backbone.levels.0.blocks.4.mlp.fc2.bias" + ], + "lr_scale": 0.11467661788826532, + "lr": 2.2935323577653067e-06, + "weight_decay": 0.0 + }, + "layer_5_decay": { + "param_names": [ + "backbone.levels.0.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.0.blocks.4.dcn.offset.weight", + "backbone.levels.0.blocks.4.dcn.mask.weight", + "backbone.levels.0.blocks.4.dcn.input_proj.weight", + "backbone.levels.0.blocks.4.dcn.output_proj.weight", + "backbone.levels.0.blocks.4.mlp.fc1.weight", + "backbone.levels.0.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.11467661788826532, + "lr": 2.2935323577653067e-06, + "weight_decay": 0.05 + }, + "layer_6_decay": { + "param_names": [ + "backbone.levels.0.downsample.conv.weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.0.dcn.offset.weight", + "backbone.levels.1.blocks.0.dcn.mask.weight", + "backbone.levels.1.blocks.0.dcn.input_proj.weight", + "backbone.levels.1.blocks.0.dcn.output_proj.weight", + "backbone.levels.1.blocks.0.mlp.fc1.weight", + "backbone.levels.1.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.12199640200879289, + "lr": 2.439928040175858e-06, + "weight_decay": 0.05 + }, + "layer_6_no_decay": { + "param_names": [ + "backbone.levels.0.downsample.norm.1.weight", + "backbone.levels.0.downsample.norm.1.bias", + "backbone.levels.1.blocks.0.gamma1", + "backbone.levels.1.blocks.0.gamma2", + "backbone.levels.1.blocks.0.norm1.0.weight", + "backbone.levels.1.blocks.0.norm1.0.bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.0.dcn.offset.bias", + "backbone.levels.1.blocks.0.dcn.mask.bias", + "backbone.levels.1.blocks.0.dcn.input_proj.bias", + "backbone.levels.1.blocks.0.dcn.output_proj.bias", + "backbone.levels.1.blocks.0.norm2.0.weight", + "backbone.levels.1.blocks.0.norm2.0.bias", + "backbone.levels.1.blocks.0.mlp.fc1.bias", + "backbone.levels.1.blocks.0.mlp.fc2.bias" + ], + "lr_scale": 0.12199640200879289, + "lr": 2.439928040175858e-06, + "weight_decay": 0.0 + }, + "layer_7_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.gamma1", + "backbone.levels.1.blocks.1.gamma2", + "backbone.levels.1.blocks.1.norm1.0.weight", + "backbone.levels.1.blocks.1.norm1.0.bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.1.dcn.offset.bias", + "backbone.levels.1.blocks.1.dcn.mask.bias", + "backbone.levels.1.blocks.1.dcn.input_proj.bias", + "backbone.levels.1.blocks.1.dcn.output_proj.bias", + "backbone.levels.1.blocks.1.norm2.0.weight", + "backbone.levels.1.blocks.1.norm2.0.bias", + "backbone.levels.1.blocks.1.mlp.fc1.bias", + "backbone.levels.1.blocks.1.mlp.fc2.bias" + ], + "lr_scale": 0.12978340639233288, + "lr": 2.595668127846658e-06, + "weight_decay": 0.0 + }, + "layer_7_decay": { + "param_names": [ + "backbone.levels.1.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.1.dcn.offset.weight", + "backbone.levels.1.blocks.1.dcn.mask.weight", + "backbone.levels.1.blocks.1.dcn.input_proj.weight", + "backbone.levels.1.blocks.1.dcn.output_proj.weight", + "backbone.levels.1.blocks.1.mlp.fc1.weight", + "backbone.levels.1.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.12978340639233288, + "lr": 2.595668127846658e-06, + "weight_decay": 0.05 + }, + "layer_8_no_decay": { + "param_names": [ + 
"backbone.levels.1.blocks.2.gamma1", + "backbone.levels.1.blocks.2.gamma2", + "backbone.levels.1.blocks.2.norm1.0.weight", + "backbone.levels.1.blocks.2.norm1.0.bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.2.dcn.offset.bias", + "backbone.levels.1.blocks.2.dcn.mask.bias", + "backbone.levels.1.blocks.2.dcn.input_proj.bias", + "backbone.levels.1.blocks.2.dcn.output_proj.bias", + "backbone.levels.1.blocks.2.norm2.0.weight", + "backbone.levels.1.blocks.2.norm2.0.bias", + "backbone.levels.1.blocks.2.mlp.fc1.bias", + "backbone.levels.1.blocks.2.mlp.fc2.bias" + ], + "lr_scale": 0.13806745360886477, + "lr": 2.7613490721772957e-06, + "weight_decay": 0.0 + }, + "layer_8_decay": { + "param_names": [ + "backbone.levels.1.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.2.dcn.offset.weight", + "backbone.levels.1.blocks.2.dcn.mask.weight", + "backbone.levels.1.blocks.2.dcn.input_proj.weight", + "backbone.levels.1.blocks.2.dcn.output_proj.weight", + "backbone.levels.1.blocks.2.mlp.fc1.weight", + "backbone.levels.1.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.13806745360886477, + "lr": 2.7613490721772957e-06, + "weight_decay": 0.05 + }, + "layer_9_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.gamma1", + "backbone.levels.1.blocks.3.gamma2", + "backbone.levels.1.blocks.3.norm1.0.weight", + "backbone.levels.1.blocks.3.norm1.0.bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.3.dcn.offset.bias", + "backbone.levels.1.blocks.3.dcn.mask.bias", + "backbone.levels.1.blocks.3.dcn.input_proj.bias", + "backbone.levels.1.blocks.3.dcn.output_proj.bias", + "backbone.levels.1.blocks.3.norm2.0.weight", + "backbone.levels.1.blocks.3.norm2.0.bias", + "backbone.levels.1.blocks.3.mlp.fc1.bias", + "backbone.levels.1.blocks.3.mlp.fc2.bias" + ], + "lr_scale": 0.14688026979666466, + "lr": 2.9376053959332933e-06, + "weight_decay": 0.0 + }, + "layer_9_decay": { + "param_names": [ + "backbone.levels.1.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.3.dcn.offset.weight", + "backbone.levels.1.blocks.3.dcn.mask.weight", + "backbone.levels.1.blocks.3.dcn.input_proj.weight", + "backbone.levels.1.blocks.3.dcn.output_proj.weight", + "backbone.levels.1.blocks.3.mlp.fc1.weight", + "backbone.levels.1.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.14688026979666466, + "lr": 2.9376053959332933e-06, + "weight_decay": 0.05 + }, + "layer_10_no_decay": { + "param_names": [ + "backbone.levels.1.blocks.4.gamma1", + "backbone.levels.1.blocks.4.gamma2", + "backbone.levels.1.blocks.4.norm1.0.weight", + "backbone.levels.1.blocks.4.norm1.0.bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.1.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.1.blocks.4.dcn.offset.bias", + "backbone.levels.1.blocks.4.dcn.mask.bias", + "backbone.levels.1.blocks.4.dcn.input_proj.bias", + "backbone.levels.1.blocks.4.dcn.output_proj.bias", + "backbone.levels.1.blocks.4.norm2.0.weight", + "backbone.levels.1.blocks.4.norm2.0.bias", + "backbone.levels.1.blocks.4.mlp.fc1.bias", + "backbone.levels.1.blocks.4.mlp.fc2.bias" + ], + "lr_scale": 0.15625560616666453, + "lr": 3.125112123333291e-06, + "weight_decay": 0.0 + }, + "layer_10_decay": { + "param_names": [ + 
"backbone.levels.1.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.1.blocks.4.dcn.offset.weight", + "backbone.levels.1.blocks.4.dcn.mask.weight", + "backbone.levels.1.blocks.4.dcn.input_proj.weight", + "backbone.levels.1.blocks.4.dcn.output_proj.weight", + "backbone.levels.1.blocks.4.mlp.fc1.weight", + "backbone.levels.1.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.15625560616666453, + "lr": 3.125112123333291e-06, + "weight_decay": 0.05 + }, + "layer_11_decay": { + "param_names": [ + "backbone.levels.1.downsample.conv.weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.0.dcn.offset.weight", + "backbone.levels.2.blocks.0.dcn.mask.weight", + "backbone.levels.2.blocks.0.dcn.input_proj.weight", + "backbone.levels.2.blocks.0.dcn.output_proj.weight", + "backbone.levels.2.blocks.0.mlp.fc1.weight", + "backbone.levels.2.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.16622936826240908, + "lr": 3.324587365248182e-06, + "weight_decay": 0.05 + }, + "layer_11_no_decay": { + "param_names": [ + "backbone.levels.1.downsample.norm.1.weight", + "backbone.levels.1.downsample.norm.1.bias", + "backbone.levels.2.blocks.0.gamma1", + "backbone.levels.2.blocks.0.gamma2", + "backbone.levels.2.blocks.0.norm1.0.weight", + "backbone.levels.2.blocks.0.norm1.0.bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.0.dcn.offset.bias", + "backbone.levels.2.blocks.0.dcn.mask.bias", + "backbone.levels.2.blocks.0.dcn.input_proj.bias", + "backbone.levels.2.blocks.0.dcn.output_proj.bias", + "backbone.levels.2.blocks.0.norm2.0.weight", + "backbone.levels.2.blocks.0.norm2.0.bias", + "backbone.levels.2.blocks.0.mlp.fc1.bias", + "backbone.levels.2.blocks.0.mlp.fc2.bias" + ], + "lr_scale": 0.16622936826240908, + "lr": 3.324587365248182e-06, + "weight_decay": 0.0 + }, + "layer_12_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.gamma1", + "backbone.levels.2.blocks.1.gamma2", + "backbone.levels.2.blocks.1.norm1.0.weight", + "backbone.levels.2.blocks.1.norm1.0.bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.1.dcn.offset.bias", + "backbone.levels.2.blocks.1.dcn.mask.bias", + "backbone.levels.2.blocks.1.dcn.input_proj.bias", + "backbone.levels.2.blocks.1.dcn.output_proj.bias", + "backbone.levels.2.blocks.1.norm2.0.weight", + "backbone.levels.2.blocks.1.norm2.0.bias", + "backbone.levels.2.blocks.1.mlp.fc1.bias", + "backbone.levels.2.blocks.1.mlp.fc2.bias" + ], + "lr_scale": 0.17683975347064798, + "lr": 3.53679506941296e-06, + "weight_decay": 0.0 + }, + "layer_12_decay": { + "param_names": [ + "backbone.levels.2.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.1.dcn.offset.weight", + "backbone.levels.2.blocks.1.dcn.mask.weight", + "backbone.levels.2.blocks.1.dcn.input_proj.weight", + "backbone.levels.2.blocks.1.dcn.output_proj.weight", + "backbone.levels.2.blocks.1.mlp.fc1.weight", + "backbone.levels.2.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.17683975347064798, + "lr": 3.53679506941296e-06, + "weight_decay": 0.05 + }, + "layer_13_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.gamma1", + "backbone.levels.2.blocks.2.gamma2", + "backbone.levels.2.blocks.2.norm1.0.weight", + "backbone.levels.2.blocks.2.norm1.0.bias", + "backbone.levels.2.blocks.2.dcn.dw_conv.0.bias", + 
"backbone.levels.2.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.2.dcn.offset.bias", + "backbone.levels.2.blocks.2.dcn.mask.bias", + "backbone.levels.2.blocks.2.dcn.input_proj.bias", + "backbone.levels.2.blocks.2.dcn.output_proj.bias", + "backbone.levels.2.blocks.2.norm2.0.weight", + "backbone.levels.2.blocks.2.norm2.0.bias", + "backbone.levels.2.blocks.2.mlp.fc1.bias", + "backbone.levels.2.blocks.2.mlp.fc2.bias" + ], + "lr_scale": 0.18812739730919997, + "lr": 3.7625479461839997e-06, + "weight_decay": 0.0 + }, + "layer_13_decay": { + "param_names": [ + "backbone.levels.2.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.2.dcn.offset.weight", + "backbone.levels.2.blocks.2.dcn.mask.weight", + "backbone.levels.2.blocks.2.dcn.input_proj.weight", + "backbone.levels.2.blocks.2.dcn.output_proj.weight", + "backbone.levels.2.blocks.2.mlp.fc1.weight", + "backbone.levels.2.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.18812739730919997, + "lr": 3.7625479461839997e-06, + "weight_decay": 0.05 + }, + "layer_14_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.gamma1", + "backbone.levels.2.blocks.3.gamma2", + "backbone.levels.2.blocks.3.norm1.0.weight", + "backbone.levels.2.blocks.3.norm1.0.bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.3.dcn.offset.bias", + "backbone.levels.2.blocks.3.dcn.mask.bias", + "backbone.levels.2.blocks.3.dcn.input_proj.bias", + "backbone.levels.2.blocks.3.dcn.output_proj.bias", + "backbone.levels.2.blocks.3.norm2.0.weight", + "backbone.levels.2.blocks.3.norm2.0.bias", + "backbone.levels.2.blocks.3.mlp.fc1.bias", + "backbone.levels.2.blocks.3.mlp.fc2.bias" + ], + "lr_scale": 0.20013552905234042, + "lr": 4.002710581046809e-06, + "weight_decay": 0.0 + }, + "layer_14_decay": { + "param_names": [ + "backbone.levels.2.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.3.dcn.offset.weight", + "backbone.levels.2.blocks.3.dcn.mask.weight", + "backbone.levels.2.blocks.3.dcn.input_proj.weight", + "backbone.levels.2.blocks.3.dcn.output_proj.weight", + "backbone.levels.2.blocks.3.mlp.fc1.weight", + "backbone.levels.2.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.20013552905234042, + "lr": 4.002710581046809e-06, + "weight_decay": 0.05 + }, + "layer_15_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.gamma1", + "backbone.levels.2.blocks.4.gamma2", + "backbone.levels.2.blocks.4.norm1.0.weight", + "backbone.levels.2.blocks.4.norm1.0.bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.4.dcn.offset.bias", + "backbone.levels.2.blocks.4.dcn.mask.bias", + "backbone.levels.2.blocks.4.dcn.input_proj.bias", + "backbone.levels.2.blocks.4.dcn.output_proj.bias", + "backbone.levels.2.blocks.4.norm2.0.weight", + "backbone.levels.2.blocks.4.norm2.0.bias", + "backbone.levels.2.blocks.4.mlp.fc1.bias", + "backbone.levels.2.blocks.4.mlp.fc2.bias" + ], + "lr_scale": 0.21291013728972386, + "lr": 4.258202745794477e-06, + "weight_decay": 0.0 + }, + "layer_15_decay": { + "param_names": [ + "backbone.levels.2.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.4.dcn.offset.weight", + "backbone.levels.2.blocks.4.dcn.mask.weight", + "backbone.levels.2.blocks.4.dcn.input_proj.weight", + 
"backbone.levels.2.blocks.4.dcn.output_proj.weight", + "backbone.levels.2.blocks.4.mlp.fc1.weight", + "backbone.levels.2.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.21291013728972386, + "lr": 4.258202745794477e-06, + "weight_decay": 0.05 + }, + "layer_16_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.gamma1", + "backbone.levels.2.blocks.5.gamma2", + "backbone.levels.2.blocks.5.norm1.0.weight", + "backbone.levels.2.blocks.5.norm1.0.bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.5.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.5.dcn.offset.bias", + "backbone.levels.2.blocks.5.dcn.mask.bias", + "backbone.levels.2.blocks.5.dcn.input_proj.bias", + "backbone.levels.2.blocks.5.dcn.output_proj.bias", + "backbone.levels.2.blocks.5.norm2.0.weight", + "backbone.levels.2.blocks.5.norm2.0.bias", + "backbone.levels.2.blocks.5.mlp.fc1.bias", + "backbone.levels.2.blocks.5.mlp.fc2.bias" + ], + "lr_scale": 0.22650014605289773, + "lr": 4.5300029210579546e-06, + "weight_decay": 0.0 + }, + "layer_16_decay": { + "param_names": [ + "backbone.levels.2.blocks.5.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.5.dcn.offset.weight", + "backbone.levels.2.blocks.5.dcn.mask.weight", + "backbone.levels.2.blocks.5.dcn.input_proj.weight", + "backbone.levels.2.blocks.5.dcn.output_proj.weight", + "backbone.levels.2.blocks.5.mlp.fc1.weight", + "backbone.levels.2.blocks.5.mlp.fc2.weight" + ], + "lr_scale": 0.22650014605289773, + "lr": 4.5300029210579546e-06, + "weight_decay": 0.05 + }, + "layer_17_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.gamma1", + "backbone.levels.2.blocks.6.gamma2", + "backbone.levels.2.blocks.6.norm1.0.weight", + "backbone.levels.2.blocks.6.norm1.0.bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.6.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.6.dcn.offset.bias", + "backbone.levels.2.blocks.6.dcn.mask.bias", + "backbone.levels.2.blocks.6.dcn.input_proj.bias", + "backbone.levels.2.blocks.6.dcn.output_proj.bias", + "backbone.levels.2.blocks.6.norm2.0.weight", + "backbone.levels.2.blocks.6.norm2.0.bias", + "backbone.levels.2.blocks.6.mlp.fc1.bias", + "backbone.levels.2.blocks.6.mlp.fc2.bias" + ], + "lr_scale": 0.24095760218393378, + "lr": 4.819152043678676e-06, + "weight_decay": 0.0 + }, + "layer_17_decay": { + "param_names": [ + "backbone.levels.2.blocks.6.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.6.dcn.offset.weight", + "backbone.levels.2.blocks.6.dcn.mask.weight", + "backbone.levels.2.blocks.6.dcn.input_proj.weight", + "backbone.levels.2.blocks.6.dcn.output_proj.weight", + "backbone.levels.2.blocks.6.mlp.fc1.weight", + "backbone.levels.2.blocks.6.mlp.fc2.weight" + ], + "lr_scale": 0.24095760218393378, + "lr": 4.819152043678676e-06, + "weight_decay": 0.05 + }, + "layer_18_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.gamma1", + "backbone.levels.2.blocks.7.gamma2", + "backbone.levels.2.blocks.7.norm1.0.weight", + "backbone.levels.2.blocks.7.norm1.0.bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.7.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.7.dcn.offset.bias", + "backbone.levels.2.blocks.7.dcn.mask.bias", + "backbone.levels.2.blocks.7.dcn.input_proj.bias", + "backbone.levels.2.blocks.7.dcn.output_proj.bias", + "backbone.levels.2.blocks.7.norm2.0.weight", 
+ "backbone.levels.2.blocks.7.norm2.0.bias", + "backbone.levels.2.blocks.7.mlp.fc1.bias", + "backbone.levels.2.blocks.7.mlp.fc2.bias" + ], + "lr_scale": 0.25633787466375935, + "lr": 5.126757493275187e-06, + "weight_decay": 0.0 + }, + "layer_18_decay": { + "param_names": [ + "backbone.levels.2.blocks.7.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.7.dcn.offset.weight", + "backbone.levels.2.blocks.7.dcn.mask.weight", + "backbone.levels.2.blocks.7.dcn.input_proj.weight", + "backbone.levels.2.blocks.7.dcn.output_proj.weight", + "backbone.levels.2.blocks.7.mlp.fc1.weight", + "backbone.levels.2.blocks.7.mlp.fc2.weight" + ], + "lr_scale": 0.25633787466375935, + "lr": 5.126757493275187e-06, + "weight_decay": 0.05 + }, + "layer_19_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.gamma1", + "backbone.levels.2.blocks.8.gamma2", + "backbone.levels.2.blocks.8.norm1.0.weight", + "backbone.levels.2.blocks.8.norm1.0.bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.8.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.8.dcn.offset.bias", + "backbone.levels.2.blocks.8.dcn.mask.bias", + "backbone.levels.2.blocks.8.dcn.input_proj.bias", + "backbone.levels.2.blocks.8.dcn.output_proj.bias", + "backbone.levels.2.blocks.8.norm2.0.weight", + "backbone.levels.2.blocks.8.norm2.0.bias", + "backbone.levels.2.blocks.8.mlp.fc1.bias", + "backbone.levels.2.blocks.8.mlp.fc2.bias" + ], + "lr_scale": 0.27269986666357376, + "lr": 5.453997333271476e-06, + "weight_decay": 0.0 + }, + "layer_19_decay": { + "param_names": [ + "backbone.levels.2.blocks.8.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.8.dcn.offset.weight", + "backbone.levels.2.blocks.8.dcn.mask.weight", + "backbone.levels.2.blocks.8.dcn.input_proj.weight", + "backbone.levels.2.blocks.8.dcn.output_proj.weight", + "backbone.levels.2.blocks.8.mlp.fc1.weight", + "backbone.levels.2.blocks.8.mlp.fc2.weight" + ], + "lr_scale": 0.27269986666357376, + "lr": 5.453997333271476e-06, + "weight_decay": 0.05 + }, + "layer_20_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.gamma1", + "backbone.levels.2.blocks.9.gamma2", + "backbone.levels.2.blocks.9.norm1.0.weight", + "backbone.levels.2.blocks.9.norm1.0.bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.9.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.9.dcn.offset.bias", + "backbone.levels.2.blocks.9.dcn.mask.bias", + "backbone.levels.2.blocks.9.dcn.input_proj.bias", + "backbone.levels.2.blocks.9.dcn.output_proj.bias", + "backbone.levels.2.blocks.9.norm2.0.weight", + "backbone.levels.2.blocks.9.norm2.0.bias", + "backbone.levels.2.blocks.9.mlp.fc1.bias", + "backbone.levels.2.blocks.9.mlp.fc2.bias" + ], + "lr_scale": 0.2901062411314615, + "lr": 5.802124822629231e-06, + "weight_decay": 0.0 + }, + "layer_20_decay": { + "param_names": [ + "backbone.levels.2.blocks.9.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.9.dcn.offset.weight", + "backbone.levels.2.blocks.9.dcn.mask.weight", + "backbone.levels.2.blocks.9.dcn.input_proj.weight", + "backbone.levels.2.blocks.9.dcn.output_proj.weight", + "backbone.levels.2.blocks.9.mlp.fc1.weight", + "backbone.levels.2.blocks.9.mlp.fc2.weight" + ], + "lr_scale": 0.2901062411314615, + "lr": 5.802124822629231e-06, + "weight_decay": 0.05 + }, + "layer_21_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.gamma1", + "backbone.levels.2.blocks.10.gamma2", + 
"backbone.levels.2.blocks.10.norm1.0.weight", + "backbone.levels.2.blocks.10.norm1.0.bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.10.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.10.dcn.offset.bias", + "backbone.levels.2.blocks.10.dcn.mask.bias", + "backbone.levels.2.blocks.10.dcn.input_proj.bias", + "backbone.levels.2.blocks.10.dcn.output_proj.bias", + "backbone.levels.2.blocks.10.norm2.0.weight", + "backbone.levels.2.blocks.10.norm2.0.bias", + "backbone.levels.2.blocks.10.mlp.fc1.bias", + "backbone.levels.2.blocks.10.mlp.fc2.bias" + ], + "lr_scale": 0.30862366077815057, + "lr": 6.1724732155630115e-06, + "weight_decay": 0.0 + }, + "layer_21_decay": { + "param_names": [ + "backbone.levels.2.blocks.10.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.10.dcn.offset.weight", + "backbone.levels.2.blocks.10.dcn.mask.weight", + "backbone.levels.2.blocks.10.dcn.input_proj.weight", + "backbone.levels.2.blocks.10.dcn.output_proj.weight", + "backbone.levels.2.blocks.10.mlp.fc1.weight", + "backbone.levels.2.blocks.10.mlp.fc2.weight" + ], + "lr_scale": 0.30862366077815057, + "lr": 6.1724732155630115e-06, + "weight_decay": 0.05 + }, + "layer_22_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.gamma1", + "backbone.levels.2.blocks.11.gamma2", + "backbone.levels.2.blocks.11.norm1.0.weight", + "backbone.levels.2.blocks.11.norm1.0.bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.11.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.11.dcn.offset.bias", + "backbone.levels.2.blocks.11.dcn.mask.bias", + "backbone.levels.2.blocks.11.dcn.input_proj.bias", + "backbone.levels.2.blocks.11.dcn.output_proj.bias", + "backbone.levels.2.blocks.11.norm2.0.weight", + "backbone.levels.2.blocks.11.norm2.0.bias", + "backbone.levels.2.blocks.11.mlp.fc1.bias", + "backbone.levels.2.blocks.11.mlp.fc2.bias" + ], + "lr_scale": 0.32832304338101126, + "lr": 6.566460867620226e-06, + "weight_decay": 0.0 + }, + "layer_22_decay": { + "param_names": [ + "backbone.levels.2.blocks.11.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.11.dcn.offset.weight", + "backbone.levels.2.blocks.11.dcn.mask.weight", + "backbone.levels.2.blocks.11.dcn.input_proj.weight", + "backbone.levels.2.blocks.11.dcn.output_proj.weight", + "backbone.levels.2.blocks.11.mlp.fc1.weight", + "backbone.levels.2.blocks.11.mlp.fc2.weight" + ], + "lr_scale": 0.32832304338101126, + "lr": 6.566460867620226e-06, + "weight_decay": 0.05 + }, + "layer_23_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.12.gamma1", + "backbone.levels.2.blocks.12.gamma2", + "backbone.levels.2.blocks.12.norm1.0.weight", + "backbone.levels.2.blocks.12.norm1.0.bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.12.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.12.dcn.offset.bias", + "backbone.levels.2.blocks.12.dcn.mask.bias", + "backbone.levels.2.blocks.12.dcn.input_proj.bias", + "backbone.levels.2.blocks.12.dcn.output_proj.bias", + "backbone.levels.2.blocks.12.norm2.0.weight", + "backbone.levels.2.blocks.12.norm2.0.bias", + "backbone.levels.2.blocks.12.mlp.fc1.bias", + "backbone.levels.2.blocks.12.mlp.fc2.bias" + ], + "lr_scale": 0.34927983338405455, + "lr": 6.985596667681092e-06, + "weight_decay": 0.0 + }, + "layer_23_decay": { + "param_names": [ + 
"backbone.levels.2.blocks.12.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.12.dcn.offset.weight", + "backbone.levels.2.blocks.12.dcn.mask.weight", + "backbone.levels.2.blocks.12.dcn.input_proj.weight", + "backbone.levels.2.blocks.12.dcn.output_proj.weight", + "backbone.levels.2.blocks.12.mlp.fc1.weight", + "backbone.levels.2.blocks.12.mlp.fc2.weight" + ], + "lr_scale": 0.34927983338405455, + "lr": 6.985596667681092e-06, + "weight_decay": 0.05 + }, + "layer_24_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.gamma1", + "backbone.levels.2.blocks.13.gamma2", + "backbone.levels.2.blocks.13.norm1.0.weight", + "backbone.levels.2.blocks.13.norm1.0.bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.13.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.13.dcn.offset.bias", + "backbone.levels.2.blocks.13.dcn.mask.bias", + "backbone.levels.2.blocks.13.dcn.input_proj.bias", + "backbone.levels.2.blocks.13.dcn.output_proj.bias", + "backbone.levels.2.blocks.13.norm2.0.weight", + "backbone.levels.2.blocks.13.norm2.0.bias", + "backbone.levels.2.blocks.13.mlp.fc1.bias", + "backbone.levels.2.blocks.13.mlp.fc2.bias" + ], + "lr_scale": 0.3715742908341006, + "lr": 7.4314858166820124e-06, + "weight_decay": 0.0 + }, + "layer_24_decay": { + "param_names": [ + "backbone.levels.2.blocks.13.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.13.dcn.offset.weight", + "backbone.levels.2.blocks.13.dcn.mask.weight", + "backbone.levels.2.blocks.13.dcn.input_proj.weight", + "backbone.levels.2.blocks.13.dcn.output_proj.weight", + "backbone.levels.2.blocks.13.mlp.fc1.weight", + "backbone.levels.2.blocks.13.mlp.fc2.weight" + ], + "lr_scale": 0.3715742908341006, + "lr": 7.4314858166820124e-06, + "weight_decay": 0.05 + }, + "layer_25_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.gamma1", + "backbone.levels.2.blocks.14.gamma2", + "backbone.levels.2.blocks.14.norm1.0.weight", + "backbone.levels.2.blocks.14.norm1.0.bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.14.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.14.dcn.offset.bias", + "backbone.levels.2.blocks.14.dcn.mask.bias", + "backbone.levels.2.blocks.14.dcn.input_proj.bias", + "backbone.levels.2.blocks.14.dcn.output_proj.bias", + "backbone.levels.2.blocks.14.norm2.0.weight", + "backbone.levels.2.blocks.14.norm2.0.bias", + "backbone.levels.2.blocks.14.mlp.fc1.bias", + "backbone.levels.2.blocks.14.mlp.fc2.bias" + ], + "lr_scale": 0.3952917987596815, + "lr": 7.90583597519363e-06, + "weight_decay": 0.0 + }, + "layer_25_decay": { + "param_names": [ + "backbone.levels.2.blocks.14.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.14.dcn.offset.weight", + "backbone.levels.2.blocks.14.dcn.mask.weight", + "backbone.levels.2.blocks.14.dcn.input_proj.weight", + "backbone.levels.2.blocks.14.dcn.output_proj.weight", + "backbone.levels.2.blocks.14.mlp.fc1.weight", + "backbone.levels.2.blocks.14.mlp.fc2.weight" + ], + "lr_scale": 0.3952917987596815, + "lr": 7.90583597519363e-06, + "weight_decay": 0.05 + }, + "layer_26_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.gamma1", + "backbone.levels.2.blocks.15.gamma2", + "backbone.levels.2.blocks.15.norm1.0.weight", + "backbone.levels.2.blocks.15.norm1.0.bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.15.dcn.dw_conv.1.1.weight", + 
"backbone.levels.2.blocks.15.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.15.dcn.offset.bias", + "backbone.levels.2.blocks.15.dcn.mask.bias", + "backbone.levels.2.blocks.15.dcn.input_proj.bias", + "backbone.levels.2.blocks.15.dcn.output_proj.bias", + "backbone.levels.2.blocks.15.norm2.0.weight", + "backbone.levels.2.blocks.15.norm2.0.bias", + "backbone.levels.2.blocks.15.mlp.fc1.bias", + "backbone.levels.2.blocks.15.mlp.fc2.bias" + ], + "lr_scale": 0.42052319016987394, + "lr": 8.41046380339748e-06, + "weight_decay": 0.0 + }, + "layer_26_decay": { + "param_names": [ + "backbone.levels.2.blocks.15.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.15.dcn.offset.weight", + "backbone.levels.2.blocks.15.dcn.mask.weight", + "backbone.levels.2.blocks.15.dcn.input_proj.weight", + "backbone.levels.2.blocks.15.dcn.output_proj.weight", + "backbone.levels.2.blocks.15.mlp.fc1.weight", + "backbone.levels.2.blocks.15.mlp.fc2.weight" + ], + "lr_scale": 0.42052319016987394, + "lr": 8.41046380339748e-06, + "weight_decay": 0.05 + }, + "layer_27_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.gamma1", + "backbone.levels.2.blocks.16.gamma2", + "backbone.levels.2.blocks.16.norm1.0.weight", + "backbone.levels.2.blocks.16.norm1.0.bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.16.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.16.dcn.offset.bias", + "backbone.levels.2.blocks.16.dcn.mask.bias", + "backbone.levels.2.blocks.16.dcn.input_proj.bias", + "backbone.levels.2.blocks.16.dcn.output_proj.bias", + "backbone.levels.2.blocks.16.norm2.0.weight", + "backbone.levels.2.blocks.16.norm2.0.bias", + "backbone.levels.2.blocks.16.mlp.fc1.bias", + "backbone.levels.2.blocks.16.mlp.fc2.bias" + ], + "lr_scale": 0.44736509592539786, + "lr": 8.947301918507958e-06, + "weight_decay": 0.0 + }, + "layer_27_decay": { + "param_names": [ + "backbone.levels.2.blocks.16.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.16.dcn.offset.weight", + "backbone.levels.2.blocks.16.dcn.mask.weight", + "backbone.levels.2.blocks.16.dcn.input_proj.weight", + "backbone.levels.2.blocks.16.dcn.output_proj.weight", + "backbone.levels.2.blocks.16.mlp.fc1.weight", + "backbone.levels.2.blocks.16.mlp.fc2.weight" + ], + "lr_scale": 0.44736509592539786, + "lr": 8.947301918507958e-06, + "weight_decay": 0.05 + }, + "layer_28_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.gamma1", + "backbone.levels.2.blocks.17.gamma2", + "backbone.levels.2.blocks.17.norm1.0.weight", + "backbone.levels.2.blocks.17.norm1.0.bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.17.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.17.dcn.offset.bias", + "backbone.levels.2.blocks.17.dcn.mask.bias", + "backbone.levels.2.blocks.17.dcn.input_proj.bias", + "backbone.levels.2.blocks.17.dcn.output_proj.bias", + "backbone.levels.2.blocks.17.norm2.0.weight", + "backbone.levels.2.blocks.17.norm2.0.bias", + "backbone.levels.2.blocks.17.mlp.fc1.bias", + "backbone.levels.2.blocks.17.mlp.fc2.bias" + ], + "lr_scale": 0.47592031481425306, + "lr": 9.518406296285062e-06, + "weight_decay": 0.0 + }, + "layer_28_decay": { + "param_names": [ + "backbone.levels.2.blocks.17.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.17.dcn.offset.weight", + "backbone.levels.2.blocks.17.dcn.mask.weight", + "backbone.levels.2.blocks.17.dcn.input_proj.weight", + 
"backbone.levels.2.blocks.17.dcn.output_proj.weight", + "backbone.levels.2.blocks.17.mlp.fc1.weight", + "backbone.levels.2.blocks.17.mlp.fc2.weight" + ], + "lr_scale": 0.47592031481425306, + "lr": 9.518406296285062e-06, + "weight_decay": 0.05 + }, + "layer_29_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.gamma1", + "backbone.levels.2.blocks.18.gamma2", + "backbone.levels.2.blocks.18.norm1.0.weight", + "backbone.levels.2.blocks.18.norm1.0.bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.18.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.18.dcn.offset.bias", + "backbone.levels.2.blocks.18.dcn.mask.bias", + "backbone.levels.2.blocks.18.dcn.input_proj.bias", + "backbone.levels.2.blocks.18.dcn.output_proj.bias", + "backbone.levels.2.blocks.18.norm2.0.weight", + "backbone.levels.2.blocks.18.norm2.0.bias", + "backbone.levels.2.blocks.18.mlp.fc1.bias", + "backbone.levels.2.blocks.18.mlp.fc2.bias" + ], + "lr_scale": 0.5062982072492054, + "lr": 1.0125964144984108e-05, + "weight_decay": 0.0 + }, + "layer_29_decay": { + "param_names": [ + "backbone.levels.2.blocks.18.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.18.dcn.offset.weight", + "backbone.levels.2.blocks.18.dcn.mask.weight", + "backbone.levels.2.blocks.18.dcn.input_proj.weight", + "backbone.levels.2.blocks.18.dcn.output_proj.weight", + "backbone.levels.2.blocks.18.mlp.fc1.weight", + "backbone.levels.2.blocks.18.mlp.fc2.weight" + ], + "lr_scale": 0.5062982072492054, + "lr": 1.0125964144984108e-05, + "weight_decay": 0.05 + }, + "layer_30_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.gamma1", + "backbone.levels.2.blocks.19.gamma2", + "backbone.levels.2.blocks.19.norm1.0.weight", + "backbone.levels.2.blocks.19.norm1.0.bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.19.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.19.dcn.offset.bias", + "backbone.levels.2.blocks.19.dcn.mask.bias", + "backbone.levels.2.blocks.19.dcn.input_proj.bias", + "backbone.levels.2.blocks.19.dcn.output_proj.bias", + "backbone.levels.2.blocks.19.norm2.0.weight", + "backbone.levels.2.blocks.19.norm2.0.bias", + "backbone.levels.2.blocks.19.mlp.fc1.bias", + "backbone.levels.2.blocks.19.mlp.fc2.bias" + ], + "lr_scale": 0.5386151140948994, + "lr": 1.0772302281897988e-05, + "weight_decay": 0.0 + }, + "layer_30_decay": { + "param_names": [ + "backbone.levels.2.blocks.19.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.19.dcn.offset.weight", + "backbone.levels.2.blocks.19.dcn.mask.weight", + "backbone.levels.2.blocks.19.dcn.input_proj.weight", + "backbone.levels.2.blocks.19.dcn.output_proj.weight", + "backbone.levels.2.blocks.19.mlp.fc1.weight", + "backbone.levels.2.blocks.19.mlp.fc2.weight" + ], + "lr_scale": 0.5386151140948994, + "lr": 1.0772302281897988e-05, + "weight_decay": 0.05 + }, + "layer_31_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.gamma1", + "backbone.levels.2.blocks.20.gamma2", + "backbone.levels.2.blocks.20.norm1.0.weight", + "backbone.levels.2.blocks.20.norm1.0.bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.20.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.20.dcn.offset.bias", + "backbone.levels.2.blocks.20.dcn.mask.bias", + "backbone.levels.2.blocks.20.dcn.input_proj.bias", + 
"backbone.levels.2.blocks.20.dcn.output_proj.bias", + "backbone.levels.2.blocks.20.norm2.0.weight", + "backbone.levels.2.blocks.20.norm2.0.bias", + "backbone.levels.2.blocks.20.mlp.fc1.bias", + "backbone.levels.2.blocks.20.mlp.fc2.bias" + ], + "lr_scale": 0.5729948022286164, + "lr": 1.145989604457233e-05, + "weight_decay": 0.0 + }, + "layer_31_decay": { + "param_names": [ + "backbone.levels.2.blocks.20.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.20.dcn.offset.weight", + "backbone.levels.2.blocks.20.dcn.mask.weight", + "backbone.levels.2.blocks.20.dcn.input_proj.weight", + "backbone.levels.2.blocks.20.dcn.output_proj.weight", + "backbone.levels.2.blocks.20.mlp.fc1.weight", + "backbone.levels.2.blocks.20.mlp.fc2.weight" + ], + "lr_scale": 0.5729948022286164, + "lr": 1.145989604457233e-05, + "weight_decay": 0.05 + }, + "layer_32_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.gamma1", + "backbone.levels.2.blocks.21.gamma2", + "backbone.levels.2.blocks.21.norm1.0.weight", + "backbone.levels.2.blocks.21.norm1.0.bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.21.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.21.dcn.offset.bias", + "backbone.levels.2.blocks.21.dcn.mask.bias", + "backbone.levels.2.blocks.21.dcn.input_proj.bias", + "backbone.levels.2.blocks.21.dcn.output_proj.bias", + "backbone.levels.2.blocks.21.norm2.0.weight", + "backbone.levels.2.blocks.21.norm2.0.bias", + "backbone.levels.2.blocks.21.mlp.fc1.bias", + "backbone.levels.2.blocks.21.mlp.fc2.bias" + ], + "lr_scale": 0.6095689385410813, + "lr": 1.2191378770821627e-05, + "weight_decay": 0.0 + }, + "layer_32_decay": { + "param_names": [ + "backbone.levels.2.blocks.21.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.21.dcn.offset.weight", + "backbone.levels.2.blocks.21.dcn.mask.weight", + "backbone.levels.2.blocks.21.dcn.input_proj.weight", + "backbone.levels.2.blocks.21.dcn.output_proj.weight", + "backbone.levels.2.blocks.21.mlp.fc1.weight", + "backbone.levels.2.blocks.21.mlp.fc2.weight" + ], + "lr_scale": 0.6095689385410813, + "lr": 1.2191378770821627e-05, + "weight_decay": 0.05 + }, + "layer_33_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.gamma1", + "backbone.levels.2.blocks.22.gamma2", + "backbone.levels.2.blocks.22.norm1.0.weight", + "backbone.levels.2.blocks.22.norm1.0.bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.22.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.22.dcn.offset.bias", + "backbone.levels.2.blocks.22.dcn.mask.bias", + "backbone.levels.2.blocks.22.dcn.input_proj.bias", + "backbone.levels.2.blocks.22.dcn.output_proj.bias", + "backbone.levels.2.blocks.22.norm2.0.weight", + "backbone.levels.2.blocks.22.norm2.0.bias", + "backbone.levels.2.blocks.22.mlp.fc1.bias", + "backbone.levels.2.blocks.22.mlp.fc2.bias" + ], + "lr_scale": 0.6484775941926397, + "lr": 1.2969551883852795e-05, + "weight_decay": 0.0 + }, + "layer_33_decay": { + "param_names": [ + "backbone.levels.2.blocks.22.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.22.dcn.offset.weight", + "backbone.levels.2.blocks.22.dcn.mask.weight", + "backbone.levels.2.blocks.22.dcn.input_proj.weight", + "backbone.levels.2.blocks.22.dcn.output_proj.weight", + "backbone.levels.2.blocks.22.mlp.fc1.weight", + "backbone.levels.2.blocks.22.mlp.fc2.weight" + ], + "lr_scale": 0.6484775941926397, + "lr": 1.2969551883852795e-05, + "weight_decay": 
0.05 + }, + "layer_34_no_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.gamma1", + "backbone.levels.2.blocks.23.gamma2", + "backbone.levels.2.blocks.23.norm1.0.weight", + "backbone.levels.2.blocks.23.norm1.0.bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.0.bias", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.weight", + "backbone.levels.2.blocks.23.dcn.dw_conv.1.1.bias", + "backbone.levels.2.blocks.23.dcn.offset.bias", + "backbone.levels.2.blocks.23.dcn.mask.bias", + "backbone.levels.2.blocks.23.dcn.input_proj.bias", + "backbone.levels.2.blocks.23.dcn.output_proj.bias", + "backbone.levels.2.blocks.23.norm2.0.weight", + "backbone.levels.2.blocks.23.norm2.0.bias", + "backbone.levels.2.blocks.23.mlp.fc1.bias", + "backbone.levels.2.blocks.23.mlp.fc2.bias" + ], + "lr_scale": 0.6898697810559997, + "lr": 1.3797395621119996e-05, + "weight_decay": 0.0 + }, + "layer_34_decay": { + "param_names": [ + "backbone.levels.2.blocks.23.dcn.dw_conv.0.weight", + "backbone.levels.2.blocks.23.dcn.offset.weight", + "backbone.levels.2.blocks.23.dcn.mask.weight", + "backbone.levels.2.blocks.23.dcn.input_proj.weight", + "backbone.levels.2.blocks.23.dcn.output_proj.weight", + "backbone.levels.2.blocks.23.mlp.fc1.weight", + "backbone.levels.2.blocks.23.mlp.fc2.weight" + ], + "lr_scale": 0.6898697810559997, + "lr": 1.3797395621119996e-05, + "weight_decay": 0.05 + }, + "layer_35_decay": { + "param_names": [ + "backbone.levels.2.downsample.conv.weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.0.dcn.offset.weight", + "backbone.levels.3.blocks.0.dcn.mask.weight", + "backbone.levels.3.blocks.0.dcn.input_proj.weight", + "backbone.levels.3.blocks.0.dcn.output_proj.weight", + "backbone.levels.3.blocks.0.mlp.fc1.weight", + "backbone.levels.3.blocks.0.mlp.fc2.weight" + ], + "lr_scale": 0.7339040223999997, + "lr": 1.4678080447999996e-05, + "weight_decay": 0.05 + }, + "layer_35_no_decay": { + "param_names": [ + "backbone.levels.2.downsample.norm.1.weight", + "backbone.levels.2.downsample.norm.1.bias", + "backbone.levels.3.blocks.0.gamma1", + "backbone.levels.3.blocks.0.gamma2", + "backbone.levels.3.blocks.0.norm1.0.weight", + "backbone.levels.3.blocks.0.norm1.0.bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.0.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.0.dcn.offset.bias", + "backbone.levels.3.blocks.0.dcn.mask.bias", + "backbone.levels.3.blocks.0.dcn.input_proj.bias", + "backbone.levels.3.blocks.0.dcn.output_proj.bias", + "backbone.levels.3.blocks.0.norm2.0.weight", + "backbone.levels.3.blocks.0.norm2.0.bias", + "backbone.levels.3.blocks.0.mlp.fc1.bias", + "backbone.levels.3.blocks.0.mlp.fc2.bias" + ], + "lr_scale": 0.7339040223999997, + "lr": 1.4678080447999996e-05, + "weight_decay": 0.0 + }, + "layer_36_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.gamma1", + "backbone.levels.3.blocks.1.gamma2", + "backbone.levels.3.blocks.1.norm1.0.weight", + "backbone.levels.3.blocks.1.norm1.0.bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.1.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.1.dcn.offset.bias", + "backbone.levels.3.blocks.1.dcn.mask.bias", + "backbone.levels.3.blocks.1.dcn.input_proj.bias", + "backbone.levels.3.blocks.1.dcn.output_proj.bias", + "backbone.levels.3.blocks.1.norm2.0.weight", + "backbone.levels.3.blocks.1.norm2.0.bias", + 
"backbone.levels.3.blocks.1.mlp.fc1.bias", + "backbone.levels.3.blocks.1.mlp.fc2.bias" + ], + "lr_scale": 0.7807489599999998, + "lr": 1.5614979199999998e-05, + "weight_decay": 0.0 + }, + "layer_36_decay": { + "param_names": [ + "backbone.levels.3.blocks.1.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.1.dcn.offset.weight", + "backbone.levels.3.blocks.1.dcn.mask.weight", + "backbone.levels.3.blocks.1.dcn.input_proj.weight", + "backbone.levels.3.blocks.1.dcn.output_proj.weight", + "backbone.levels.3.blocks.1.mlp.fc1.weight", + "backbone.levels.3.blocks.1.mlp.fc2.weight" + ], + "lr_scale": 0.7807489599999998, + "lr": 1.5614979199999998e-05, + "weight_decay": 0.05 + }, + "layer_37_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.gamma1", + "backbone.levels.3.blocks.2.gamma2", + "backbone.levels.3.blocks.2.norm1.0.weight", + "backbone.levels.3.blocks.2.norm1.0.bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.2.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.2.dcn.offset.bias", + "backbone.levels.3.blocks.2.dcn.mask.bias", + "backbone.levels.3.blocks.2.dcn.input_proj.bias", + "backbone.levels.3.blocks.2.dcn.output_proj.bias", + "backbone.levels.3.blocks.2.norm2.0.weight", + "backbone.levels.3.blocks.2.norm2.0.bias", + "backbone.levels.3.blocks.2.mlp.fc1.bias", + "backbone.levels.3.blocks.2.mlp.fc2.bias" + ], + "lr_scale": 0.8305839999999999, + "lr": 1.6611679999999997e-05, + "weight_decay": 0.0 + }, + "layer_37_decay": { + "param_names": [ + "backbone.levels.3.blocks.2.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.2.dcn.offset.weight", + "backbone.levels.3.blocks.2.dcn.mask.weight", + "backbone.levels.3.blocks.2.dcn.input_proj.weight", + "backbone.levels.3.blocks.2.dcn.output_proj.weight", + "backbone.levels.3.blocks.2.mlp.fc1.weight", + "backbone.levels.3.blocks.2.mlp.fc2.weight" + ], + "lr_scale": 0.8305839999999999, + "lr": 1.6611679999999997e-05, + "weight_decay": 0.05 + }, + "layer_38_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.gamma1", + "backbone.levels.3.blocks.3.gamma2", + "backbone.levels.3.blocks.3.norm1.0.weight", + "backbone.levels.3.blocks.3.norm1.0.bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.3.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.3.dcn.offset.bias", + "backbone.levels.3.blocks.3.dcn.mask.bias", + "backbone.levels.3.blocks.3.dcn.input_proj.bias", + "backbone.levels.3.blocks.3.dcn.output_proj.bias", + "backbone.levels.3.blocks.3.norm2.0.weight", + "backbone.levels.3.blocks.3.norm2.0.bias", + "backbone.levels.3.blocks.3.mlp.fc1.bias", + "backbone.levels.3.blocks.3.mlp.fc2.bias" + ], + "lr_scale": 0.8835999999999999, + "lr": 1.7672e-05, + "weight_decay": 0.0 + }, + "layer_38_decay": { + "param_names": [ + "backbone.levels.3.blocks.3.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.3.dcn.offset.weight", + "backbone.levels.3.blocks.3.dcn.mask.weight", + "backbone.levels.3.blocks.3.dcn.input_proj.weight", + "backbone.levels.3.blocks.3.dcn.output_proj.weight", + "backbone.levels.3.blocks.3.mlp.fc1.weight", + "backbone.levels.3.blocks.3.mlp.fc2.weight" + ], + "lr_scale": 0.8835999999999999, + "lr": 1.7672e-05, + "weight_decay": 0.05 + }, + "layer_39_no_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.gamma1", + "backbone.levels.3.blocks.4.gamma2", + "backbone.levels.3.blocks.4.norm1.0.weight", + 
"backbone.levels.3.blocks.4.norm1.0.bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.0.bias", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.weight", + "backbone.levels.3.blocks.4.dcn.dw_conv.1.1.bias", + "backbone.levels.3.blocks.4.dcn.offset.bias", + "backbone.levels.3.blocks.4.dcn.mask.bias", + "backbone.levels.3.blocks.4.dcn.input_proj.bias", + "backbone.levels.3.blocks.4.dcn.output_proj.bias", + "backbone.levels.3.blocks.4.norm2.0.weight", + "backbone.levels.3.blocks.4.norm2.0.bias", + "backbone.levels.3.blocks.4.mlp.fc1.bias", + "backbone.levels.3.blocks.4.mlp.fc2.bias" + ], + "lr_scale": 0.94, + "lr": 1.88e-05, + "weight_decay": 0.0 + }, + "layer_39_decay": { + "param_names": [ + "backbone.levels.3.blocks.4.dcn.dw_conv.0.weight", + "backbone.levels.3.blocks.4.dcn.offset.weight", + "backbone.levels.3.blocks.4.dcn.mask.weight", + "backbone.levels.3.blocks.4.dcn.input_proj.weight", + "backbone.levels.3.blocks.4.dcn.output_proj.weight", + "backbone.levels.3.blocks.4.mlp.fc1.weight", + "backbone.levels.3.blocks.4.mlp.fc2.weight" + ], + "lr_scale": 0.94, + "lr": 1.88e-05, + "weight_decay": 0.05 + }, + "layer_40_decay": { + "param_names": [ + "decode_head.conv_seg.weight", + "decode_head.convs.0.conv.weight", + "decode_head.convs.1.conv.weight", + "decode_head.convs.2.conv.weight", + "decode_head.convs.3.conv.weight", + "decode_head.fusion_conv.conv.weight" + ], + "lr_scale": 1.0, + "lr": 2e-05, + "weight_decay": 0.05 + }, + "layer_40_no_decay": { + "param_names": [ + "decode_head.conv_seg.bias", + "decode_head.convs.0.bn.weight", + "decode_head.convs.0.bn.bias", + "decode_head.convs.1.bn.weight", + "decode_head.convs.1.bn.bias", + "decode_head.convs.2.bn.weight", + "decode_head.convs.2.bn.bias", + "decode_head.convs.3.bn.weight", + "decode_head.convs.3.bn.bias", + "decode_head.fusion_conv.bn.weight", + "decode_head.fusion_conv.bn.bias" + ], + "lr_scale": 1.0, + "lr": 2e-05, + "weight_decay": 0.0 + } +} +2025-05-28 03:16:37,607 - mmseg - INFO - Loaded 1159 images +2025-05-28 03:16:37,615 - mmseg - INFO - load checkpoint from http path: https://huggingface.co/OpenGVLab/InternImage/resolve/main/segformer_internimage_xl_512x1024_80k_mapillary.pth +2025-05-28 03:16:58,651 - mmseg - INFO - Start running, host: yiming@pasteur-hgx-1, work_dir: /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 +2025-05-28 03:16:58,652 - mmseg - INFO - Hooks will be executed in the following order: +before_run: +(VERY_HIGH ) PolyLrUpdaterHook +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_epoch: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_train_iter: +(VERY_HIGH ) PolyLrUpdaterHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook + -------------------- +after_train_iter: +(ABOVE_NORMAL) OptimizerHook +(NORMAL ) CheckpointHook +(LOW ) IterTimerHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_train_epoch: +(NORMAL ) CheckpointHook +(LOW ) DistEvalHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_epoch: +(LOW ) IterTimerHook +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +before_val_iter: +(LOW ) IterTimerHook + -------------------- +after_val_iter: +(LOW ) IterTimerHook + -------------------- 
+after_val_epoch: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +after_run: +(VERY_LOW ) TextLoggerHook +(VERY_LOW ) TensorboardLoggerHook + -------------------- +2025-05-28 03:16:58,652 - mmseg - INFO - workflow: [('train', 1)], max: 160000 iters +2025-05-28 03:16:58,652 - mmseg - INFO - Checkpoints will be saved to /pasteur/u/yiming/homework4/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1 by HardDiskBackend. diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031622.log.json b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031622.log.json new file mode 100644 index 0000000000000000000000000000000000000000..3969c759a689bf33bd6f8c8d2f35876059cfe993 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/20250528_031622.log.json @@ -0,0 +1 @@ +{"env_info": "sys.platform: linux\nPython: 3.9.22 (main, Apr 9 2025, 04:03:41) [Clang 20.1.0 ]\nCUDA available: True\nGPU 0,1,2,3,4,5,6,7: NVIDIA A100-SXM4-80GB\nCUDA_HOME: /usr/local/cuda\nNVCC: Cuda compilation tools, release 11.7, V11.7.64\nGCC: cc (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0\nPyTorch: 1.11.0+cu113\nPyTorch compiling details: PyTorch built with:\n - GCC 7.3\n - C++ Version: 201402\n - Intel(R) Math Kernel Library Version 2020.0.0 Product Build 20191122 for Intel(R) 64 architecture applications\n - Intel(R) MKL-DNN v2.5.2 (Git Hash a9302535553c73243c632ad3c4c80beec3d19a1e)\n - OpenMP 201511 (a.k.a. OpenMP 4.5)\n - LAPACK is enabled (usually provided by MKL)\n - NNPACK is enabled\n - CPU capability usage: AVX2\n - CUDA Runtime 11.3\n - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_80,code=sm_80;-gencode;arch=compute_86,code=sm_86\n - CuDNN 8.2\n - Magma 2.5.2\n - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=11.3, CUDNN_VERSION=8.2.0, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.11.0, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, USE_ROCM=OFF, \n\nTorchVision: 0.12.0+cu113\nOpenCV: 4.11.0\nMMCV: 1.5.0\nMMCV Compiler: GCC 7.3\nMMCV CUDA Compiler: 11.3\nMMSegmentation: 0.27.0+12dc934", "seed": 1742917995, "exp_name": "segformer_internimage_xl_512x1024_160k_mapillary2cityscapes.py", "mmseg_version": "0.27.0+12dc934", "config": "norm_cfg = 
dict(type='SyncBN', requires_grad=True)\nmodel = dict(\n type='EncoderDecoder',\n pretrained=None,\n backbone=dict(\n type='InternImage',\n core_op='DCNv3',\n channels=192,\n depths=[5, 5, 24, 5],\n groups=[12, 24, 48, 96],\n mlp_ratio=4.0,\n drop_path_rate=0.4,\n norm_layer='LN',\n layer_scale=1.0,\n offset_scale=2.0,\n post_norm=True,\n with_cp=False,\n out_indices=(0, 1, 2, 3),\n init_cfg=None),\n decode_head=dict(\n type='SegformerHead',\n in_channels=[192, 384, 768, 1536],\n in_index=[0, 1, 2, 3],\n channels=256,\n dropout_ratio=0.1,\n num_classes=150,\n norm_cfg=dict(type='SyncBN', requires_grad=True),\n align_corners=False,\n loss_decode=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),\n train_cfg=dict(),\n test_cfg=dict(mode='whole'))\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ncrop_size = (512, 1024)\ntrain_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=255),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_semantic_seg'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n]\ndata = dict(\n samples_per_gpu=2,\n workers_per_gpu=2,\n train=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(type='LoadAnnotations'),\n dict(\n type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),\n dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75),\n dict(type='RandomFlip', prob=0.5),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=255),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_semantic_seg'])\n ],\n split='splits/fold_1_train_split.txt'),\n val=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/',\n ann_dir='gtFine/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ],\n split='splits/fold_1_val_split.txt'),\n test=dict(\n type='CityscapesDataset',\n data_root='/pasteur/u/yiming/homework4/cityscapes',\n img_dir='leftImg8bit/val/',\n ann_dir='gtFine/val/',\n pipeline=[\n dict(type='LoadImageFromFile'),\n dict(\n 
type='MultiScaleFlipAug',\n img_scale=(2048, 1024),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(\n type='Normalize',\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n to_rgb=True),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='Collect', keys=['img'])\n ])\n ]))\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook', by_epoch=False),\n dict(type='TensorboardLoggerHook')\n ])\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/segformer_internimage_xl_512x1024_80k_mapillary.pth'\nresume_from = None\nworkflow = [('train', 1)]\ncudnn_benchmark = True\noptimizer = dict(\n type='AdamW',\n lr=2e-05,\n betas=(0.9, 0.999),\n weight_decay=0.05,\n constructor='CustomLayerDecayOptimizerConstructor',\n paramwise_cfg=dict(\n num_layers=39,\n layer_decay_rate=0.94,\n depths=[5, 5, 24, 5],\n offset_lr_scale=1.0))\noptimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))\nlr_config = dict(\n policy='poly',\n warmup='linear',\n warmup_iters=1500,\n warmup_ratio=1e-06,\n power=1.0,\n min_lr=0.0,\n by_epoch=False)\nrunner = dict(type='IterBasedRunner', max_iters=160000)\ncheckpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1)\nevaluation = dict(\n interval=4000, metric='mIoU', pre_eval=True, save_best='mIoU')\nwork_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1'\ngpu_ids = range(0, 8)\nauto_resume = False\ndevice = 'cuda'\nseed = 1742917995\n", "CLASSES": ["road", "sidewalk", "building", "wall", "fence", "pole", "traffic light", "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car", "truck", "bus", "train", "motorcycle", "bicycle"], "PALETTE": [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], [0, 0, 230], [119, 11, 32]], "hook_msgs": {}} diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..dfddbed993cb5262596f064face3a0697943b8dc --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/mask2former_internimage_h_1024x1024_80k_mapillary2cityscapes.py @@ -0,0 +1,308 @@ +num_things_classes = 100 +num_stuff_classes = 50 +num_classes = 19 +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoderMask2Former', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=320, + depths=[6, 6, 32, 6], + groups=[10, 20, 40, 80], + mlp_ratio=4.0, + drop_path_rate=0.5, + norm_layer='LN', + layer_scale=None, + offset_scale=1.0, + post_norm=False, + dw_kernel_size=5, + res_post_norm=True, + level2_post_norm=True, + level2_post_norm_block_ids=[5, 11, 17, 23, 29], + center_feature_scale=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='Mask2FormerHead', + in_channels=[320, 640, 1280, 2560], + feat_channels=256, + out_channels=256, + in_index=[0, 1, 2, 3], + num_things_classes=100, + num_stuff_classes=50, + num_queries=100, + 
num_transformer_feat_level=3, + pixel_decoder=dict( + type='MSDeformAttnPixelDecoder', + num_outs=3, + norm_cfg=dict(type='GN', num_groups=32), + act_cfg=dict(type='ReLU'), + encoder=dict( + type='DetrTransformerEncoder', + num_layers=6, + transformerlayers=dict( + type='BaseTransformerLayer', + attn_cfgs=dict( + type='MultiScaleDeformableAttention', + embed_dims=256, + num_heads=8, + num_levels=3, + num_points=4, + im2col_step=64, + dropout=0.0, + batch_first=False, + norm_cfg=None, + init_cfg=None), + ffn_cfgs=dict( + type='FFN', + embed_dims=256, + feedforward_channels=1024, + num_fcs=2, + ffn_drop=0.0, + act_cfg=dict(type='ReLU', inplace=True), + with_cp=False), + operation_order=('self_attn', 'norm', 'ffn', 'norm')), + init_cfg=None), + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + init_cfg=None), + enforce_decoder_input_project=False, + positional_encoding=dict( + type='SinePositionalEncoding', num_feats=128, normalize=True), + transformer_decoder=dict( + type='DetrTransformerDecoder', + return_intermediate=True, + num_layers=9, + transformerlayers=dict( + type='DetrTransformerDecoderLayer', + attn_cfgs=dict( + type='MultiheadAttention', + embed_dims=256, + num_heads=8, + attn_drop=0.0, + proj_drop=0.0, + dropout_layer=None, + batch_first=False), + ffn_cfgs=dict( + embed_dims=256, + feedforward_channels=2048, + num_fcs=2, + act_cfg=dict(type='ReLU', inplace=True), + ffn_drop=0.0, + dropout_layer=None, + add_identity=True, + with_cp=False), + feedforward_channels=2048, + operation_order=('cross_attn', 'norm', 'self_attn', 'norm', + 'ffn', 'norm')), + init_cfg=None), + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=2.0, + reduction='mean', + class_weight=[ + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.1 + ]), + loss_mask=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + reduction='mean', + loss_weight=5.0), + loss_dice=dict( + type='DiceLoss', + use_sigmoid=True, + activate=True, + reduction='mean', + naive_dice=True, + eps=1.0, + loss_weight=5.0), + num_classes=19), + train_cfg=dict( + num_points=12544, + oversample_ratio=3.0, + importance_sample_ratio=0.75, + assigner=dict( + type='MaskHungarianAssigner', + cls_cost=dict(type='ClassificationCost', weight=2.0), + mask_cost=dict( + type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True), + dice_cost=dict( + type='DiceCost', weight=5.0, pred_act=True, eps=1.0)), + sampler=dict(type='MaskPseudoSampler')), + test_cfg=dict( + panoptic_on=True, + semantic_on=False, + instance_on=True, + max_per_image=100, + iou_thr=0.8, + filter_low_score=True, + mode='slide', + crop_size=(1024, 1024), + stride=(512, 512)), + init_cfg=None) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (1024, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 
'gt_masks', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict( + type='RandomCrop', crop_size=(1024, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(1024, 1024), pad_val=0, seg_pad_val=255), + dict(type='ToMask'), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg', 'gt_masks', 'gt_labels']) + ], + split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='ResizeToMultiple', size_divisor=32), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/mask2former_internimage_h_896x896_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=1e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=50, + layer_decay_rate=0.95, + depths=[6, 6, 32, 6], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=80000) +checkpoint_config = dict(by_epoch=False, 
interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=2000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 8) +auto_resume = False diff --git a/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/segformer_internimage_xl_512x1024_160k_mapillary2cityscapes.py b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/segformer_internimage_xl_512x1024_160k_mapillary2cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..a0be979edc16af0e08bf73d58970740082b1f5c4 --- /dev/null +++ b/segmentation/work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1/segformer_internimage_xl_512x1024_160k_mapillary2cityscapes.py @@ -0,0 +1,183 @@ +norm_cfg = dict(type='SyncBN', requires_grad=True) +model = dict( + type='EncoderDecoder', + pretrained=None, + backbone=dict( + type='InternImage', + core_op='DCNv3', + channels=192, + depths=[5, 5, 24, 5], + groups=[12, 24, 48, 96], + mlp_ratio=4.0, + drop_path_rate=0.4, + norm_layer='LN', + layer_scale=1.0, + offset_scale=2.0, + post_norm=True, + with_cp=False, + out_indices=(0, 1, 2, 3), + init_cfg=None), + decode_head=dict( + type='SegformerHead', + in_channels=[192, 384, 768, 1536], + in_index=[0, 1, 2, 3], + channels=256, + dropout_ratio=0.1, + num_classes=150, + norm_cfg=dict(type='SyncBN', requires_grad=True), + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + train_cfg=dict(), + test_cfg=dict(mode='whole')) +dataset_type = 'CityscapesDataset' +data_root = 'data/cityscapes/' +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +crop_size = (512, 1024) +train_pipeline = [ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict(type='LoadAnnotations'), + dict( + type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)), + dict(type='RandomCrop', crop_size=(512, 1024), cat_max_ratio=0.75), + dict(type='RandomFlip', prob=0.5), + dict(type='PhotoMetricDistortion'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='Pad', size=(512, 1024), pad_val=0, seg_pad_val=255), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_semantic_seg']) + ], + 
split='splits/fold_1_train_split.txt'), + val=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/', + ann_dir='gtFine/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ], + split='splits/fold_1_val_split.txt'), + test=dict( + type='CityscapesDataset', + data_root='/pasteur/u/yiming/homework4/cityscapes', + img_dir='leftImg8bit/val/', + ann_dir='gtFine/val/', + pipeline=[ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(2048, 1024), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict( + type='Normalize', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + to_rgb=True), + dict(type='ImageToTensor', keys=['img']), + dict(type='Collect', keys=['img']) + ]) + ])) +log_config = dict( + interval=50, + hooks=[ + dict(type='TextLoggerHook', by_epoch=False), + dict(type='TensorboardLoggerHook') + ]) +dist_params = dict(backend='nccl') +log_level = 'INFO' +load_from = 'https://huggingface.co/OpenGVLab/InternImage/resolve/main/segformer_internimage_xl_512x1024_80k_mapillary.pth' +resume_from = None +workflow = [('train', 1)] +cudnn_benchmark = True +optimizer = dict( + type='AdamW', + lr=2e-05, + betas=(0.9, 0.999), + weight_decay=0.05, + constructor='CustomLayerDecayOptimizerConstructor', + paramwise_cfg=dict( + num_layers=39, + layer_decay_rate=0.94, + depths=[5, 5, 24, 5], + offset_lr_scale=1.0)) +optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2)) +lr_config = dict( + policy='poly', + warmup='linear', + warmup_iters=1500, + warmup_ratio=1e-06, + power=1.0, + min_lr=0.0, + by_epoch=False) +runner = dict(type='IterBasedRunner', max_iters=160000) +checkpoint_config = dict(by_epoch=False, interval=1000, max_keep_ckpts=1) +evaluation = dict( + interval=4000, metric='mIoU', pre_eval=True, save_best='mIoU') +work_dir = 'work_dirs/cityscapes_kfold/mask2former_internimage_h_fold_1' +gpu_ids = range(0, 8) +auto_resume = False
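Note on the per-layer learning rates in the param-group dump at the top of this log: groups such as "layer_39_decay" report lr_scale 0.94 and lr 1.88e-05 against the base lr of 2e-05, while "layer_40_decay" (the decode head) keeps scale 1.0. This is consistent with a geometric layer-wise lr decay driven by paramwise_cfg (num_layers=39, layer_decay_rate=0.94, depths=[5, 5, 24, 5]). The snippet below is only a minimal sketch of that rule under the usual BEiT-style indexing assumption (stem = layer 0, the 39 backbone blocks = layers 1..39, decode head = layer 40); it is not the actual CustomLayerDecayOptimizerConstructor implementation from the InternImage repo.

    # Sketch: reproduce the lr values shown in the param-group dump above.
    base_lr = 2e-05            # optimizer.lr in the config
    layer_decay_rate = 0.94    # paramwise_cfg.layer_decay_rate
    depths = [5, 5, 24, 5]     # paramwise_cfg.depths
    num_layers = sum(depths)   # 39, matching paramwise_cfg.num_layers

    def lr_for_layer(layer_id: int) -> float:
        """Learning rate for a layer id (0 = stem, num_layers + 1 = decode head)."""
        scale = layer_decay_rate ** (num_layers + 1 - layer_id)
        return base_lr * scale

    print(lr_for_layer(39))  # ~1.88e-05, as logged for layer_39_decay / layer_39_no_decay
    print(lr_for_layer(40))  # 2e-05 (scale 1.0), as logged for the decode-head groups

Under this assumption the deepest backbone blocks train at roughly 94% of the head's learning rate and earlier layers decay geometrically from there, which matches the monotonically decreasing lr values listed for the lower-numbered layer groups earlier in the dump.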