File size: 2,573 Bytes
a26ae4f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
from ultralytics import YOLO
from ultralytics.data import build_dataloader
from ultralytics.data.dataset import YOLODataset
import torch
import cv2

class CustomYOLODataset(YOLODataset):
    """YOLODataset variant that forces and validates 4-channel (RGB-D) images."""

    def __init__(self, *args, **kwargs):
        # Force the dataset config to declare 4 input channels (RGB + depth)
        # without mutating the caller's dict.
        kwargs["data"] = dict(kwargs.get("data", {}), channels=4)
        super().__init__(*args, **kwargs)

    def __getitem__(self, index):
        """Validate that the image on disk really has 4 channels, then delegate.

        Raises:
            FileNotFoundError: if the image cannot be read.
            ValueError: if the image is not a 4-channel array.
        """
        img_path = self.im_files[index]
        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        # cv2.imread returns None on unreadable/missing files; fail loudly with
        # the offending path instead of crashing on `.shape`.
        if img is None:
            raise FileNotFoundError(f"Could not read image {img_path}")
        # Grayscale images come back 2-D, where shape[-1] would be the width;
        # check ndim first. Explicit raise (not assert) survives `python -O`.
        if img.ndim != 3 or img.shape[-1] != 4:
            raise ValueError(
                f"Image {img_path} has shape {img.shape}; expected 4 channels"
            )
        return super().__getitem__(index)

def build_dataloader_override(cfg, batch, img_size, stride, single_cls=False, hyp=None, augment=False, cache=False, pad=0.0, rect=False, rank=-1, workers=8, shuffle=False, data_info=None):
    """Drop-in replacement for ultralytics' build_dataloader.

    Builds a 4-channel-aware CustomYOLODataset and wraps it in a plain
    torch DataLoader, reusing the dataset's own collate_fn when it has one.
    """
    dataset_kwargs = dict(
        data=data_info,
        img_size=img_size,
        batch_size=batch,
        augment=augment,
        hyp=hyp,
        rect=rect,
        cache=cache,
        single_cls=single_cls,
        stride=int(stride),
        pad=pad,
        rank=rank,
    )
    ds = CustomYOLODataset(**dataset_kwargs)
    return torch.utils.data.DataLoader(
        dataset=ds,
        batch_size=batch,
        shuffle=shuffle,
        num_workers=workers,
        sampler=None,
        pin_memory=True,
        collate_fn=getattr(ds, "collate_fn", None),
    )

# ---- Install the dataloader override ----
# BUG FIX: the original did `build_dataloader.build_dataloader = ...`, which
# sets an attribute ON the imported function object — a no-op for every caller.
# The module attribute has to be replaced instead.
# NOTE(review): any trainer module that already did
# `from ultralytics.data import build_dataloader` keeps its own binding;
# confirm the training path resolves the name at call time.
import ultralytics.data

ultralytics.data.build_dataloader = build_dataloader_override
try:
    # Patch the defining submodule too, so late importers see the override.
    import ultralytics.data.build as _ud_build
    _ud_build.build_dataloader = build_dataloader_override
except ImportError:
    pass

# Initialize model from a 4-channel architecture YAML (YAML must declare ch=4).
model = YOLO("yolo11_rgbd.yaml")

# ---- Load Pretrained Weights ----
pretrained = YOLO("yolo11n.pt").model.state_dict()
model_state = model.model.state_dict()
# Skip the detection head (model.23) and the first conv, whose shapes differ
# between the 3-channel pretrained model and the 4-channel target model.
filtered_pretrained = {
    k: v
    for k, v in pretrained.items()
    if not k.startswith(("model.23", "model.0.conv"))
}
model_state.update(filtered_pretrained)

with torch.no_grad():
    first_conv = pretrained["model.0.conv.weight"]
    # Keep the pretrained RGB filters; synthesize small random filters for the
    # new depth channel.
    rgb_weights = first_conv[:, :3]
    # BUG FIX: the original hard-coded torch.randn(64, 1, 3, 3) (the yolo11l
    # first-conv width) while loading yolo11n weights (16 output channels per
    # the original's own comment), so torch.cat would fail on mismatched
    # output channels. Derive the shape from the pretrained tensor instead —
    # this works for every model scale (n/s/m/l/x) with no manual edit.
    out_ch, _, kh, kw = first_conv.shape
    depth_weights = torch.randn(out_ch, 1, kh, kw) * 0.1
    model_state["model.0.conv.weight"] = torch.cat([rgb_weights, depth_weights], dim=1)

model.model.load_state_dict(model_state, strict=False)

# ---- Critical Warmup Fix ----
def custom_warmup(self, imgsz=(1, 4, 640, 640)):
    """Warm up with a 4-channel dummy batch instead of the default 3-channel one."""
    self.forward(torch.zeros(imgsz).to(self.device))

# Bind as a method on the underlying module.
# NOTE(review): nn.Module has no `warmup` of its own; this only matters if
# something later calls model.model.warmup() — confirm the inference/training
# path actually does.
model.model.warmup = custom_warmup.__get__(model.model)

# Train
model.train(
    data="usplf_rgbd_dataset.yaml",
    epochs=200,
    imgsz=640,
    batch=10,
    device="0",
    name="yolov11_rgbd_pretrained",
)