from transformers import AutoImageProcessor
import torchvision.transforms as T


def build_transforms(backbone_model="google/vit-base-patch16-224-in21k"):
    # Pull the normalization statistics from the backbone's image processor
    # so augmented images match what the pretrained model expects.
    processor = AutoImageProcessor.from_pretrained(backbone_model)
    return T.Compose([
        T.RandomResizedCrop(224),  # crop to the ViT input resolution
        T.RandomHorizontalFlip(),
        T.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
        T.RandomRotation(20),  # rotate by up to ±20 degrees
        T.ToTensor(),
        T.Normalize(mean=processor.image_mean, std=processor.image_std),
    ])
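

# A minimal usage sketch, assuming a local RGB image at "example.jpg"
# (the path is illustrative, not part of this repository):
if __name__ == "__main__":
    from PIL import Image

    transforms = build_transforms()
    image = Image.open("example.jpg").convert("RGB")
    tensor = transforms(image)  # torch.Tensor of shape (3, 224, 224)
    print(tensor.shape)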