# crop_health_monitor / transforms.py
# (Scraped Hugging Face page chrome commented out so the module is valid Python:
#  commit 873e161 by CZerion, "Update transforms.py", 521 Bytes.)
from transformers import AutoImageProcessor
import torchvision.transforms as T
def build_transforms(backbone_model="google/vit-base-patch16-224-in21k", train=True):
    """Build a torchvision preprocessing pipeline matched to *backbone_model*.

    Loads the model's image processor to obtain its normalization statistics
    and expected input resolution, so the pipeline stays correct for backbones
    whose input size is not 224.

    Args:
        backbone_model: Hugging Face model id whose ``AutoImageProcessor``
            supplies ``image_mean``/``image_std`` and the crop size.
        train: When True (default, original behavior), return the augmented
            training pipeline; when False, return a deterministic
            resize + center-crop pipeline for evaluation.

    Returns:
        A ``torchvision.transforms.Compose`` producing a normalized tensor.
    """
    processor = AutoImageProcessor.from_pretrained(backbone_model)

    # Derive the crop size from the processor config instead of hard-coding
    # 224. Processor ``size`` may be a dict ({"height": ...} or
    # {"shortest_edge": ...}) or a bare int depending on the processor class;
    # fall back to 224 (the original behavior) if absent.
    size = getattr(processor, "size", None)
    if isinstance(size, dict):
        crop = size.get("height") or size.get("shortest_edge") or 224
    else:
        crop = size or 224

    normalize = T.Normalize(mean=processor.image_mean, std=processor.image_std)

    if train:
        return T.Compose([
            T.RandomResizedCrop(crop),
            T.RandomHorizontalFlip(),
            T.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.1),
            T.RandomRotation(20),
            T.ToTensor(),
            normalize,
        ])
    # Eval-time pipeline: no random augmentation, just deterministic sizing.
    return T.Compose([
        T.Resize(crop),
        T.CenterCrop(crop),
        T.ToTensor(),
        normalize,
    ])