Upload 18 files
- .gitattributes +2 -0
- Custom_Resnet_v1.py +72 -0
- README.MD +38 -0
- app.py +90 -0
- bird.jpg +0 -0
- cat.jpg +0 -0
- custom_resnet.py +98 -0
- cyclic_lr_util.py +33 -0
- data_transform_cifar10_custom_resnet.py +45 -0
- deer.jpg +3 -0
- dog.jpg +0 -0
- epoch=22-step=4140.ckpt +3 -0
- frog.jpg +0 -0
- horse.jpg +0 -0
- plane.jpg +0 -0
- requirements.txt +10 -0
- ship.jpg +3 -0
- truck.jpg +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+deer.jpg filter=lfs diff=lfs merge=lfs -text
+ship.jpg filter=lfs diff=lfs merge=lfs -text
Custom_Resnet_v1.py
ADDED
@@ -0,0 +1,72 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class ConvBNBlock(nn.Module):
    # 3x3 convolution -> batch norm -> dropout -> ReLU
    def __init__(self, in_planes, planes, stride=1, p=0.0):
        super(ConvBNBlock, self).__init__()
        self.dropout_prob = p
        self.conv_bn_block = nn.Sequential(
            nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(planes)
        )
        self.drop_out = nn.Dropout2d(p=self.dropout_prob)

    def forward(self, x):
        out = F.relu(self.drop_out(self.conv_bn_block(x)))
        return out

class TransitionBlock(nn.Module):
    # 3x3 convolution -> batch norm -> ReLU -> 2x2 max pool (halves the spatial size)
    def __init__(self, in_planes, planes, stride=1, p=0.0):
        super(TransitionBlock, self).__init__()
        self.p = p
        self.transition_block = nn.Sequential(
            nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(planes),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            nn.Dropout2d(p=self.p)
        )

    def forward(self, x):
        x = self.transition_block(x)
        return x

class ResBlock(nn.Module):
    # Transition block followed by two conv blocks whose output is added back
    # to the transition output (the skip connection)
    def __init__(self, in_planes, planes, stride=1, p=0.0):
        super(ResBlock, self).__init__()
        self.p = p
        self.transition_block = TransitionBlock(in_planes, planes, stride, p)
        self.conv_block1 = ConvBNBlock(planes, planes, stride, p)
        self.conv_block2 = ConvBNBlock(planes, planes, stride, p)

    def forward(self, x):
        x = self.transition_block(x)
        r = self.conv_block2(self.conv_block1(x))
        out = x + r
        return out

class CustomResNet(nn.Module):
    # CIFAR10-sized custom ResNet: prep conv, two residual layers with a transition in between
    def __init__(self, p=0.0, num_classes=10):
        super(CustomResNet, self).__init__()
        self.in_planes = 64
        self.p = p

        self.conv = ConvBNBlock(3, 64, 1, p)            # 32x32x64
        self.layer1 = ResBlock(64, 128, 1, p)           # 16x16x128
        self.layer2 = TransitionBlock(128, 256, 1, p)   # 8x8x256
        self.layer3 = ResBlock(256, 512, 1, p)          # 4x4x512
        self.max_pool = nn.MaxPool2d(4, 4)              # 1x1x512
        self.linear = nn.Linear(512, num_classes)

    def forward(self, x):
        out = self.conv(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.max_pool(out)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return F.log_softmax(out, dim=1)
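A quick sanity check of this architecture is a dummy forward pass; a minimal sketch (the batch size of 2 is arbitrary, and the 32x32 input matches the CIFAR10 setup the model is built for):

```python
import torch
from Custom_Resnet_v1 import CustomResNet

model = CustomResNet(p=0.0, num_classes=10)
x = torch.randn(2, 3, 32, 32)   # two CIFAR10-sized RGB images
out = model(x)
print(out.shape)                # torch.Size([2, 10]) -- per-class log-probabilities
```

Each ResBlock and the middle TransitionBlock halve the spatial size (32 -> 16 -> 8 -> 4), so the final 4x4 max pool leaves a single 512-dimensional feature vector per image.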
README.MD
ADDED
@@ -0,0 +1,38 @@

# CIFAR10 Trained on a Custom ResNet Model with GradCAM

This is a simple Gradio interface that lets you run inference with a custom ResNet model trained on CIFAR10 and visualize the GradCAM results. GradCAM (Gradient-weighted Class Activation Mapping) is a technique that highlights the regions of an image that are important for the model's prediction.

## How to Use the Interface

1. Upload an image by clicking the "Choose File" button. The image should be 32x32 pixels.

2. **Show GradCAM Images**: Select this checkbox to visualize the GradCAM results on the uploaded image. GradCAM highlights the regions of the image that are important for the model's prediction.

3. **Number of GradCAM Images**: Enter the number of GradCAM images you want to visualize. This option only takes effect when "Show GradCAM Images" is selected.

4. **Which Layer?**: Use the slider to choose the layer from which GradCAM is computed (-3, -2, or -1). This option only takes effect when "Show GradCAM Images" is selected.

5. **Opacity of GradCAM**: Use the slider to adjust the opacity of the GradCAM overlay on the image. This option only takes effect when "Show GradCAM Images" is selected.

6. **Show Misclassified Images**: Select this checkbox to view a gallery of misclassified images, chosen at random from a folder of misclassified images.

7. **Number of Misclassified Images (max 10)**: Enter the number of misclassified images you want to display. This option only takes effect when "Show Misclassified Images" is selected.

8. **Number of Top Classes (max 10)**: Enter the number of top classes to display along with their confidence scores.

9. Click the "Submit" button to run inference and visualize the results.

## Output

1. **Top Classes**: The top predicted classes for the uploaded image, with their confidence scores.

2. **Output Image**: The GradCAM result overlaid on the uploaded image, highlighting the regions the model relied on for its prediction. If "Show GradCAM Images" is not selected, the original uploaded image is shown instead.

3. **Misclassified Images**: If "Show Misclassified Images" is selected, a gallery of misclassified images, chosen at random from a folder of misclassified images.

## Examples

The interface provides some example images with pre-selected settings. Feel free to modify the settings or upload your own images to explore GradCAM on different inputs.

Please note that the model is pre-trained and loaded from a checkpoint file. You can adjust the model and GradCAM settings in the code to experiment with different configurations.
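For readers who want the core GradCAM call outside the interface, here is a minimal sketch mirroring what `app.py` does with the `pytorch_grad_cam` package; `model`, the normalized `input_tensor`, and the [0, 1] float image `rgb_img` are assumed to be prepared as in `app.py`:

```python
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image

# -1 on the "Which Layer?" slider corresponds to the last conv block of layer3
cam = GradCAM(model=model, target_layers=model.layer3.conv_block2.conv_bn_block, use_cuda=False)
heatmap = cam(input_tensor=input_tensor, targets=None)[0, :]   # HxW activation map in [0, 1]
overlay = show_cam_on_image(rgb_img, heatmap, use_rgb=True, image_weight=0.5)
```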
app.py
ADDED
@@ -0,0 +1,90 @@
import torch, torchvision
from torchvision import transforms
import numpy as np
import gradio as gr
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
from custom_resnet import Assignment12Resnet
import random
import os

# load_from_checkpoint is a classmethod, so call it on the class rather than on an instance
pl_model = Assignment12Resnet.load_from_checkpoint("epoch=22-step=4140.ckpt", map_location=torch.device("cpu"))

# Approximate inverse of the CIFAR10 normalization, for turning tensors back into viewable images
inv_normalize = transforms.Normalize(
    mean=[-0.50/0.23, -0.50/0.23, -0.50/0.23],
    std=[1/0.23, 1/0.23, 1/0.23]
)
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')
model = pl_model.model
# Map the layer slider values (-3, -2, -1) to candidate GradCAM target layers inside layer3
model_dict = dict(zip([-3, -2, -1],
                      [pl_model.model.layer3.transition_block.transition_block,
                       pl_model.model.layer3.conv_block1.conv_bn_block,
                       pl_model.model.layer3.conv_block2.conv_bn_block]))

# Function to load images from a folder and return a random selection
def load_images_from_folder(num_misclassified, folder=None):
    images = []
    for filename in os.listdir(folder):
        if filename.endswith(".jpg") or filename.endswith(".png"):
            img = Image.open(os.path.join(folder, filename))
            images.append(img)
    return random.choices(images, k=int(num_misclassified))

def inference(input_img, show_gradcam=True, num_gradcam_images=1, target_layer_number=-1,
              opacity=0.5, show_misclassified=True, num_misclassified_images=10, num_top_classes=3):
    org_img = input_img
    input_img = pl_model.test_transform(input_img)
    input_img = input_img.unsqueeze(0)
    model.eval()
    outputs = model(input_img)
    softmax = torch.nn.Softmax(dim=0)
    o = softmax(outputs.flatten())
    confidences = {classes[i]: float(o[i]) for i in range(10)}
    _, prediction = torch.max(outputs, 1)
    if show_gradcam:
        target_layers = model_dict[target_layer_number]
        cam = GradCAM(model=model, target_layers=target_layers, use_cuda=False)
        grayscale_cam = cam(input_tensor=input_img, targets=None)
        grayscale_cam = grayscale_cam[0, :]
        # De-normalized copy of the input; currently unused, since the overlay below uses org_img
        img = inv_normalize(input_img.squeeze(0))
        rgb_img = np.transpose(img.numpy(), (1, 2, 0))
        visualization = show_cam_on_image(org_img / 255, grayscale_cam, use_rgb=True, image_weight=opacity)
    else:
        visualization = org_img

    misclassified_images = None
    if show_misclassified:
        misclassified_images = load_images_from_folder(num_misclassified_images, folder='./misclassified_images')

    return confidences, visualization, misclassified_images

title = "CIFAR10 trained on Custom ResNet Model with GradCAM"
description = "A simple Gradio interface to run inference on the custom ResNet model and view GradCAM results"
examples = [[img_name, True, 1, -2, 0.5, True, 5, 3]
            for img_name in ("cat.jpg", "dog.jpg", "bird.jpg", "ship.jpg", "truck.jpg",
                             "deer.jpg", "frog.jpg", "horse.jpg", "plane.jpg")]

demo = gr.Interface(
    inference,
    inputs=[
        gr.Image(shape=(32, 32)),
        gr.Checkbox(value=True, label="Show GradCAM Images", show_label=True),
        gr.Number(value=1, label="Number of GradCAM Images", minimum=1, maximum=1),
        gr.Slider(minimum=-3, maximum=-1, value=-1, step=1, label="Which Layer?"),
        gr.Slider(minimum=0, maximum=1.0, value=0.5, label="Opacity of GradCAM"),
        gr.Checkbox(label="Show Misclassified Images", value=True, show_label=True),
        gr.Number(value=5, label="Number of Misclassified Images (max 10)", minimum=1, maximum=10),
        gr.Number(value=3, label="Number of Top Classes (max 10)", minimum=1, maximum=10)
    ],
    outputs=[
        gr.Label(num_top_classes=3),
        gr.Image(shape=(32, 32), label="Output").style(width=128, height=128),
        gr.Gallery(label="Misclassified Images")
    ],
    title=title,
    description=description,
    examples=examples,
)

# Launch the Gradio interface
demo.launch()
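For a quick check outside the web UI, `inference` can be called directly; a minimal sketch, assuming one of the bundled example images and replicating the 32x32 resize that `gr.Image(shape=(32, 32))` performs:

```python
import numpy as np
from PIL import Image

img = np.array(Image.open("cat.jpg").convert("RGB").resize((32, 32)))  # HxWx3 uint8
confidences, visualization, misclassified = inference(
    img, show_gradcam=True, target_layer_number=-1, opacity=0.5, show_misclassified=False
)
print(sorted(confidences.items(), key=lambda kv: -kv[1])[:3])  # top-3 predicted classes
```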
bird.jpg
ADDED
cat.jpg
ADDED
custom_resnet.py
ADDED
@@ -0,0 +1,98 @@
# my addition
import os
import torch
from torch.optim.lr_scheduler import LambdaLR
import torch.optim as optim
from pytorch_lightning import LightningModule
from Custom_Resnet_v1 import CustomResNet
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split
import torchvision
from torchmetrics.functional import accuracy
from torchvision.datasets import CIFAR10
from data_transform_cifar10_custom_resnet import get_train_transform, get_test_transform
from cyclic_lr_util import custom_one_cycle_lr

PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
AVAIL_GPUS = min(1, torch.cuda.device_count())
BATCH_SIZE = 256 if AVAIL_GPUS else 64

# One-cycle schedule: ramp 0.04 -> 0.4 over epochs 0-5, back down to 0.04 by epoch 18,
# then decay to 0.004 by epoch 24
one_cycle_lr = custom_one_cycle_lr(no_of_images=50176, batch_size=2, base_lr=0.04, max_lr=0.4,
                                   final_lr=0.004, epoch_stage1=5, epoch_stage2=18, total_epochs=24)

class Assignment12Resnet(LightningModule):
    def __init__(self, lr=0.05, data_dir=PATH_DATASETS):
        super().__init__()
        # Set our init args as class attributes
        self.data_dir = data_dir
        self.learning_rate = lr

        # Hardcode some dataset-specific attributes
        self.num_classes = 10
        self.train_transform = get_train_transform()
        self.test_transform = get_test_transform()
        self.cifar10_trainset = None
        self.cifar10_testset = None
        self.save_hyperparameters()
        self.model = CustomResNet()

    def forward(self, x):
        out = self.model(x)
        return out

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        self.log("train_loss", loss)
        return loss

    def evaluate(self, batch, stage=None):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        preds = torch.argmax(logits, dim=1)
        acc = accuracy(preds, y, task="multiclass", num_classes=10)

        if stage:
            self.log(f"{stage}_loss", loss, prog_bar=True)
            self.log(f"{stage}_acc", acc, prog_bar=True)

    def validation_step(self, batch, batch_idx):
        self.evaluate(batch, "val")

    def test_step(self, batch, batch_idx):
        self.evaluate(batch, "test")

    def configure_optimizers(self):
        optimizer = optim.SGD(self.model.parameters(), lr=0.04, momentum=0.9)
        steps_per_epoch = 45000 // BATCH_SIZE  # currently unused; the LR lambda tracks iterations itself
        scheduler_dict = {
            "scheduler": LambdaLR(optimizer, lr_lambda=[one_cycle_lr]),
            "interval": "step",  # step the schedule once per batch
        }
        return {"optimizer": optimizer, "lr_scheduler": scheduler_dict}

    ####################
    # DATA RELATED HOOKS
    ####################

    def prepare_data(self):
        # download
        CIFAR10(self.data_dir, train=True, download=True)
        CIFAR10(self.data_dir, train=False, download=True)

    def setup(self, stage=None):
        # Assign train/val datasets for use in dataloaders
        if stage == "fit" or stage is None:
            cifar10_trainset = torchvision.datasets.CIFAR10(root=self.data_dir, train=True, download=True, transform=self.train_transform)
            self.cifar_train, self.cifar_val = random_split(cifar10_trainset, [46000, 4000])
        # Assign test dataset for use in dataloader(s)
        if stage == "test" or stage is None:
            self.cifar10_testset = torchvision.datasets.CIFAR10(root=self.data_dir, train=False, download=True, transform=self.test_transform)

    def train_dataloader(self):
        return DataLoader(self.cifar_train, batch_size=BATCH_SIZE, shuffle=True, num_workers=os.cpu_count())

    def val_dataloader(self):
        return DataLoader(self.cifar_val, batch_size=BATCH_SIZE, shuffle=False, num_workers=os.cpu_count())

    def test_dataloader(self):
        return DataLoader(self.cifar10_testset, batch_size=BATCH_SIZE, shuffle=False, num_workers=os.cpu_count())
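A checkpoint like `epoch=22-step=4140.ckpt` is what a standard Lightning run of this module produces; a minimal training sketch, with the caveat that the exact Trainer flags behind the uploaded checkpoint are not recorded in this commit:

```python
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from custom_resnet import Assignment12Resnet

model = Assignment12Resnet(lr=0.05)
ckpt_cb = ModelCheckpoint(monitor="val_acc", mode="max")  # emits files named like epoch=22-step=4140.ckpt
trainer = pl.Trainer(max_epochs=24, accelerator="auto", callbacks=[ckpt_cb])
trainer.fit(model)  # data download, splits, and loaders all live on the module itself
```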
cyclic_lr_util.py
ADDED
@@ -0,0 +1,33 @@
import numpy as np
import matplotlib.pyplot as plt

def triangle_lr_plot(lr_min, lr_max, step_size, iterations):
    # Plot the CLR 'triangular' policy: LR ramps linearly between lr_min and lr_max
    lr_list = []
    it_list = [j for j in range(iterations + 1)]
    for i in range(iterations + 1):
        half_cycle_count = np.floor(i / step_size)
        x = i - half_cycle_count * step_size
        if half_cycle_count % 2 == 0:
            lr = lr_min + x * (lr_max - lr_min) / step_size
        else:
            lr = lr_max - x * (lr_max - lr_min) / step_size
        lr_list.append(lr)
    fig, axs = plt.subplots(figsize=(5, 5))
    axs.plot(it_list, lr_list)
    plt.title("CLR - 'triangular' Policy")
    plt.xlabel("Iteration")
    plt.ylabel("Learning Rate")
    plt.show()

def custom_one_cycle_lr(no_of_images, batch_size, base_lr, max_lr, final_lr, epoch_stage1=5, epoch_stage2=18, total_epochs=24):
    # Piecewise-linear one-cycle schedule over epochs, returned as a per-iteration lambda
    lr_schedule = lambda t: np.interp([t], [0, epoch_stage1, epoch_stage2, total_epochs], [base_lr, max_lr, base_lr, final_lr])[0]
    lr_lambda = lambda it: lr_schedule(it * batch_size / no_of_images)

    return lr_lambda

def max_lr_finder_schedule(no_of_images, batch_size, base_lr, max_lr, total_epochs=5):
    # Linear LR ramp for a max-LR range test
    lr_finder_schedule = lambda t: np.interp([t], [0, total_epochs], [base_lr, max_lr])[0]
    lr_finder_lambda = lambda it: lr_finder_schedule(it * batch_size / no_of_images)

    return lr_finder_lambda
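The returned lambda maps a global iteration index to a value that `LambdaLR` then multiplies into the optimizer's initial learning rate. A small sketch of the schedule's key points, using the same arguments as `custom_resnet.py`:

```python
from cyclic_lr_util import custom_one_cycle_lr

lr_lambda = custom_one_cycle_lr(no_of_images=50176, batch_size=2, base_lr=0.04, max_lr=0.4,
                                final_lr=0.004, epoch_stage1=5, epoch_stage2=18, total_epochs=24)
iters_per_epoch = 50176 // 2  # 25088 iterations per epoch at batch size 2
for epoch in (0, 5, 18, 24):
    print(epoch, round(lr_lambda(epoch * iters_per_epoch), 4))
# 0 0.04 (base_lr), 5 0.4 (max_lr), 18 0.04 (back to base_lr), 24 0.004 (final_lr)
```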
data_transform_cifar10_custom_resnet.py
ADDED
@@ -0,0 +1,45 @@
#from PIL import Image
import cv2
import numpy as np
from albumentations import Compose, RandomCrop, Normalize, HorizontalFlip, Resize, GaussNoise, PadIfNeeded, ShiftScaleRotate, CoarseDropout, ToGray
from albumentations.augmentations.dropout import Cutout
from albumentations.pytorch import ToTensorV2


class album_Compose_train:
    def __init__(self):
        # Pad to 48x48 with the per-channel CIFAR10 mean, random-crop back to 32x32,
        # cut out one 8x8 hole (also mean-filled), then flip, normalize, and tensorize
        self.transform = Compose(
            [
                PadIfNeeded(min_height=48, min_width=48, border_mode=cv2.BORDER_CONSTANT, value=[0.4914*255, 0.4822*255, 0.4465*255], p=1.0),
                RandomCrop(32, 32, p=1.0),
                Cutout(num_holes=1, max_h_size=8, max_w_size=8, fill_value=[0.4914*255, 0.4822*255, 0.4465*255]),
                HorizontalFlip(p=0.2),
                #GaussNoise(p=0.15),
                #ElasticTransform(p=0.15),
                Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                ToTensorV2(),
            ])

    def __call__(self, img):
        img = np.array(img)
        img = self.transform(image=img)['image']
        return img

class album_Compose_test:
    def __init__(self):
        # Test-time pipeline: normalize and tensorize only
        self.transform = Compose(
            [
                Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                ToTensorV2(),
            ])

    def __call__(self, img):
        img = np.array(img)
        img = self.transform(image=img)['image']
        return img

def get_train_transform():
    return album_Compose_train()

def get_test_transform():
    return album_Compose_test()
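These callables plug directly into torchvision datasets via the `transform` argument, and they can also be tried on a single image; a minimal sketch, assuming any 32x32 RGB input:

```python
from PIL import Image
from data_transform_cifar10_custom_resnet import get_train_transform

train_tf = get_train_transform()
img = Image.open("cat.jpg").convert("RGB").resize((32, 32))
tensor = train_tf(img)   # pad -> random crop -> cutout -> flip -> normalize -> tensor
print(tensor.shape)      # torch.Size([3, 32, 32])
```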
deer.jpg
ADDED
(image stored with Git LFS)
dog.jpg
ADDED
epoch=22-step=4140.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dbd5ef521d2b49bd3a91cc133144bac00996ebf815cf69ced6129fa8bf991b1b
size 52633150
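Only this three-line pointer is committed; the roughly 52 MB of checkpoint weights live in Git LFS storage and are fetched on `git lfs pull`.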
frog.jpg
ADDED
horse.jpg
ADDED
plane.jpg
ADDED
requirements.txt
ADDED
@@ -0,0 +1,10 @@
torch
torchvision
torch-lr-finder
grad-cam
pillow
numpy
pytorch_lightning
albumentations
pytorch-gradcam
gradio
ship.jpg
ADDED
(image stored with Git LFS)
truck.jpg
ADDED