# Copyright (C) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, check out LICENSE.md
# The file is duplicated from https://github.com/NVIDIA/flownet2-pytorch
# with some modifications.
import torch
import torch.nn as nn
import numpy as np


# Convolution -> (optional BatchNorm) -> LeakyReLU block; the padding keeps
# the spatial size unchanged for odd kernel sizes at stride 1.
def conv(use_batch_norm, in_planes, out_planes, kernel_size=3, stride=1):
    if use_batch_norm:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                      stride=stride, padding=(kernel_size - 1) // 2,
                      bias=False),
            nn.BatchNorm2d(out_planes),
            nn.LeakyReLU(0.1, inplace=True)
        )
    else:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                      stride=stride, padding=(kernel_size - 1) // 2,
                      bias=True),
            nn.LeakyReLU(0.1, inplace=True)
        )


# Same convolution block but without an activation; BatchNorm is optional.
def i_conv(use_batch_norm, in_planes, out_planes, kernel_size=3, stride=1,
           bias=True):
    if use_batch_norm:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                      stride=stride, padding=(kernel_size - 1) // 2,
                      bias=bias),
            nn.BatchNorm2d(out_planes),
        )
    else:
        return nn.Sequential(
            nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                      stride=stride, padding=(kernel_size - 1) // 2,
                      bias=bias),
        )


# 3x3 convolution producing a 2-channel (u, v) optical flow field.
def predict_flow(in_planes):
    return nn.Conv2d(in_planes, 2, kernel_size=3, stride=1, padding=1,
                     bias=True)


# Transposed convolution that doubles the spatial resolution.
def deconv(in_planes, out_planes):
    return nn.Sequential(
        nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2,
                           padding=1, bias=True),
        nn.LeakyReLU(0.1, inplace=True)
    )
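

# Example sketch: how `conv`, `predict_flow`, and `deconv` can be composed
# into a FlowNet-style stage (encode, predict a coarse flow, upsample).
# The channel sizes and the name `_example_flow_stage` are arbitrary choices
# for illustration, not values used by any particular FlowNet variant.
def _example_flow_stage(use_batch_norm=False):
    encoder = conv(use_batch_norm, 3, 64, kernel_size=3, stride=2)
    flow_head = predict_flow(64)
    upsample_feat = deconv(64, 32)
    x = torch.randn(1, 3, 64, 64)
    feat = encoder(x)              # (1, 64, 32, 32)
    flow = flow_head(feat)         # (1, 2, 32, 32): per-pixel (u, v) flow
    up_feat = upsample_feat(feat)  # (1, 32, 64, 64)
    return flow, up_feat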


# Casts its input to half precision; used when running the networks in fp16.
class tofp16(nn.Module):
    def __init__(self):
        super(tofp16, self).__init__()

    def forward(self, input):
        return input.half()


# Casts its input back to single precision.
class tofp32(nn.Module):
    def __init__(self):
        super(tofp32, self).__init__()

    def forward(self, input):
        return input.float()


# Fills a (transposed) convolution weight so that every output/input filter
# pair holds a bilinear interpolation kernel.
def init_deconv_bilinear(weight):
    f_shape = weight.size()
    height, width = f_shape[-2], f_shape[-1]
    f = np.ceil(width / 2.0)
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    bilinear = np.zeros([height, width])
    for x in range(width):
        for y in range(height):
            value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
            bilinear[y, x] = value
    weight.data.fill_(0.)
    for i in range(f_shape[0]):
        for j in range(f_shape[1]):
            weight.data[i, j, :, :] = torch.from_numpy(bilinear)
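

# Example sketch: `init_deconv_bilinear` can be applied to the 4x4 weight of
# a ConvTranspose2d such as the one built by `deconv` above. The channel
# count and the name `_example_bilinear_init` are arbitrary, for illustration.
def _example_bilinear_init():
    up = nn.ConvTranspose2d(16, 16, kernel_size=4, stride=2, padding=1,
                            bias=False)
    init_deconv_bilinear(up.weight)
    return up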


# Returns a hook for Tensor.register_hook that stores the incoming gradient
# in `grads` under `name`.
def save_grad(grads, name):
    def hook(grad):
        grads[name] = grad
    return hook
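

# Example sketch: using `save_grad` to inspect an intermediate gradient after
# backward(). The tensor shapes and dictionary key below are arbitrary.
if __name__ == '__main__':
    grads = {}
    layer = conv(False, 3, 8)
    x = torch.randn(1, 3, 16, 16)
    feat = layer(x)
    feat.register_hook(save_grad(grads, 'feat'))
    feat.sum().backward()
    print(grads['feat'].shape)  # torch.Size([1, 8, 16, 16])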