import numpy as np
import cv2, pdb, glob, argparse
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
def alignImages(im1, im2,masksDL):
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
akaze = cv2.AKAZE_create()
keypoints1, descriptors1 = akaze.detectAndCompute(im1, None)
keypoints2, descriptors2 = akaze.detectAndCompute(im2, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches = sorted(matches, key=lambda x: x.distance)  # match() may return a tuple in newer OpenCV, so sort a copy
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
# Use homography
height, width, channels = im2.shape
im1Reg = cv2.warpPerspective(im1, h, (width, height))
# fill the empty regions of the warped background from the image, except where the mask marks foreground; there keep the original (unwarped) background
mask_rep=(np.sum(im1Reg.astype('float32'),axis=2)==0)
im1Reg[mask_rep,0]=im2[mask_rep,0]
im1Reg[mask_rep,1]=im2[mask_rep,1]
im1Reg[mask_rep,2]=im2[mask_rep,2]
mask_rep1=np.logical_and(mask_rep , masksDL[...,0]==255)
im1Reg[mask_rep1,0]=im1[mask_rep1,0]
im1Reg[mask_rep1,1]=im1[mask_rep1,1]
im1Reg[mask_rep1,2]=im1[mask_rep1,2]
return im1Reg
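# Rough usage sketch (not part of the original script; the file names are hypothetical):
# the captured background is warped into the frame's coordinate system before matting, e.g.
#   frame = cv2.imread('0001_img.png')
#   back = cv2.imread('0001_back.png')
#   mask = cv2.imread('0001_masksDL.png')
#   back_aligned = alignImages(back, frame, mask)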
def adjustExposure(img,back,mask):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
mask = cv2.dilate(mask, kernel, iterations=10)
mask1 = cv2.dilate(mask, kernel, iterations=300)
msk=mask1.astype(np.float32)/255-mask.astype(np.float32)/255; msk=msk.astype(bool)  # np.bool was removed in recent NumPy
back_tr=back.copy()  # work on a copy so the caller's background is not modified in place
back_tr[...,0]=bias_gain(img[...,0],back[...,0],msk)
back_tr[...,1]=bias_gain(img[...,1],back[...,1],msk)
back_tr[...,2]=bias_gain(img[...,2],back[...,2],msk)
return back_tr
def bias_gain(orgR,capR,cap_mask):
capR=capR.astype('float32')
orgR=orgR.astype('float32')
xR=capR[cap_mask]
yR=orgR[cap_mask]
gainR=np.nanstd(yR)/np.nanstd(xR);
biasR=np.nanmean(yR)-gainR*np.nanmean(xR);
cap_tran=capR*gainR+biasR;
return cap_tran.astype('float32')
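# bias_gain fits a per-channel linear transfer cap*gain + bias that matches the captured
# background's mean and standard deviation to the image inside the ring-shaped region msk
# (the band between the small and the large dilation of the segmentation mask):
# gain = std(y)/std(x), bias = mean(y) - gain*mean(x).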
parser = argparse.ArgumentParser(description='Deeplab Segmentation')
parser.add_argument('-i', '--input_dir', type=str, required=True,help='Directory containing the input images. (required)')
args=parser.parse_args()
dir_name=args.input_dir
list_im=glob.glob(dir_name + '/*_img.png'); list_im.sort()
for i in range(0,len(list_im)):
image = cv2.imread(list_im[i],cv2.IMREAD_COLOR)
back = cv2.imread(list_im[i].replace('img','back'),cv2.IMREAD_COLOR)
mask = cv2.imread(list_im[i].replace('img','masksDL'))
#back_new = adjustExposure(image,back,mask[...,0])
back_align = alignImages(back, image,mask)
cv2.imwrite(list_im[i].replace('img','back'),back_align)
str_msg='\nDone: ' + dir_name
print(str_msg)
#######################################
from __future__ import print_function
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
import os
import time
import argparse
from data_loader import AdobeDataAffineHR
from functions import *
from networks import ResnetConditionHR, conv_init
from loss_functions import alpha_loss, compose_loss, alpha_gradient_loss
#CUDA
#os.environ["CUDA_VISIBLE_DEVICES"]="4"
print('CUDA Device: ' + os.environ["CUDA_VISIBLE_DEVICES"])
"""Parses arguments."""
parser = argparse.ArgumentParser(description='Training Background Matting on Adobe Dataset.')
parser.add_argument('-n', '--name', type=str, help='Name of tensorboard and model saving folders.')
parser.add_argument('-bs', '--batch_size', type=int, help='Batch Size.')
parser.add_argument('-res', '--reso', type=int, help='Input image resolution')
parser.add_argument('-epoch', '--epoch', type=int, default=60,help='Maximum Epoch')
parser.add_argument('-n_blocks1', '--n_blocks1', type=int, default=7,help='Number of residual blocks after Context Switching.')
parser.add_argument('-n_blocks2', '--n_blocks2', type=int, default=3,help='Number of residual blocks for Fg and alpha each.')
args=parser.parse_args()
##Directories
tb_dir='TB_Summary/' + args.name
model_dir='Models/' + args.name
if not os.path.exists(model_dir):
os.makedirs(model_dir)
if not os.path.exists(tb_dir):
os.makedirs(tb_dir)
## Input list
data_config_train = {'reso': [args.reso,args.reso], 'trimapK': [5,5], 'noise': True} # choice for data loading parameters
# DATA LOADING
print('\n[Phase 1] : Data Preparation')
def collate_filter_none(batch):
batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
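# AdobeDataAffineHR.__getitem__ returns None when a sample fails to load, so this collate
# function drops those entries before building the batch.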
#Original Data
traindata = AdobeDataAffineHR(csv_file='Data_adobe/Adobe_train_data.csv',data_config=data_config_train,transform=None) # dataset that reads the samples listed in the .csv file
train_loader = torch.utils.data.DataLoader(traindata, batch_size=args.batch_size, shuffle=True, num_workers=args.batch_size, collate_fn=collate_filter_none)
print('\n[Phase 2] : Initialization')
net=ResnetConditionHR(input_nc=(3,3,1,4), output_nc=4, n_blocks1=7, n_blocks2=3, norm_layer=nn.BatchNorm2d)
net.apply(conv_init)
net=nn.DataParallel(net)
#net.load_state_dict(torch.load(model_dir + 'net_epoch_X')) #uncomment this if you are initializing your model
net.cuda()
torch.backends.cudnn.benchmark=True
#Loss
l1_loss=alpha_loss()
c_loss=compose_loss()
g_loss=alpha_gradient_loss()
optimizer = optim.Adam(net.parameters(), lr=1e-4)
#optimizer.load_state_dict(torch.load(model_dir + 'optim_epoch_X')) #uncomment this if you are initializing your model
log_writer=SummaryWriter(tb_dir)
print('Starting Training')
step=50 #steps to visualize training images in tensorboard
KK=len(train_loader)
for epoch in range(0,args.epoch):
net.train();
netL, alL, fgL, fg_cL, al_fg_cL, elapse_run, elapse=0,0,0,0,0,0,0
t0=time.time();
testL=0; ct_tst=0;
for i,data in enumerate(train_loader):
#Initiating
fg, bg, alpha, image, seg, bg_tr, multi_fr = data['fg'], data['bg'], data['alpha'], data['image'], data['seg'], data['bg_tr'], data['multi_fr']
fg, bg, alpha, image, seg, bg_tr, multi_fr = Variable(fg.cuda()), Variable(bg.cuda()), Variable(alpha.cuda()), Variable(image.cuda()), Variable(seg.cuda()), Variable(bg_tr.cuda()), Variable(multi_fr.cuda())
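# to_tensor (data_loader.py) maps images to [-1, 1], so alpha > -0.99 selects pixels that are
# not pure background and alpha_pred > 0.95 selects near-opaque foreground.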
mask=(alpha>-0.99).type(torch.cuda.FloatTensor)
mask0=Variable(torch.ones(alpha.shape).cuda())
tr0=time.time()
alpha_pred,fg_pred=net(image,bg_tr,seg,multi_fr)
## Put needed loss here
al_loss=l1_loss(alpha,alpha_pred,mask0)
fg_loss=l1_loss(fg,fg_pred,mask)
al_mask=(alpha_pred>0.95).type(torch.cuda.FloatTensor)
fg_pred_c=image*al_mask + fg_pred*(1-al_mask)
fg_c_loss= c_loss(image,alpha_pred,fg_pred_c,bg,mask0)
al_fg_c_loss=g_loss(alpha,alpha_pred,mask0)
loss=al_loss + 2*fg_loss + fg_c_loss + al_fg_c_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
netL += loss.data
alL += al_loss.data
fgL += fg_loss.data
fg_cL += fg_c_loss.data
al_fg_cL += al_fg_c_loss.data
log_writer.add_scalar('training_loss', loss.data, epoch*KK + i + 1)
log_writer.add_scalar('alpha_loss', al_loss.data, epoch*KK + i + 1)
log_writer.add_scalar('fg_loss', fg_loss.data, epoch*KK + i + 1)
log_writer.add_scalar('comp_loss', fg_c_loss.data, epoch*KK + i + 1)
log_writer.add_scalar('alpha_gradient_loss', al_fg_c_loss.data, epoch*KK + i + 1)
t1=time.time()
elapse +=t1 -t0
elapse_run += t1-tr0
t0=t1
testL+=loss.data
ct_tst+=1
if i % step == (step-1):
print('[%d, %5d] Total-loss: %.4f Alpha-loss: %.4f Fg-loss: %.4f Comp-loss: %.4f Alpha-gradient-loss: %.4f Time-all: %.4f Time-fwbw: %.4f' % (epoch + 1, i + 1, netL/step, alL/step, fgL/step, fg_cL/step, al_fg_cL/step, elapse/step, elapse_run/step))
netL, alL, fgL, fg_cL, al_fg_cL, elapse_run, elapse=0,0,0,0,0,0,0
write_tb_log(image,'image',log_writer,i)
write_tb_log(seg,'seg',log_writer,i)
write_tb_log(alpha,'alpha',log_writer,i)
write_tb_log(alpha_pred,'alpha_pred',log_writer,i)
write_tb_log(fg*mask,'fg',log_writer,i)
write_tb_log(fg_pred*mask,'fg_pred',log_writer,i)
write_tb_log(multi_fr[0:4,0,...].unsqueeze(1),'multi_fr',log_writer,i)
#composition
alpha_pred=(alpha_pred+1)/2
comp=fg_pred*alpha_pred + (1-alpha_pred)*bg
write_tb_log(comp,'composite',log_writer,i)
del comp
del fg, bg, alpha, image, alpha_pred, fg_pred, seg, multi_fr
#Saving
torch.save(net.state_dict(), model_dir + 'net_epoch_%d_%.4f.pth' %(epoch,testL/ct_tst))
torch.save(optimizer.state_dict(), model_dir + 'optim_epoch_%d_%.4f.pth' %(epoch,testL/ct_tst))
#######################################
import numpy as np
import cv2, pdb, glob, argparse
MAX_FEATURES = 500
GOOD_MATCH_PERCENT = 0.15
def alignImages(im1, im2,masksDL):
# Convert images to grayscale
im1Gray = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2Gray = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
akaze = cv2.AKAZE_create()
keypoints1, descriptors1 = akaze.detectAndCompute(im1, None)
keypoints2, descriptors2 = akaze.detectAndCompute(im2, None)
# Match features.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE)
matches = matcher.match(descriptors1, descriptors2, None)
# Sort matches by score
matches = sorted(matches, key=lambda x: x.distance)  # match() may return a tuple in newer OpenCV, so sort a copy
# Remove not so good matches
numGoodMatches = int(len(matches) * GOOD_MATCH_PERCENT)
matches = matches[:numGoodMatches]
# Extract location of good matches
points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
points1[i, :] = keypoints1[match.queryIdx].pt
points2[i, :] = keypoints2[match.trainIdx].pt
# Find homography
h, mask = cv2.findHomography(points1, points2, cv2.RANSAC)
# Use homography
height, width, channels = im2.shape
im1Reg = cv2.warpPerspective(im1, h, (width, height))
# fill the empty regions of the warped background from the image, except where the mask marks foreground; there keep the original (unwarped) background
mask_rep=(np.sum(im1Reg.astype('float32'),axis=2)==0)
im1Reg[mask_rep,0]=im2[mask_rep,0]
im1Reg[mask_rep,1]=im2[mask_rep,1]
im1Reg[mask_rep,2]=im2[mask_rep,2]
mask_rep1=np.logical_and(mask_rep , masksDL[...,0]==255)
im1Reg[mask_rep1,0]=im1[mask_rep1,0]
im1Reg[mask_rep1,1]=im1[mask_rep1,1]
im1Reg[mask_rep1,2]=im1[mask_rep1,2]
return im1Reg
def adjustExposure(img,back,mask):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
mask = cv2.dilate(mask, kernel, iterations=10)
mask1 = cv2.dilate(mask, kernel, iterations=300)
msk=mask1.astype(np.float32)/255-mask.astype(np.float32)/255; msk=msk.astype(bool)  # np.bool was removed in recent NumPy
bias=np.zeros((1,3)); gain=np.ones((1,3))
bias[0,0],gain[0,0]=bias_gain(img[...,0],back[...,0],msk)
bias[0,1],gain[0,1]=bias_gain(img[...,1],back[...,1],msk)
bias[0,2],gain[0,2]=bias_gain(img[...,2],back[...,2],msk)
return bias,gain
def bias_gain(orgR,capR,cap_mask):
xR=capR[cap_mask]
yR=orgR[cap_mask]
gainR=np.nanstd(yR)/np.nanstd(xR);
biasR=np.nanmean(yR)-gainR*np.nanmean(xR);
return biasR,gainR
parser = argparse.ArgumentParser(description='Deeplab Segmentation')
parser.add_argument('-i', '--input_dir', type=str, required=True,help='Directory containing the input images. (required)')
parser.add_argument('-v_name','--video_name',type=str, default=None,help='Name of the video')
args=parser.parse_args()
dir_name=args.input_dir
list_im=glob.glob(dir_name + '/*_img.png'); list_im.sort()
back=cv2.imread(args.video_name);
# back=back.astype('float32')/255
# #adjust bias-gain
# bias=[]; gain=[]
# for i in range(0,len(list_im),30):
# image = cv2.imread(list_im[i]); image=image.astype('float32')/255
# mask = cv2.imread(list_im[i].replace('img','masksDL'))
# b,g=adjustExposure(image,back,mask[...,0])
# bias.append(b); gain.append(g)
# Bias=np.median(np.asarray(bias),axis=0).squeeze(0);
# Gain=np.median(np.asarray(gain),axis=0).squeeze(0)
# back_new=back
# back_new[...,0]=Gain[0]*back[...,0]+Bias[0]
# back_new[...,1]=Gain[1]*back[...,1]+Bias[1]
# back_new[...,2]=Gain[2]*back[...,2]+Bias[2]
# back_new=(255*back_new).astype(np.uint8)
for i in range(0,len(list_im)):
image = cv2.imread(list_im[i])
mask = cv2.imread(list_im[i].replace('img','masksDL'))
back_align = alignImages(back, image,mask)
cv2.imwrite(list_im[i].replace('img','back'),back_align)
print('Done: ' + str(i+1) + '/' + str(len(list_im)))
#######################################
from __future__ import print_function, division
import os
import torch
import pandas as pd
import skimage
from skimage import io
import numpy as np
import matplotlib.pyplot as plt
import pdb, random
from torch.utils.data import Dataset, DataLoader
import random, os, cv2
unknown_code=128
class VideoData(Dataset):
def __init__(self,csv_file,data_config,transform=None):
self.frames = pd.read_csv(csv_file,sep=';')
self.transform = transform
self.resolution=data_config['reso']
def __len__(self):
return len(self.frames)
def __getitem__(self,idx):
img = io.imread(self.frames.iloc[idx, 0])
back = io.imread(self.frames.iloc[idx, 1])
seg = io.imread(self.frames.iloc[idx, 2])
fr1 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 3]), cv2.COLOR_BGR2GRAY)
fr2 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 4]), cv2.COLOR_BGR2GRAY)
fr3 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 5]), cv2.COLOR_BGR2GRAY)
fr4 = cv2.cvtColor(io.imread(self.frames.iloc[idx, 6]), cv2.COLOR_BGR2GRAY)
back_rnd = io.imread(self.frames.iloc[idx, 7])
sz=self.resolution
if np.random.random_sample() > 0.5:
img = cv2.flip(img,1)
seg = cv2.flip(seg,1)
back = cv2.flip(back,1)
back_rnd = cv2.flip(back_rnd,1)
fr1=cv2.flip(fr1,1); fr2=cv2.flip(fr2,1); fr3=cv2.flip(fr3,1); fr4=cv2.flip(fr4,1)
#make frames together
multi_fr=np.zeros((img.shape[0],img.shape[1],4))
multi_fr[...,0]=fr1; multi_fr[...,1]=fr2; multi_fr[...,2]=fr3; multi_fr[...,3]=fr4;
#allow random cropping centered on the segmentation map
bbox=create_bbox(seg,seg.shape[0],seg.shape[1])
img=apply_crop(img,bbox,self.resolution)
seg=apply_crop(seg,bbox,self.resolution)
back=apply_crop(back,bbox,self.resolution)
back_rnd=apply_crop(back_rnd,bbox,self.resolution)
multi_fr=apply_crop(multi_fr,bbox,self.resolution)
#convert seg to guidance map
#segg=create_seg_guide(seg,self.resolution)
sample = {'image': to_tensor(img), 'seg': to_tensor(create_seg_guide(seg,self.resolution)), 'bg': to_tensor(back), 'multi_fr': to_tensor(multi_fr), 'seg-gt':to_tensor(seg), 'back-rnd': to_tensor(back_rnd)}
if self.transform:
sample = self.transform(sample)
return sample
class AdobeDataAffineHR(Dataset):
def __init__(self,csv_file,data_config,transform=None):
self.frames = pd.read_csv(csv_file,sep=';')
self.transform = transform
self.resolution=data_config['reso']
self.trimapK=data_config['trimapK']
self.noise=data_config['noise']
def __len__(self):
return len(self.frames)
def __getitem__(self,idx):
try:
#load
fg = io.imread(self.frames.iloc[idx, 0])
alpha = io.imread(self.frames.iloc[idx, 1])
image = io.imread(self.frames.iloc[idx, 2])
back = io.imread(self.frames.iloc[idx, 3])
fg = cv2.resize(fg, dsize=(800,800))
alpha = cv2.resize(alpha, dsize=(800,800))
back = cv2.resize(back, dsize=(800,800))
image = cv2.resize(image, dsize=(800,800))
sz=self.resolution
#random flip
if np.random.random_sample() > 0.5:
alpha = cv2.flip(alpha,1)
fg = cv2.flip(fg,1)
back = cv2.flip(back,1)
image = cv2.flip(image,1)
trimap=generate_trimap(alpha,self.trimapK[0],self.trimapK[1],False)
#randcom crop+scale
different_sizes = [(576,576),(608,608),(640,640),(672,672),(704,704),(736,736),(768,768),(800,800)]
crop_size = random.choice(different_sizes)
x, y = random_choice(trimap, crop_size)
fg = safe_crop(fg, x, y, crop_size,sz)
alpha = safe_crop(alpha, x, y, crop_size,sz)
image = safe_crop(image, x, y, crop_size,sz)
back = safe_crop(back, x, y, crop_size,sz)
trimap = safe_crop(trimap, x, y, crop_size,sz)
#Perturb Background: random noise addition or gamma change
if self.noise:
if np.random.random_sample() > 0.6:
sigma=np.random.randint(low=2, high=6)
mu=np.random.randint(low=0, high=14)-7
back_tr=add_noise(back,mu,sigma)
else:
back_tr=skimage.exposure.adjust_gamma(back,np.random.normal(1,0.12))
#Create motion cues: transform foreground and create 4 additional images
affine_fr=np.zeros((fg.shape[0],fg.shape[1],4))
for t in range(0,4):
T=np.random.normal(0,5,(2,1)); theta=np.random.normal(0,7);
R=np.array([[np.cos(np.deg2rad(theta)), -np.sin(np.deg2rad(theta))],[np.sin(np.deg2rad(theta)), np.cos(np.deg2rad(theta))]])
sc=np.array([[1+np.random.normal(0,0.05), 0],[0,1]]); sh=np.array([[1, np.random.normal(0,0.05)*(np.random.random_sample() > 0.5)],[np.random.normal(0,0.05)*(np.random.random_sample() > 0.5), 1]]);
A=np.concatenate((sc*sh*R, T), axis=1);
fg_tr = cv2.warpAffine(fg.astype(np.uint8),A,(fg.shape[1],fg.shape[0]),flags=cv2.INTER_LINEAR,borderMode=cv2.BORDER_REFLECT)
alpha_tr = cv2.warpAffine(alpha.astype(np.uint8),A,(fg.shape[1],fg.shape[0]),flags=cv2.INTER_NEAREST,borderMode=cv2.BORDER_REFLECT)
sigma=np.random.randint(low=2, high=6)
mu=np.random.randint(low=0, high=14)-7
back_tr0=add_noise(back,mu,sigma)
affine_fr[...,t]=cv2.cvtColor(composite(fg_tr,back_tr0,alpha_tr), cv2.COLOR_BGR2GRAY)
sample = {'image': to_tensor(image), 'fg': to_tensor(fg), 'alpha': to_tensor(alpha), 'bg': to_tensor(back), 'trimap': to_tensor(trimap), 'bg_tr': to_tensor(back_tr), 'seg': to_tensor(create_seg(alpha,trimap)), 'multi_fr': to_tensor(affine_fr)}
if self.transform:
sample = self.transform(sample)
return sample
except Exception as e:
print("Error loading: " + self.frames.iloc[idx, 0])
print(e)
#Functions
def create_seg_guide(rcnn,reso):
kernel_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
kernel_dil = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
rcnn=rcnn.astype(np.float32)/255; rcnn[rcnn>0.2]=1;
K=25
zero_id=np.nonzero(np.sum(rcnn,axis=1)==0)
del_id=zero_id[0][zero_id[0]>250]
if len(del_id)>0:
del_id=[del_id[0]-2,del_id[0]-1,*del_id]
rcnn=np.delete(rcnn,del_id,0)
rcnn = cv2.copyMakeBorder( rcnn, 0, K + len(del_id), 0, 0, cv2.BORDER_REPLICATE)
rcnn = cv2.erode(rcnn, kernel_er, iterations=np.random.randint(10,20))
rcnn = cv2.dilate(rcnn, kernel_dil, iterations=np.random.randint(3,7))
k_size_list=[(21,21),(31,31),(41,41)]
rcnn=cv2.GaussianBlur(rcnn.astype(np.float32),random.choice(k_size_list),0)
rcnn=(255*rcnn).astype(np.uint8)
rcnn=np.delete(rcnn, range(reso[0],reso[0]+K), 0)
return rcnn
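# create_seg_guide turns a ground-truth segmentation into a coarse, soft guidance map:
# threshold, delete trailing empty rows, randomly erode/dilate and Gaussian-blur, so the
# network is trained on an imperfect soft mask rather than a perfect one.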
def crop_holes(img,cx,cy,crop_size):
img[cy:cy+crop_size[0],cx:cx+crop_size[1]]=0
return img
def create_seg(alpha,trimap):
#old
num_holes=np.random.randint(low=0, high=3)
crop_size_list=[(15,15),(25,25),(35,35),(45,45)]
kernel_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
kernel_dil = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
seg = (alpha>0.5).astype(np.float32)
#print('Before %.4f max: %.4f' %(seg.sum(),seg.max()))
#old
seg = cv2.erode(seg, kernel_er, iterations=np.random.randint(low=10,high=20))
seg = cv2.dilate(seg, kernel_dil, iterations=np.random.randint(low=15,high=30))
#print('After %.4f max: %.4f' %(seg.sum(),seg.max()))
seg=seg.astype(np.float32)
seg=(255*seg).astype(np.uint8)
for i in range(num_holes):
crop_size=random.choice(crop_size_list)
cx,cy = random_choice(trimap,crop_size)
seg=crop_holes(seg,cx,cy,crop_size)
trimap=crop_holes(trimap,cx,cy,crop_size)
k_size_list=[(21,21),(31,31),(41,41)]
seg=cv2.GaussianBlur(seg.astype(np.float32),random.choice(k_size_list),0)
return seg.astype(np.uint8)
def apply_crop(img,bbox,reso):
img_crop=img[bbox[0]:bbox[0]+bbox[2],bbox[1]:bbox[1]+bbox[3],...];
img_crop=cv2.resize(img_crop,reso)
return img_crop
def create_bbox(mask,R,C):
where = np.array(np.where(mask))
x1, y1 = np.amin(where, axis=1)
x2, y2 = np.amax(where, axis=1)
w=np.maximum(y2-y1,x2-x1);
bd=np.random.uniform(0.1,0.4)
x1=x1-np.round(bd*w)
y1=y1-np.round(bd*w)
y2=y2+np.round(bd*w)
if x1<0: x1=0
if y1<0: y1=0
if y2>=C: y2=C
if x2>=R: x2=R-1
bbox=np.around([x1,y1,x2-x1,y2-y1]).astype('int')
return bbox
def composite(fg, bg, a):
fg = fg.astype(np.float32); bg=bg.astype(np.float32); a=a.astype(np.float32);
alpha= np.expand_dims(a / 255,axis=2)
im = alpha * fg + (1 - alpha) * bg
im = im.astype(np.uint8)
return im
def add_noise(back,mean,sigma):
back=back.astype(np.float32)
row,col,ch= back.shape
gauss = np.random.normal(mean,sigma,(row,col,ch))
gauss = gauss.reshape(row,col,ch)
#gauss = np.repeat(gauss[:, :, np.newaxis], ch, axis=2)
noisy = back + gauss
noisy[noisy<0]=0; noisy[noisy>255]=255;
return noisy.astype(np.uint8)
def safe_crop(mat, x, y, crop_size,img_size,cubic=True):
img_rows, img_cols = img_size
crop_height, crop_width = crop_size
if len(mat.shape) == 2:
ret = np.zeros((crop_height, crop_width), np.float32)
else:
ret = np.zeros((crop_height, crop_width, 3), np.float32)
crop = mat[y:y + crop_height, x:x + crop_width]
h, w = crop.shape[:2]
ret[0:h, 0:w] = crop
if crop_size != (img_rows, img_cols):
if cubic:
ret = cv2.resize(ret, dsize=(img_rows, img_cols))
else:
ret = cv2.resize(ret, dsize=(img_rows, img_cols), interpolation=cv2.INTER_NEAREST)
return ret
def generate_trimap(alpha,K1,K2,train_mode):
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
fg = np.array(np.equal(alpha, 255).astype(np.float32))
if train_mode:
K=np.random.randint(K1,K2)
else:
K=np.round((K1+K2)/2).astype('int')
fg = cv2.erode(fg, kernel, iterations=K)
unknown = np.array(np.not_equal(alpha, 0).astype(np.float32))
unknown = cv2.dilate(unknown, kernel, iterations=2*K)
trimap = fg * 255 + (unknown - fg) * 128
return trimap.astype(np.uint8)
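# Trimap encoding: 255 for the eroded definite foreground, 128 (unknown_code) for the dilated
# uncertain band, 0 for background; random_choice below samples crops centred on this
# unknown region.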
def random_choice(trimap, crop_size=(320, 320)):
img_height, img_width = trimap.shape[0:2]
crop_height, crop_width = crop_size
val_idx=np.zeros((img_height,img_width))
val_idx[int(crop_height/2):int(img_height-crop_height/2),int(crop_width/2):int(img_width-crop_width/2)]=1
y_indices, x_indices = np.where(np.logical_and(trimap == unknown_code,val_idx==1))
num_unknowns = len(y_indices)
x, y = 0, 0
if num_unknowns > 0:
ix = np.random.choice(range(num_unknowns))
center_x = x_indices[ix]
center_y = y_indices[ix]
x = max(0, center_x - int(crop_width / 2))
y = max(0, center_y - int(crop_height / 2))
#added extra
return x, y
def to_tensor(pic):
if len(pic.shape)>=3:
img = torch.from_numpy(pic.transpose((2, 0, 1)))
else:
img=torch.from_numpy(pic)
img=img.unsqueeze(0)
# backward compatibility
return 2*(img.float().div(255))-1
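# to_tensor converts HWC uint8 arrays to CHW float tensors scaled from [0, 255] to [-1, 1];
# the thresholds used during training (e.g. alpha > -0.99) assume this range. For example:
#   t = to_tensor(np.zeros((512, 512, 3), dtype=np.uint8))   # shape (3, 512, 512), all values -1.0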
#######################################
from __future__ import print_function
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
import os
import time
import argparse
import numpy as np
from data_loader import VideoData
from functions import *
from networks import ResnetConditionHR, MultiscaleDiscriminator, conv_init
from loss_functions import alpha_loss, compose_loss, alpha_gradient_loss, GANloss
#CUDA
#os.environ["CUDA_VISIBLE_DEVICES"]="4"
print('CUDA Device: ' + os.environ["CUDA_VISIBLE_DEVICES"])
"""Parses arguments."""
parser = argparse.ArgumentParser(description='Training Background Matting on Adobe Dataset.')
parser.add_argument('-n', '--name', type=str, help='Name of tensorboard and model saving folders.')
parser.add_argument('-bs', '--batch_size', type=int, help='Batch Size.')
parser.add_argument('-res', '--reso', type=int, help='Input image resolution')
parser.add_argument('-init_model', '--init_model', type=str, help='Initial model file')
parser.add_argument('-epoch', '--epoch', type=int, default=10,help='Maximum Epoch')
parser.add_argument('-n_blocks1', '--n_blocks1', type=int, default=7,help='Number of residual blocks after Context Switching.')
parser.add_argument('-n_blocks2', '--n_blocks2', type=int, default=3,help='Number of residual blocks for Fg and alpha each.')
parser.add_argument('-d', '--debug', type=str, default="", help='File to dump output')
parser.add_argument('-s', '--script', type=bool, default=False, help='Trace the model')
args=parser.parse_args()
##Directories
tb_dir='TB_Summary/' + args.name
model_dir='Models/' + args.name
torch.manual_seed(1337)
np.random.seed(1337)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if not os.path.exists(model_dir):
os.makedirs(model_dir)
if not os.path.exists(tb_dir):
os.makedirs(tb_dir)
## Input list
data_config_train = {'reso': (args.reso,args.reso)} # VideoData only uses the resolution entry
# DATA LOADING
print('\n[Phase 1] : Data Preparation')
def collate_filter_none(batch):
batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
#Original Data
traindata = VideoData(csv_file='Video_data_train.csv',data_config=data_config_train,transform=None) # dataset that reads the samples listed in the .csv file
train_loader = torch.utils.data.DataLoader(traindata, batch_size=args.batch_size, shuffle=True, num_workers=args.batch_size, collate_fn=collate_filter_none)
print('\n[Phase 2] : Initialization')
netB=ResnetConditionHR(input_nc=(3,3,1,4),output_nc=4,n_blocks1=args.n_blocks1,n_blocks2=args.n_blocks2)
#netB=nn.DataParallel(netB)
#netB.load_state_dict(torch.load(args.init_model))
netB.cuda(); netB.eval()
for param in netB.parameters(): #freeze netB
param.requires_grad = False
netG=ResnetConditionHR(input_nc=(3,3,1,4),output_nc=4,n_blocks1=args.n_blocks1,n_blocks2=args.n_blocks2)
netG.apply(conv_init)
#netG=nn.DataParallel(netG)
netG.cuda()
torch.backends.cudnn.benchmark=True
netD=MultiscaleDiscriminator(input_nc=3,num_D=1,norm_layer=nn.InstanceNorm2d,ndf=64)
netD.apply(conv_init)
netD=nn.DataParallel(netD)
netD.cuda()
#Loss
l1_loss=alpha_loss()
c_loss=compose_loss()
g_loss=alpha_gradient_loss()
GAN_loss=GANloss()
optimizerG = optim.Adam(netG.parameters(), lr=1e-4)
optimizerD = optim.Adam(netD.parameters(), lr=1e-5)
log_writer=SummaryWriter(tb_dir)
step=50
KK=len(train_loader)
wt=1
print('Tracing')
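# One warm-up batch: with --script the frozen teacher netB and the generator netG are replaced
# by torch.jit.trace'd versions; otherwise a single forward pass is run and training continues
# on the original modules.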
for data in train_loader:
bg, image, seg, multi_fr = data['bg'], data['image'], data['seg'], data['multi_fr']
bg, image, seg, multi_fr = Variable(bg.cuda()), Variable(image.cuda()), Variable(seg.cuda()), Variable(multi_fr.cuda())
if args.script:
netB = torch.jit.trace(netB,(image,bg,seg,multi_fr))
netG = torch.jit.trace(netG,(image,bg,seg,multi_fr))
else:
netB(image,bg,seg,multi_fr)
netG(image,bg,seg,multi_fr)
break
print('Starting training')
for epoch in range(0,args.epoch):
netG.train(); netD.train()
lG, lD, GenL, DisL_r, DisL_f, alL, fgL, compL, elapse_run, elapse=0,0,0,0,0,0,0,0,0,0
t0=time.time();
for i,data in enumerate(train_loader):
#Initiating
bg, image, seg, multi_fr, seg_gt, back_rnd = data['bg'], data['image'], data['seg'], data['multi_fr'], data['seg-gt'], data['back-rnd']
bg, image, seg, multi_fr, seg_gt, back_rnd = Variable(bg.cuda()), Variable(image.cuda()), Variable(seg.cuda()), Variable(multi_fr.cuda()), Variable(seg_gt.cuda()), Variable(back_rnd.cuda())
mask0=Variable(torch.ones(seg.shape).cuda())
tr0=time.time()
#pseudo-supervision
alpha_pred_sup,fg_pred_sup=netB(image,bg,seg,multi_fr)
mask=(alpha_pred_sup>-0.98).type(torch.cuda.FloatTensor)
mask1=(seg_gt>0.95).type(torch.cuda.FloatTensor)
## Train Generator
alpha_pred,fg_pred=netG(image,bg,seg,multi_fr)
if args.debug:
torch.save(fg_pred, args.debug)
##pseudo-supervised losses
al_loss=l1_loss(alpha_pred_sup,alpha_pred,mask0)+0.5*g_loss(alpha_pred_sup,alpha_pred,mask0)
fg_loss=l1_loss(fg_pred_sup,fg_pred,mask)
#compose into same background
comp_loss= c_loss(image,alpha_pred,fg_pred,bg,mask1)
#randomly permute the background
perm=torch.LongTensor(np.random.permutation(bg.shape[0]))
bg_sh=bg[perm,:,:,:]
al_mask=(alpha_pred>0.95).type(torch.cuda.FloatTensor)
#Choose the target background for composition
#back_rnd: contains separate set of background videos captured
#bg_sh: contains randomly permuted captured background from the same minibatch
if np.random.random_sample() > 0.5:
bg_sh=back_rnd
image_sh=compose_image_withshift(alpha_pred,image*al_mask + fg_pred*(1-al_mask),bg_sh,seg)
fake_response=netD(image_sh)
loss_ganG=GAN_loss(fake_response,label_type=True)
lossG= loss_ganG + wt*(0.05*comp_loss+0.05*al_loss+0.05*fg_loss)
optimizerG.zero_grad()
lossG.backward()
optimizerG.step()
##Train Discriminator
fake_response=netD(image_sh); real_response=netD(image)
loss_ganD_fake=GAN_loss(fake_response,label_type=False)
loss_ganD_real=GAN_loss(real_response,label_type=True)
lossD=(loss_ganD_real+loss_ganD_fake)*0.5
# Update discriminator for every 5 generator update
if i%5 ==0:
optimizerD.zero_grad()
lossD.backward()
optimizerD.step()
lG += lossG.data
lD += lossD.data
GenL += loss_ganG.data
DisL_r += loss_ganD_real.data
DisL_f += loss_ganD_fake.data
alL += al_loss.data
fgL += fg_loss.data
compL += comp_loss.data
log_writer.add_scalar('Generator Loss', lossG.data, epoch*KK + i + 1)
log_writer.add_scalar('Discriminator Loss', lossD.data, epoch*KK + i + 1)
log_writer.add_scalar('Generator Loss: Fake', loss_ganG.data, epoch*KK + i + 1)
log_writer.add_scalar('Discriminator Loss: Real', loss_ganD_real.data, epoch*KK + i + 1)
log_writer.add_scalar('Discriminator Loss: Fake', loss_ganD_fake.data, epoch*KK + i + 1)
log_writer.add_scalar('Generator Loss: Alpha', al_loss.data, epoch*KK + i + 1)
log_writer.add_scalar('Generator Loss: Fg', fg_loss.data, epoch*KK + i + 1)
log_writer.add_scalar('Generator Loss: Comp', comp_loss.data, epoch*KK + i + 1)
t1=time.time()
elapse +=t1 -t0
elapse_run += t1-tr0
t0=t1
if i % step == (step-1):
print('[%d, %5d] Gen-loss: %.4f Disc-loss: %.4f Alpha-loss: %.4f Fg-loss: %.4f Comp-loss: %.4f Time-all: %.4f Time-fwbw: %.4f' %(epoch + 1, i + 1, lG/step,lD/step,alL/step,fgL/step,compL/step,elapse/step,elapse_run/step))
lG, lD, GenL, DisL_r, DisL_f, alL, fgL, compL, elapse_run, elapse=0,0,0,0,0,0,0,0,0,0
write_tb_log(image,'image',log_writer,i)
write_tb_log(seg,'seg',log_writer,i)
write_tb_log(alpha_pred_sup,'alpha-sup',log_writer,i)
write_tb_log(alpha_pred,'alpha_pred',log_writer,i)
write_tb_log(fg_pred_sup*mask,'fg-pred-sup',log_writer,i)
write_tb_log(fg_pred*mask,'fg_pred',log_writer,i)
#composition
alpha_pred=(alpha_pred+1)/2
comp=fg_pred*alpha_pred + (1-alpha_pred)*bg
write_tb_log(comp,'composite-same',log_writer,i)
write_tb_log(image_sh,'composite-diff',log_writer,i)
del comp
del mask, back_rnd, mask0, seg_gt, mask1, bg, alpha_pred, alpha_pred_sup, image, fg_pred_sup, fg_pred, seg, multi_fr,image_sh, bg_sh, fake_response, real_response, al_loss, fg_loss, comp_loss, lossG, lossD, loss_ganD_real, loss_ganD_fake, loss_ganG
if (epoch%2 == 0):
torch.save(netG.state_dict(), model_dir + 'netG_epoch_%d.pth' %(epoch))
torch.save(optimizerG.state_dict(), model_dir + 'optimG_epoch_%d.pth' %(epoch))
torch.save(netD.state_dict(), model_dir + 'netD_epoch_%d.pth' %(epoch))
torch.save(optimizerD.state_dict(), model_dir + 'optimD_epoch_%d.pth' %(epoch))
#Change weight every 2 epoch to put more stress on discriminator weight and less on pseudo-supervision
wt=wt/2
#######################################
import os
import time
from argparse import Namespace
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
from .data_loader import VideoData
from .functions import compose_image_withshift, write_tb_log
from .networks import ResnetConditionHR, MultiscaleDiscriminator, conv_init
from .loss_functions import alpha_loss, compose_loss, alpha_gradient_loss, GANloss
import random
import numpy as np
from pathlib import Path
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark import DATA_PATH
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
def _collate_filter_none(batch):
batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
def _create_data_dir():
data_dir = Path(__file__).parent.joinpath(".data")
data_dir.mkdir(parents=True, exist_ok=True)
return data_dir
class Model(BenchmarkModel):
task = COMPUTER_VISION.PATTERN_RECOGNITION
# Original batch size: 4
# Original hardware: unknown
# Source: https://arxiv.org/pdf/2004.00626.pdf
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
self.opt = Namespace(**{
'n_blocks1': 7,
'n_blocks2': 3,
'batch_size': self.batch_size,
'resolution': 512,
'name': 'Real_fixed'
})
datadir = os.path.join(DATA_PATH, "Background_Matting_inputs")
csv_file_path = _create_data_dir().joinpath("Video_data_train_processed.csv")
with open(f"{datadir}/Video_data_train.csv", "r") as r:
with open(csv_file_path, "w") as w:
w.write(r.read().format(scriptdir=datadir))
data_config_train = {
'reso': (self.opt.resolution, self.opt.resolution)}
traindata = VideoData(csv_file=csv_file_path,
data_config=data_config_train, transform=None)
train_loader = torch.utils.data.DataLoader(
traindata, batch_size=self.opt.batch_size, shuffle=True, num_workers=0, collate_fn=_collate_filter_none)
self.train_data = []
for data in train_loader:
self.train_data.append(data)
for key in data:
data[key] = data[key].to(self.device)  # .to() is not in-place; keep the moved tensor
netB = ResnetConditionHR(input_nc=(
3, 3, 1, 4), output_nc=4, n_blocks1=self.opt.n_blocks1, n_blocks2=self.opt.n_blocks2)
netB.to(self.device)
netB.eval()
for param in netB.parameters(): # freeze netB
param.requires_grad = False
self.netB = netB
netG = ResnetConditionHR(input_nc=(
3, 3, 1, 4), output_nc=4, n_blocks1=self.opt.n_blocks1, n_blocks2=self.opt.n_blocks2)
netG.apply(conv_init)
self.netG = netG
self.netG.to(self.device)
netD = MultiscaleDiscriminator(
input_nc=3, num_D=1, norm_layer=nn.InstanceNorm2d, ndf=64)
netD.apply(conv_init)
# netD = nn.DataParallel(netD)
self.netD = netD
self.netD.to(self.device)
self.l1_loss = alpha_loss()
self.c_loss = compose_loss()
self.g_loss = alpha_gradient_loss()
self.GAN_loss = GANloss()
self.optimizerG = optim.Adam(netG.parameters(), lr=1e-4)
self.optimizerD = optim.Adam(netD.parameters(), lr=1e-5)
self.log_writer = SummaryWriter(datadir)
self.model_dir = datadir
self._maybe_trace()
def _maybe_trace(self):
for data in self.train_data:
bg, image, seg, multi_fr = data['bg'], data['image'], data['seg'], data['multi_fr']
bg, image, seg, multi_fr = Variable(bg.to(self.device)), Variable(
image.to(self.device)), Variable(seg.to(self.device)), Variable(multi_fr.to(self.device))
if self.jit:
self.netB = torch.jit.trace(
self.netB, (image, bg, seg, multi_fr))
self.netG = torch.jit.trace(
self.netG, (image, bg, seg, multi_fr))
else:
self.netB(image, bg, seg, multi_fr)
self.netG(image, bg, seg, multi_fr)
break
def get_module(self):
# use netG (generation) for the return module
for _i, data in enumerate(self.train_data):
bg, image, seg, multi_fr, seg_gt, back_rnd = data['bg'], data[
'image'], data['seg'], data['multi_fr'], data['seg-gt'], data['back-rnd']
return self.netG, (image.to(self.device), bg.to(self.device), seg.to(self.device), multi_fr.to(self.device))
def train(self):
self.netG.train()
self.netD.train()
lG, lD, GenL, DisL_r, DisL_f, alL, fgL, compL, elapse_run, elapse = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
t0 = time.time()
KK = len(self.train_data)
wt = 1
epoch = 0
step = 50
num_of_batches = 1
for i, data in zip(range(num_of_batches), self.train_data):
# Initiating
bg, image, seg, multi_fr, seg_gt, back_rnd = data['bg'], data[
'image'], data['seg'], data['multi_fr'], data['seg-gt'], data['back-rnd']
bg, image, seg, multi_fr, seg_gt, back_rnd = Variable(bg.to(self.device)), Variable(image.to(self.device)), Variable(
seg.to(self.device)), Variable(multi_fr.to(self.device)), Variable(seg_gt.to(self.device)), Variable(back_rnd.to(self.device))
mask0 = Variable(torch.ones(seg.shape).to(self.device))
tr0 = time.time()
# pseudo-supervision
alpha_pred_sup, fg_pred_sup = self.netB(image, bg, seg, multi_fr)
if self.device == 'cuda':
mask = (alpha_pred_sup > -0.98).type(torch.cuda.FloatTensor)
mask1 = (seg_gt > 0.95).type(torch.cuda.FloatTensor)
else:
mask = (alpha_pred_sup > -0.98).type(torch.FloatTensor)
mask1 = (seg_gt > 0.95).type(torch.FloatTensor)
# Train Generator
alpha_pred, fg_pred = self.netG(image, bg, seg, multi_fr)
# pseudo-supervised losses
al_loss = self.l1_loss(alpha_pred_sup, alpha_pred, mask0) + \
0.5 * self.g_loss(alpha_pred_sup, alpha_pred, mask0)
fg_loss = self.l1_loss(fg_pred_sup, fg_pred, mask)
# compose into same background
comp_loss = self.c_loss(image, alpha_pred, fg_pred, bg, mask1)
# randomly permute the background
perm = torch.LongTensor(np.random.permutation(bg.shape[0]))
bg_sh = bg[perm, :, :, :]
if self.device == 'cuda':
al_mask = (alpha_pred > 0.95).type(torch.cuda.FloatTensor)
else:
al_mask = (alpha_pred > 0.95).type(torch.FloatTensor)
# Choose the target background for composition
# back_rnd: contains separate set of background videos captured
# bg_sh: contains randomly permuted captured background from the same minibatch
if np.random.random_sample() > 0.5:
bg_sh = back_rnd
image_sh = compose_image_withshift(
alpha_pred, image*al_mask + fg_pred*(1-al_mask), bg_sh, seg)
fake_response = self.netD(image_sh)
loss_ganG = self.GAN_loss(fake_response, label_type=True)
lossG = loss_ganG + wt*(0.05*comp_loss+0.05*al_loss+0.05*fg_loss)
self.optimizerG.zero_grad()
lossG.backward()
self.optimizerG.step()
# Train Discriminator
fake_response = self.netD(image_sh)
real_response = self.netD(image)
loss_ganD_fake = self.GAN_loss(fake_response, label_type=False)
loss_ganD_real = self.GAN_loss(real_response, label_type=True)
lossD = (loss_ganD_real+loss_ganD_fake)*0.5
# Update discriminator for every 5 generator update
if i % 5 == 0:
self.optimizerD.zero_grad()
lossD.backward()
self.optimizerD.step()
lG += lossG.data
lD += lossD.data
GenL += loss_ganG.data
DisL_r += loss_ganD_real.data
DisL_f += loss_ganD_fake.data
alL += al_loss.data
fgL += fg_loss.data
compL += comp_loss.data
self.log_writer.add_scalar(
'Generator Loss', lossG.data, epoch*KK + i + 1)
self.log_writer.add_scalar('Discriminator Loss',
lossD.data, epoch*KK + i + 1)
self.log_writer.add_scalar('Generator Loss: Fake',
loss_ganG.data, epoch*KK + i + 1)
self.log_writer.add_scalar('Discriminator Loss: Real',
loss_ganD_real.data, epoch*KK + i + 1)
self.log_writer.add_scalar('Discriminator Loss: Fake',
loss_ganD_fake.data, epoch*KK + i + 1)
self.log_writer.add_scalar('Generator Loss: Alpha',
al_loss.data, epoch*KK + i + 1)
self.log_writer.add_scalar('Generator Loss: Fg',
fg_loss.data, epoch*KK + i + 1)
self.log_writer.add_scalar('Generator Loss: Comp',
comp_loss.data, epoch*KK + i + 1)
t1 = time.time()
elapse += t1 - t0
elapse_run += t1-tr0
t0 = t1
if i % step == (step-1):
print('[%d, %5d] Gen-loss: %.4f Disc-loss: %.4f Alpha-loss: %.4f Fg-loss: %.4f Comp-loss: %.4f Time-all: %.4f Time-fwbw: %.4f' %
(epoch + 1, i + 1, lG/step, lD/step, alL/step, fgL/step, compL/step, elapse/step, elapse_run/step))
lG, lD, GenL, DisL_r, DisL_f, alL, fgL, compL, elapse_run, elapse = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
write_tb_log(image, 'image', self.log_writer, i)
write_tb_log(seg, 'seg', self.log_writer, i)
write_tb_log(alpha_pred_sup, 'alpha-sup', self.log_writer, i)
write_tb_log(alpha_pred, 'alpha_pred', self.log_writer, i)
write_tb_log(fg_pred_sup*mask, 'fg-pred-sup',
self.log_writer, i)
write_tb_log(fg_pred*mask, 'fg_pred', self.log_writer, i)
# composition
alpha_pred = (alpha_pred+1)/2
comp = fg_pred*alpha_pred + (1-alpha_pred)*bg
write_tb_log(comp, 'composite-same', self.log_writer, i)
write_tb_log(image_sh, 'composite-diff', self.log_writer, i)
del comp
del mask, back_rnd, mask0, seg_gt, mask1, bg, alpha_pred, alpha_pred_sup, image, fg_pred_sup, fg_pred, seg, multi_fr, image_sh, bg_sh, fake_response, real_response, al_loss, fg_loss, comp_loss, lossG, lossD, loss_ganD_real, loss_ganD_fake, loss_ganG
if (epoch % 2 == 0):
torch.save(self.netG.state_dict(),
os.path.join(self.model_dir, 'netG_epoch_%d.pth' % (epoch)))
torch.save(self.optimizerG.state_dict(),
os.path.join(self.model_dir, 'optimG_epoch_%d.pth' % (epoch)))
torch.save(self.netD.state_dict(),
os.path.join(self.model_dir, 'netD_epoch_%d.pth' % (epoch)))
torch.save(self.optimizerD.state_dict(),
os.path.join(self.model_dir, 'optimD_epoch_%d.pth' % (epoch)))
# Change weight every 2 epoch to put more stress on discriminator weight and less on pseudo-supervision
wt = wt/2
def eval(self):
raise NotImplementedError()
#######################################
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import numpy as np
class ResnetConditionHR(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, nf_part=64,norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks1=7, n_blocks2=3, padding_type='reflect'):
assert(n_blocks1 >= 0); assert(n_blocks2 >= 0)
super(ResnetConditionHR, self).__init__()
self.input_nc = input_nc
self.output_nc = output_nc
self.ngf = ngf
use_bias=True
#main encoder output 256xW/4xH/4
model_enc1 = [nn.ReflectionPad2d(3),nn.Conv2d(input_nc[0], ngf, kernel_size=7, padding=0,bias=use_bias),norm_layer(ngf),nn.ReLU(True)]
model_enc1 += [nn.Conv2d(ngf , ngf * 2, kernel_size=3,stride=2, padding=1, bias=use_bias),norm_layer(ngf * 2),nn.ReLU(True)]
model_enc2 = [nn.Conv2d(ngf*2 , ngf * 4, kernel_size=3,stride=2, padding=1, bias=use_bias),norm_layer(ngf * 4),nn.ReLU(True)]
#back encoder output 256xW/4xH/4
model_enc_back = [nn.ReflectionPad2d(3),nn.Conv2d(input_nc[1], ngf, kernel_size=7, padding=0,bias=use_bias),norm_layer(ngf),nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model_enc_back += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,stride=2, padding=1, bias=use_bias),norm_layer(ngf * mult * 2),nn.ReLU(True)]
#seg encoder output 256xW/4xH/4
model_enc_seg = [nn.ReflectionPad2d(3),nn.Conv2d(input_nc[2], ngf, kernel_size=7, padding=0,bias=use_bias),norm_layer(ngf),nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model_enc_seg += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,stride=2, padding=1, bias=use_bias),norm_layer(ngf * mult * 2),nn.ReLU(True)]
mult = 2**n_downsampling
# #motion encoder output 256xW/4xH/4
model_enc_multi = [nn.ReflectionPad2d(3),nn.Conv2d(input_nc[3], ngf, kernel_size=7, padding=0,bias=use_bias),norm_layer(ngf),nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model_enc_multi += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,stride=2, padding=1, bias=use_bias),norm_layer(ngf * mult * 2),nn.ReLU(True)]
self.model_enc1 = nn.Sequential(*model_enc1)
self.model_enc2 = nn.Sequential(*model_enc2)
self.model_enc_back = nn.Sequential(*model_enc_back)
self.model_enc_seg = nn.Sequential(*model_enc_seg)
self.model_enc_multi = nn.Sequential(*model_enc_multi)
mult = 2**n_downsampling
self.comb_back=nn.Sequential(nn.Conv2d(ngf * mult*2,nf_part,kernel_size=1,stride=1,padding=0,bias=False),norm_layer(ngf),nn.ReLU(True))
self.comb_seg=nn.Sequential(nn.Conv2d(ngf * mult*2,nf_part,kernel_size=1,stride=1,padding=0,bias=False),norm_layer(ngf),nn.ReLU(True))
self.comb_multi=nn.Sequential(nn.Conv2d(ngf * mult*2,nf_part,kernel_size=1,stride=1,padding=0,bias=False),norm_layer(ngf),nn.ReLU(True))
#decoder
model_res_dec=[nn.Conv2d(ngf * mult +3*nf_part,ngf*mult,kernel_size=1,stride=1,padding=0,bias=False),norm_layer(ngf*mult),nn.ReLU(True)]
for i in range(n_blocks1):
model_res_dec += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
model_res_dec_al=[]
for i in range(n_blocks2):
model_res_dec_al += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
model_res_dec_fg=[]
for i in range(n_blocks2):
model_res_dec_fg += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
model_dec_al=[]
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
#model_dec_al += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),kernel_size=3, stride=2,padding=1, output_padding=1,bias=use_bias),norm_layer(int(ngf * mult / 2)),nn.ReLU(True)]
model_dec_al += [nn.Upsample(scale_factor=2,mode='bilinear',align_corners = True),nn.Conv2d(ngf * mult, int(ngf * mult / 2), 3, stride=1,padding=1),norm_layer(int(ngf * mult / 2)),nn.ReLU(True)]
model_dec_al += [nn.ReflectionPad2d(3),nn.Conv2d(ngf, 1, kernel_size=7, padding=0),nn.Tanh()]
model_dec_fg1=[nn.Upsample(scale_factor=2,mode='bilinear',align_corners = True),nn.Conv2d(ngf * 4, int(ngf * 2), 3, stride=1,padding=1),norm_layer(int(ngf * 2)),nn.ReLU(True)]
model_dec_fg2=[nn.Upsample(scale_factor=2,mode='bilinear',align_corners = True),nn.Conv2d(ngf * 4, ngf, 3, stride=1,padding=1),norm_layer(ngf),nn.ReLU(True),nn.ReflectionPad2d(3),nn.Conv2d(ngf, output_nc-1, kernel_size=7, padding=0)]
self.model_res_dec = nn.Sequential(*model_res_dec)
self.model_res_dec_al=nn.Sequential(*model_res_dec_al)
self.model_res_dec_fg=nn.Sequential(*model_res_dec_fg)
self.model_al_out=nn.Sequential(*model_dec_al)
self.model_dec_fg1=nn.Sequential(*model_dec_fg1)
self.model_fg_out = nn.Sequential(*model_dec_fg2)
def forward(self, image,back,seg,multi):
img_feat1=self.model_enc1(image)
img_feat=self.model_enc2(img_feat1)
back_feat=self.model_enc_back(back)
seg_feat=self.model_enc_seg(seg)
multi_feat=self.model_enc_multi(multi)
oth_feat=torch.cat([self.comb_back(torch.cat([img_feat,back_feat],dim=1)),self.comb_seg(torch.cat([img_feat,seg_feat],dim=1)),self.comb_multi(torch.cat([img_feat,back_feat],dim=1))],dim=1)
out_dec=self.model_res_dec(torch.cat([img_feat,oth_feat],dim=1))
out_dec_al=self.model_res_dec_al(out_dec)
al_out=self.model_al_out(out_dec_al)
out_dec_fg=self.model_res_dec_fg(out_dec)
out_dec_fg1=self.model_dec_fg1(out_dec_fg)
fg_out=self.model_fg_out(torch.cat([out_dec_fg1,img_feat1],dim=1))
return al_out, fg_out
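# Forward pass summary: the image, captured background, soft segmentation and motion stack are
# encoded separately; the comb_* 1x1 blocks fuse the image features with the cue features, the
# concatenated result goes through the shared residual decoder, and two heads predict alpha
# (Tanh, 1 channel) and the foreground image (3 channels). Note that comb_multi is fed
# back_feat here, so multi_feat itself is not used downstream of its encoder.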
############################## part ##################################
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform_(m.weight, gain=np.sqrt(2))  # the non-underscore init functions were removed from recent PyTorch
#init.normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
if classname.find('Linear') != -1:
init.normal_(m.weight)
init.constant_(m.bias, 1)
if classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, 0.2)
init.constant_(m.bias.data, 0.0)
class conv3x3(nn.Module):
'''(conv => BN => ReLU)'''
def __init__(self, in_ch, out_ch):
super(conv3x3, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, stride=2,padding=1),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(0.2,inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
class conv3x3s1(nn.Module):
'''(conv => BN => ReLU)'''
def __init__(self, in_ch, out_ch):
super(conv3x3s1, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, stride=1,padding=1),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(0.2,inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
class conv1x1(nn.Module):
'''(conv => BN => ReLU)'''
def __init__(self, in_ch, out_ch):
super(conv1x1, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 1, stride=1,padding=0),
nn.BatchNorm2d(out_ch),
nn.LeakyReLU(0.2,inplace=True),
)
def forward(self, x):
x = self.conv(x)
return x
class upconv3x3(nn.Module):
def __init__(self, in_ch, out_ch):
super(upconv3x3, self).__init__()
self.conv = nn.Sequential(
nn.Upsample(scale_factor=2,mode='bilinear'),
nn.Conv2d(in_ch, out_ch, 3, stride=1,padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
)
def forward(self, x):
x=self.conv(x)
return x
class fc(nn.Module):
def __init__(self,in_ch,out_ch):
super(fc,self).__init__()
self.fullc = nn.Sequential(
nn.Linear(in_ch,out_ch),
nn.ReLU(inplace=True),
)
def forward(self,x):
x=self.fullc(x)
return x
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
##################################### Discriminators ####################################################
class MultiscaleDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d,
use_sigmoid=False, num_D=3, getIntermFeat=False):
super(MultiscaleDiscriminator, self).__init__()
self.num_D = num_D
self.n_layers = n_layers
self.getIntermFeat = getIntermFeat
for i in range(num_D):
netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat)
if getIntermFeat:
for j in range(n_layers+2):
setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))
else:
setattr(self, 'layer'+str(i), netD.model)
self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
def singleD_forward(self, model, input):
if self.getIntermFeat:
result = [input]
for i in range(len(model)):
result.append(model[i](result[-1]))
return result[1:]
else:
return [model(input)]
def forward(self, input):
num_D = self.num_D
result = []
input_downsampled = input
for i in range(num_D):
if self.getIntermFeat:
model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_layers+2)]
else:
model = getattr(self, 'layer'+str(num_D-1-i))
result.append(self.singleD_forward(model, input_downsampled))
if i != (num_D-1):
input_downsampled = self.downsample(input_downsampled)
return result
# Defines the PatchGAN discriminator with the specified arguments.
class NLayerDiscriminator(nn.Module):
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False):
super(NLayerDiscriminator, self).__init__()
self.getIntermFeat = getIntermFeat
self.n_layers = n_layers
kw = 4
padw = int(np.ceil((kw-1.0)/2))
sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]]
nf = ndf
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
norm_layer(nf), nn.LeakyReLU(0.2, True)
]]
nf_prev = nf
nf = min(nf * 2, 512)
sequence += [[
nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
norm_layer(nf),
nn.LeakyReLU(0.2, True)
]]
sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
if use_sigmoid:
sequence += [[nn.Sigmoid()]]
if getIntermFeat:
for n in range(len(sequence)):
setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
else:
sequence_stream = []
for n in range(len(sequence)):
sequence_stream += sequence[n]
self.model = nn.Sequential(*sequence_stream)
def forward(self, input):
if self.getIntermFeat:
res = [input]
for n in range(self.n_layers+2):
model = getattr(self, 'model'+str(n))
res.append(model(res[-1]))
return res[1:]
else:
return self.model(input)
#######################################
# Prepares training data. Takes a path to a directory of videos + captured backgrounds, dumps frames, extracts human
# segmentations. Also takes a path of background videos. Creates a training CSV file with lines of the following format,
# by using all but the last 80 frames of each video and iterating repeatedly over the background frames as needed.
#$image;$captured_back;$segmentation;$image+20frames;$image+2*20frames;$image+3*20frames;$image+4*20frames;$target_back
path = "ak/"
background_path = "ak/"
output_csv = "Video_data_train.csv"
#######################################
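# For j = 1 the loop below writes a row of the form (derived from the code; the "ak" names come
# from the configured sample directory):
#   ak/0001_img.png;ak.png;ak/0001_masksDL.png;ak/0002_img.png;ak/0003_img.png;ak/0004_img.png;ak/0005_img.png;ak.png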
import os
from itertools import cycle
from tqdm import tqdm
with open(output_csv, "w") as f:
video = "ak"
n = len(os.listdir(video))
print(n)
assert n % 2 == 0
n //= 2
for j in range(1, n + 1 - 5):
img_name = video + "/%04d_img.png" % j
captured_back = video + ".png"
seg_name = video + "/%04d_masksDL.png" % j
mc1 = video + "/%04d_img.png" % (j + 1)
mc2 = video + "/%04d_img.png" % (j + 2)
mc3 = video + "/%04d_img.png" % (j + 3)
mc4 = video + "/%04d_img.png" % (j + 4)
target_back = "ak.png"
csv_line = f"{img_name};{captured_back};{seg_name};{mc1};{mc2};{mc3};{mc4};{target_back}\n"
f.write(csv_line)
print(f"Done, written to {output_csv}")
#######################################
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip',
'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
#######################################
from __future__ import print_function
import os, glob, time, argparse, pdb, cv2
#import matplotlib.pyplot as plt
import numpy as np
from skimage.measure import label
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from functions import *
from networks import ResnetConditionHR
torch.set_num_threads(1)
#os.environ["CUDA_VISIBLE_DEVICES"]="4"
print('CUDA Device: ' + os.environ["CUDA_VISIBLE_DEVICES"])
"""Parses arguments."""
parser = argparse.ArgumentParser(description='Background Matting.')
parser.add_argument('-m', '--trained_model', type=str, default='real-fixed-cam',choices=['real-fixed-cam', 'real-hand-held', 'syn-comp-adobe'],help='Trained background matting model')
parser.add_argument('-o', '--output_dir', type=str, required=True,help='Directory to save the output results. (required)')
parser.add_argument('-i', '--input_dir', type=str, required=True,help='Directory to load input images. (required)')
parser.add_argument('-tb', '--target_back', type=str,help='Directory to load the target background.')
parser.add_argument('-b', '--back', type=str,default=None,help='Captured background image. (only use for inference on videos with fixed camera)')
args=parser.parse_args()
#input model
model_main_dir='Models/' + args.trained_model + '/';
#input data path
data_path=args.input_dir
if os.path.isdir(args.target_back):
args.video=True
print('Using video mode')
else:
args.video=False
print('Using image mode')
#target background path
back_img10=cv2.imread(args.target_back); back_img10=cv2.cvtColor(back_img10,cv2.COLOR_BGR2RGB);
#Green-screen background
back_img20=np.zeros(back_img10.shape); back_img20[...,0]=120; back_img20[...,1]=255; back_img20[...,2]=155;
#initialize network
fo=glob.glob(model_main_dir + 'netG_epoch_*')
model_name1=fo[0]
netM=ResnetConditionHR(input_nc=(3,3,1,4),output_nc=4,n_blocks1=7,n_blocks2=3)
netM=nn.DataParallel(netM)
netM.load_state_dict(torch.load(model_name1))
netM.cuda(); netM.eval()
cudnn.benchmark=True
reso=(512,512) #input resolution to the network
#load captured background for video mode, fixed camera
if args.back is not None:
bg_im0=cv2.imread(args.back); bg_im0=cv2.cvtColor(bg_im0,cv2.COLOR_BGR2RGB);
#Create a list of test images
test_imgs = [f for f in os.listdir(data_path) if
os.path.isfile(os.path.join(data_path, f)) and f.endswith('_img.png')]
test_imgs.sort()
#output directory
result_path=args.output_dir
if not os.path.exists(result_path):
os.makedirs(result_path)
for i in range(0,len(test_imgs)):
filename = test_imgs[i]
#original image
bgr_img = cv2.imread(os.path.join(data_path, filename)); bgr_img=cv2.cvtColor(bgr_img,cv2.COLOR_BGR2RGB);
if args.back is None:
#captured background image
bg_im0=cv2.imread(os.path.join(data_path, filename.replace('_img','_back'))); bg_im0=cv2.cvtColor(bg_im0,cv2.COLOR_BGR2RGB);
#segmentation mask
rcnn = cv2.imread(os.path.join(data_path, filename.replace('_img','_masksDL')),0);
if args.video: #if video mode, load target background frames
#target background path
back_img10=cv2.imread(os.path.join(args.target_back,filename.replace('_img.png','.png'))); back_img10=cv2.cvtColor(back_img10,cv2.COLOR_BGR2RGB);
#Green-screen background
back_img20=np.zeros(back_img10.shape); back_img20[...,0]=120; back_img20[...,1]=255; back_img20[...,2]=155;
#build the motion-cue stack from neighbouring frames (i-40, i-20, i+20, i+40)
gap=20
multi_fr_w=np.zeros((bgr_img.shape[0],bgr_img.shape[1],4))
idx=[i-2*gap,i-gap,i+gap,i+2*gap]
for t in range(0,4):
if idx[t]<0:
idx[t]=len(test_imgs)+idx[t]
elif idx[t]>=len(test_imgs):
idx[t]=idx[t]-len(test_imgs)
file_tmp=test_imgs[idx[t]]
bgr_img_mul = cv2.imread(os.path.join(data_path, file_tmp));
multi_fr_w[...,t]=cv2.cvtColor(bgr_img_mul,cv2.COLOR_BGR2GRAY);
else:
## create the multi-frame
multi_fr_w=np.zeros((bgr_img.shape[0],bgr_img.shape[1],4))
multi_fr_w[...,0] = cv2.cvtColor(bgr_img,cv2.COLOR_BGR2GRAY);
multi_fr_w[...,1] = multi_fr_w[...,0]
multi_fr_w[...,2] = multi_fr_w[...,0]
multi_fr_w[...,3] = multi_fr_w[...,0]
#crop tightly
bgr_img0=bgr_img;
bbox=get_bbox(rcnn,R=bgr_img0.shape[0],C=bgr_img0.shape[1])
crop_list=[bgr_img,bg_im0,rcnn,back_img10,back_img20,multi_fr_w]
crop_list=crop_images(crop_list,reso,bbox)
bgr_img=crop_list[0]; bg_im=crop_list[1]; rcnn=crop_list[2]; back_img1=crop_list[3]; back_img2=crop_list[4]; multi_fr=crop_list[5]
#process segmentation mask
kernel_er = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
kernel_dil = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
rcnn=rcnn.astype(np.float32)/255; rcnn[rcnn>0.2]=1;
K=25
zero_id=np.nonzero(np.sum(rcnn,axis=1)==0)
del_id=zero_id[0][zero_id[0]>250]
if len(del_id)>0:
del_id=[del_id[0]-2,del_id[0]-1,*del_id]
rcnn=np.delete(rcnn,del_id,0)
rcnn = cv2.copyMakeBorder( rcnn, 0, K + len(del_id), 0, 0, cv2.BORDER_REPLICATE)
rcnn = cv2.erode(rcnn, kernel_er, iterations=10)
rcnn = cv2.dilate(rcnn, kernel_dil, iterations=5)
rcnn=cv2.GaussianBlur(rcnn.astype(np.float32),(31,31),0)
rcnn=(255*rcnn).astype(np.uint8)
rcnn=np.delete(rcnn, range(reso[0],reso[0]+K), 0)
#convert to torch
img=torch.from_numpy(bgr_img.transpose((2, 0, 1))).unsqueeze(0); img=2*img.float().div(255)-1
bg=torch.from_numpy(bg_im.transpose((2, 0, 1))).unsqueeze(0); bg=2*bg.float().div(255)-1
rcnn_al=torch.from_numpy(rcnn).unsqueeze(0).unsqueeze(0); rcnn_al=2*rcnn_al.float().div(255)-1
multi_fr=torch.from_numpy(multi_fr.transpose((2, 0, 1))).unsqueeze(0); multi_fr=2*multi_fr.float().div(255)-1
with torch.no_grad():
img,bg,rcnn_al, multi_fr =Variable(img.cuda()), Variable(bg.cuda()), Variable(rcnn_al.cuda()), Variable(multi_fr.cuda())
input_im=torch.cat([img,bg,rcnn_al,multi_fr],dim=1)
alpha_pred,fg_pred_tmp=netM(img,bg,rcnn_al,multi_fr)
al_mask=(alpha_pred>0.95).type(torch.cuda.FloatTensor)
# for regions with alpha>0.95, simply use the image as fg
fg_pred=img*al_mask + fg_pred_tmp*(1-al_mask)
alpha_out=to_image(alpha_pred[0,...]);
#refine alpha with connected component
labels=label((alpha_out>0.05).astype(int))
try:
assert( labels.max() != 0 )
except:
continue
largestCC = labels == np.argmax(np.bincount(labels.flat)[1:])+1
alpha_out=alpha_out*largestCC
alpha_out=(255*alpha_out[...,0]).astype(np.uint8)
fg_out=to_image(fg_pred[0,...]); fg_out=fg_out*np.expand_dims((alpha_out.astype(float)/255>0.01).astype(float),axis=2); fg_out=(255*fg_out).astype(np.uint8)
#Uncrop
R0=bgr_img0.shape[0];C0=bgr_img0.shape[1]
alpha_out0=uncrop(alpha_out,bbox,R0,C0)
fg_out0=uncrop(fg_out,bbox,R0,C0)
#compose
back_img10=cv2.resize(back_img10,(C0,R0)); back_img20=cv2.resize(back_img20,(C0,R0))
comp_im_tr1=composite4(fg_out0,back_img10,alpha_out0)
comp_im_tr2=composite4(fg_out0,back_img20,alpha_out0)
cv2.imwrite(result_path+'/'+filename.replace('_img','_out'), alpha_out0)
cv2.imwrite(result_path+'/'+filename.replace('_img','_fg'), cv2.cvtColor(fg_out0,cv2.COLOR_BGR2RGB))
cv2.imwrite(result_path+'/'+filename.replace('_img','_compose'), cv2.cvtColor(comp_im_tr1,cv2.COLOR_BGR2RGB))
cv2.imwrite(result_path+'/'+filename.replace('_img','_matte').format(i), cv2.cvtColor(comp_im_tr2,cv2.COLOR_BGR2RGB))
print('Done: ' + str(i+1) + '/' + str(len(test_imgs)))
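# Hypothetical invocation sketch (script and directory names are placeholders; the flags
# and the *_img.png / *_masksDL.png / *_back.png naming come from the code above):
#   python test_background-matting_image.py -m real-fixed-cam \
#       -i sample_data/input/ -o sample_data/output/ \
#       -tb sample_data/target_back/ -b sample_data/captured_back.png
# -tb may point to a single target background image (image mode) or to a directory of
# frames (video mode); -b is only needed for fixed-camera video input.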
|
##Copyright 2017 Adobe Systems Inc.
##
##Licensed under the Apache License, Version 2.0 (the "License");
##you may not use this file except in compliance with the License.
##You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
##Unless required by applicable law or agreed to in writing, software
##distributed under the License is distributed on an "AS IS" BASIS,
##WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##See the License for the specific language governing permissions and
##limitations under the License.
##############################################################
# python compose.py --fg_path fg_train --mask_path mask_train --bg_path bg_train --out_path merged_train --out_csv Adobe_train_data.csv --workers 8
from PIL import Image
from tqdm import tqdm
import argparse
import os
import logging
import math
from multiprocessing.pool import ThreadPool
import threading
parser = argparse.ArgumentParser(description='compose backgrounds and foregrounds')
parser.add_argument('--fg_path', type=str, required=True, help='path to provided foreground images')
parser.add_argument('--mask_path', type=str, required=True, help='path to provided alpha mattes')
parser.add_argument('--bg_path', type=str, required=True, help='path to background images (MSCOCO)')
parser.add_argument('--out_path', type=str, required=True, help='path to folder where you want the composited images to go')
parser.add_argument('--out_csv', type=str, default=os.devnull, help='path to csv file used by data loader')
parser.add_argument('--num_bgs', type=int, default=100, help='number of backgrounds onto which to paste each foreground')
parser.add_argument('--workers', type=int, default=1, help='maximum workers to use, defaults to 1')
args = parser.parse_args()
fg_path, a_path, bg_path, out_path, num_bgs = args.fg_path, args.mask_path, args.bg_path, args.out_path, args.num_bgs
os.makedirs(out_path, exist_ok=True)
def format_pbar_str(i, im_name):
pbar_prefix = "(" + str(i) + ") "
width = 33 - len(pbar_prefix)
pretty_name = pbar_prefix + ("..." + im_name[-(width - 3):] if len(im_name) > width else im_name)
return pretty_name.rjust(33)
def fixpath(path):
return 'Data_adobe/' + path if not os.path.isabs(path) else path
def composite4(fg, bg, a, w, h):
bg = bg.crop((0,0,w,h))
bg.paste(fg, mask=a)
return bg
def process_foreground_image(i, job):
worker_thread_id = int(threading.current_thread().name.rpartition("-")[-1])
im_name, bg_batch = job
im_name = im_name.replace(fg_path, '')
im = Image.open(os.path.join(fg_path, im_name))
al = Image.open(os.path.join(a_path, im_name))
bbox = im.size
w = bbox[0]
h = bbox[1]
if im.mode != 'RGB' and im.mode != 'RGBA':
im = im.convert('RGB')
if len(al.getbands()) > 0: # take the first channel, usually R
al = al.split()[0]
output_lines = []
with lock:
pbar = tqdm(bg_batch, position=worker_thread_id, desc=format_pbar_str(i, im_name), leave=False)
for b, bg_name in enumerate(pbar):
bg = Image.open(os.path.join(bg_path, bg_name))
if bg.mode != 'RGB':
bg = bg.convert('RGB')
bg_bbox = bg.size
bw = bg_bbox[0]
bh = bg_bbox[1]
wratio = w / bw
hratio = h / bh
ratio = wratio if wratio > hratio else hratio
if ratio > 1:
bg = bg.resize((math.ceil(bw * ratio), math.ceil(bh * ratio)), Image.BICUBIC)
try:
out = composite4(im, bg, al, w, h)
back_idx = i * num_bgs + b
out_name = os.path.join(out_path, im_name[:len(im_name) - 4] + '_' + str(back_idx) + '_comp.png')
out.save(out_name, "PNG")
back = bg.crop((0, 0, w, h))
back_name = os.path.join(out_path, im_name[:len(im_name) - 4] + '_' + str(back_idx) + '_back.png')
back.save(back_name, "PNG")
line = os.path.join(fixpath(fg_path), im_name) + ';' + os.path.join(fixpath(a_path), im_name) + ';' + fixpath(out_name) + ';' + fixpath(back_name) + '\n'
output_lines.append(line)
except Exception as e:
logging.error(f"Composing {im_name} onto {bg_name} failed! Skipping. Error: %s" % e)
with lock:
pbar.update()
with lock:
pbar.close()
return output_lines
fg_files = os.listdir(fg_path)
a_files = os.listdir(a_path)
bg_files = os.listdir(bg_path)
bg_batches = [bg_files[i * num_bgs:(i + 1) * num_bgs] for i in range((len(bg_files) + num_bgs - 1) // num_bgs )]
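# Illustrative sketch of the batching above (numbers are made up): with num_bgs=100 and
# 250 files in bg_path, range((250 + 99) // 100) == range(3), so bg_batches holds three
# slices of 100, 100 and 50 names; zip(fg_files, bg_batches) below then pairs each
# foreground with exactly one such batch (extra foregrounds, if any, are dropped by zip).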
lock = threading.Lock()
pool = ThreadPool(args.workers)
with lock:
total_pbar = tqdm(total=len(fg_files), position=args.workers+2, desc="TOTAL", leave=True, smoothing=0.0)
def update_total_pbar(_):
with lock:
total_pbar.update(1)
jobs = []
for jobargs in enumerate(zip(fg_files, bg_batches)):
jobs.append(pool.apply_async(process_foreground_image, args=jobargs, callback=update_total_pbar))
pool.close()
pool.join()
output = []
for result in jobs:
output.extend(result.get())
tqdm.write("Done composing...")
with open(args.out_csv, "w") as f:
for line in output:
f.write(line)
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(model_name="mnasnet1_0", test=test, device=device, jit=jit,
batch_size=batch_size, weights=models.MNASNet1_0_Weights.IMAGENET1K_V1,
extra_args=extra_args)
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(name="hf_GPT2", test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
from torchbenchmark.util.framework.vision.model_factory import TorchVisionModel
from torchbenchmark.tasks import COMPUTER_VISION
import torchvision.models as models
class Model(TorchVisionModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 8
DEFAULT_EVAL_BSIZE = 8
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(model_name="resnext50_32x4d", test=test, device=device, jit=jit,
batch_size=batch_size, weights=models.ResNeXt50_32X4D_Weights.IMAGENET1K_V1,
extra_args=extra_args)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: an implementation of a deep learning recommendation model (DLRM)
# The model input consists of dense and sparse features. The former is a vector
# of floating point values. The latter is a list of sparse indices into
# embedding tables, which consist of vectors of floating point values.
# The selected vectors are passed to mlp networks denoted by triangles,
# in some cases the vectors are interacted through operators (Ops).
#
# output:
# vector of values
# model: |
# /\
# /__\
# |
# _____________________> Op <___________________
# / | \
# /\ /\ /\
# /__\ /__\ ... /__\
# | | |
# | Op Op
# | ____/__\_____ ____/__\____
# | |_Emb_|____|__| ... |_Emb_|__|___|
# input:
# [ dense features ] [sparse indices] , ..., [sparse indices]
#
# More precise definition of model layers:
# 1) fully connected layers of an mlp
# z = f(y)
# y = Wx + b
#
# 2) embedding lookup (for a list of sparse indices p=[p1,...,pk])
# z = Op(e1,...,ek)
# obtain vectors e1=E[:,p1], ..., ek=E[:,pk]
#
# 3) Operator Op can be one of the following
# Sum(e1,...,ek) = e1 + ... + ek
# Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek]
# Cat(e1,...,ek) = [e1', ..., ek']'
# where ' denotes transpose operation
#
# References:
# [1] Maxim Naumov, Dheevatsa Mudigere, Hao-Jun Michael Shi, Jianyu Huang,
# Narayanan Sundaram, Jongsoo Park, Xiaodong Wang, Udit Gupta, Carole-Jean Wu,
# Alisson G. Azzolini, Dmytro Dzhulgakov, Andrey Mallevich, Ilia Cherniavskii,
# Yinghai Lu, Raghuraman Krishnamoorthi, Ansha Yu, Volodymyr Kondratenko,
# Stephanie Pereira, Xianjie Chen, Wenlin Chen, Vijay Rao, Bill Jia, Liang Xiong,
# Misha Smelyanskiy, "Deep Learning Recommendation Model for Personalization and
# Recommendation Systems", CoRR, arXiv:1906.00091, 2019
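#
# Illustrative sketch of the "dot" interaction with toy numbers (not part of the model):
# for a dense mlp output x = [1, 1] and embedding outputs e1 = [1, 0], e2 = [2, 3],
# the operands are stacked into T = [x, e1, e2] and Z = T T' is the 3x3 matrix of
# pairwise dot products; only the strictly lower triangle [x'e1, x'e2, e1'e2] = [1, 5, 2]
# is kept and concatenated back onto x before the top mlp (see create_interactions below).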
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
# others
import operator
import sys
import time
import copy
# data generation
import dlrm_data_caffe2 as dc
# numpy
import numpy as np
import sklearn.metrics
# onnx
# The onnx import causes deprecation warnings every time workers
# are spawned during testing. So, we filter out those warnings.
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import onnx
import caffe2.python.onnx.frontend
# caffe2
from caffe2.proto import caffe2_pb2
from caffe2.python import brew, core, dyndep, model_helper, net_drawer, workspace
# from caffe2.python.predictor import mobile_exporter
"""
# auxiliary routine used to split input on the mini-batch dimension
def where_to_split(mini_batch_size, ndevices, _add_leftover=False):
n = (mini_batch_size + ndevices - 1) // ndevices # ceiling
l = mini_batch_size - n * (ndevices - 1) # leftover
    ls = [n] * (ndevices - 1)
if _add_leftover:
ls += [l if l > 0 else n]
return ls
"""
### define dlrm in Caffe2 ###
class DLRM_Net(object):
def FeedBlobWrapper(self, tag, val, add_prefix=True, split=False, device_id=-1):
if self.ndevices > 1 and add_prefix:
if split:
# split across devices
mini_batch_size = val.shape[0]
# approach 1: np and caffe2 operators assume the mini-batch size is
# divisible exactly by the number of available devices
if mini_batch_size % self.ndevices != 0:
sys.exit("ERROR: caffe2 net assumes that the mini_batch_size "
+ str(mini_batch_size)
+ " is evenly divisible by the number of available devices"
+ str(self.ndevices))
vals = np.split(val, self.ndevices, axis=0)
"""
# approach 2: np and caffe2 operators do not assume exact divisibility
if args.mini_batch_size != mini_batch_size:
sys.exit("ERROR: caffe2 net was prepared for mini-batch size "
+ str(args.mini_batch_size)
+ " which is different from current mini-batch size "
+ str(mini_batch_size) + " being passed to it. "
+ "This is common for the last mini-batch, when "
+ "mini-batch size does not evenly divided the number of "
+ "elements in the data set.")
ls = where_to_split(mini_batch_size, self.ndevices)
vals = np.split(val, ls, axis=0)
"""
# feed to multiple devices
for d in range(self.ndevices):
tag_on_device = "gpu_" + str(d) + "/" + tag
_d = core.DeviceOption(workspace.GpuDeviceType, d)
workspace.FeedBlob(tag_on_device, vals[d], device_option=_d)
else:
# feed to multiple devices
for d in range(self.ndevices):
tag_on_device = "gpu_" + str(d) + "/" + tag
_d = core.DeviceOption(workspace.GpuDeviceType, d)
workspace.FeedBlob(tag_on_device, val, device_option=_d)
else:
# feed to a single device (named or not)
if device_id >= 0:
_d = core.DeviceOption(workspace.GpuDeviceType, device_id)
workspace.FeedBlob(tag, val, device_option=_d)
else:
workspace.FeedBlob(tag, val)
def FetchBlobWrapper(self, tag, add_prefix=True, reduce_across=None, device_id=-1):
if self.ndevices > 1 and add_prefix:
# fetch from multiple devices
vals = []
for d in range(self.ndevices):
if tag.__class__ == list:
tag_on_device = tag[d]
else:
tag_on_device = "gpu_" + str(0) + "/" + tag
val = workspace.FetchBlob(tag_on_device)
vals.append(val)
# reduce across devices
if reduce_across == "add":
return functools.reduce(operator.add, vals)
elif reduce_across == "concat":
return np.concatenate(vals)
else:
return vals
else:
# fetch from a single device (named or not)
if device_id >= 0:
tag_on_device = "gpu_" + str(device_id) + "/" + tag
return workspace.FetchBlob(tag_on_device)
else:
return workspace.FetchBlob(tag)
def AddLayerWrapper(self, layer, inp_blobs, out_blobs,
add_prefix=True, reset_grad=False, **kwargs):
# auxiliary routine to adjust tags
def adjust_tag(blobs, on_device):
if blobs.__class__ == str:
_blobs = on_device + blobs
elif blobs.__class__ == list:
_blobs = list(map(lambda tag: on_device + tag, blobs))
else: # blobs.__class__ == model_helper.ModelHelper or something else
_blobs = blobs
return _blobs
if self.ndevices > 1 and add_prefix:
# add layer on multiple devices
ll = []
for d in range(self.ndevices):
# add prefix on_device
on_device = "gpu_" + str(d) + "/"
_inp_blobs = adjust_tag(inp_blobs, on_device)
_out_blobs = adjust_tag(out_blobs, on_device)
                # WARNING: reset_grad option was exclusively designed for WeightedSum
# with inp_blobs=[w, tag_one, "", lr], where "" will be replaced
if reset_grad:
w_grad = self.gradientMap[_inp_blobs[0]]
_inp_blobs[2] = w_grad
# add layer to the model
with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, d)):
if kwargs:
new_layer = layer(_inp_blobs, _out_blobs, **kwargs)
else:
new_layer = layer(_inp_blobs, _out_blobs)
ll.append(new_layer)
return ll
else:
# add layer on a single device
            # WARNING: reset_grad option was exclusively designed for WeightedSum
# with inp_blobs=[w, tag_one, "", lr], where "" will be replaced
if reset_grad:
w_grad = self.gradientMap[inp_blobs[0]]
inp_blobs[2] = w_grad
# add layer to the model
if kwargs:
new_layer = layer(inp_blobs, out_blobs, **kwargs)
else:
new_layer = layer(inp_blobs, out_blobs)
return new_layer
def create_mlp(self, ln, sigmoid_layer, model, tag):
(tag_layer, tag_in, tag_out) = tag
# build MLP layer by layer
layers = []
weights = []
for i in range(1, ln.size):
n = ln[i - 1]
m = ln[i]
# create tags
tag_fc_w = tag_layer + ":::" + "fc" + str(i) + "_w"
tag_fc_b = tag_layer + ":::" + "fc" + str(i) + "_b"
tag_fc_y = tag_layer + ":::" + "fc" + str(i) + "_y"
tag_fc_z = tag_layer + ":::" + "fc" + str(i) + "_z"
if i == ln.size - 1:
tag_fc_z = tag_out
weights.append(tag_fc_w)
weights.append(tag_fc_b)
# initialize the weights
# approach 1: custom Xavier input, output or two-sided fill
mean = 0.0 # std_dev = np.sqrt(variance)
std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n)
W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1))
b = np.random.normal(mean, std_dev, size=m).astype(np.float32)
self.FeedBlobWrapper(tag_fc_w, W)
self.FeedBlobWrapper(tag_fc_b, b)
# approach 2: caffe2 xavier
# W = self.AddLayerWrapper(
# model.param_init_net.XavierFill,
# [],
# tag_fc_w,
# shape=[m, n]
# )
# b = self.AddLayerWrapper(
# model.param_init_net.ConstantFill,
# [],
# tag_fc_b,
# shape=[m]
# )
            # save the blob shapes for later (only needed if onnx is requested)
if self.save_onnx:
self.onnx_tsd[tag_fc_w] = (onnx.TensorProto.FLOAT, W.shape)
self.onnx_tsd[tag_fc_b] = (onnx.TensorProto.FLOAT, b.shape)
# approach 1: construct fully connected operator using model.net
fc = self.AddLayerWrapper(
model.net.FC, [tag_in, tag_fc_w, tag_fc_b], tag_fc_y
)
# approach 2: construct fully connected operator using brew
# https://github.com/caffe2/tutorials/blob/master/MNIST.ipynb
# fc = brew.fc(model, layer, tag_fc_w, dim_in=m, dim_out=n)
layers.append(fc)
if i == sigmoid_layer:
# approach 1: construct sigmoid operator using model.net
layer = self.AddLayerWrapper(model.net.Sigmoid, tag_fc_y, tag_fc_z)
# approach 2: using brew (which currently does not support sigmoid)
# tag_sigm = tag_layer + ":::" + "sigmoid" + str(i)
# layer = brew.sigmoid(model,fc,tag_sigmoid)
else:
# approach 1: construct relu operator using model.net
layer = self.AddLayerWrapper(model.net.Relu, tag_fc_y, tag_fc_z)
# approach 2: using brew
# tag_relu = tag_layer + ":::" + "relu" + str(i)
# layer = brew.relu(model,fc,tag_relu)
tag_in = tag_fc_z
layers.append(layer)
# WARNING: the dependency between layers is implicit in the tags,
# so only the last layer is added to the layers list. It will
# later be used for interactions.
return layers, weights
def create_emb(self, m, ln, model, tag):
(tag_layer, tag_in, tag_out) = tag
emb_l = []
weights_l = []
for i in range(0, ln.size):
n = ln[i]
# select device
if self.ndevices > 1:
d = i % self.ndevices
else:
d = -1
# create tags
on_device = "" if self.ndevices <= 1 else "gpu_" + str(d) + "/"
len_s = on_device + tag_layer + ":::" + "sls" + str(i) + "_l"
ind_s = on_device + tag_layer + ":::" + "sls" + str(i) + "_i"
tbl_s = on_device + tag_layer + ":::" + "sls" + str(i) + "_w"
sum_s = on_device + tag_layer + ":::" + "sls" + str(i) + "_z"
weights_l.append(tbl_s)
# initialize the weights
# approach 1a: custom
W = np.random.uniform(low=-np.sqrt(1 / n),
high=np.sqrt(1 / n),
size=(n, m)).astype(np.float32)
# approach 1b: numpy rand
# W = ra.rand(n, m).astype(np.float32)
self.FeedBlobWrapper(tbl_s, W, False, device_id=d)
# approach 2: caffe2 xavier
# with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, d)):
# W = model.param_init_net.XavierFill([], tbl_s, shape=[n, m])
            # save the blob shapes for later (only needed if onnx is requested)
if self.save_onnx:
self.onnx_tsd[tbl_s] = (onnx.TensorProto.FLOAT, W.shape)
# create operator
if self.ndevices <= 1:
EE = model.net.SparseLengthsSum([tbl_s, ind_s, len_s], [sum_s])
else:
with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, d)):
EE = model.net.SparseLengthsSum([tbl_s, ind_s, len_s], [sum_s])
emb_l.append(EE)
return emb_l, weights_l
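    # Sketch of what SparseLengthsSum computes above (toy values, for illustration only):
    # given a table W with rows W[0..n-1], lengths = [2, 1] and indices = [0, 3, 2], the
    # output has one row per lengths entry, namely [W[0] + W[3], W[2]]; each sample's
    # looked-up embedding rows are summed into a single m-dimensional vector.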
def create_interactions(self, x, ly, model, tag):
(tag_dense_in, tag_sparse_in, tag_int_out) = tag
if self.arch_interaction_op == "dot":
# concatenate dense and sparse features
tag_int_out_info = tag_int_out + "_info"
T, T_info = model.net.Concat(
x + ly,
[tag_int_out + "_cat_axis0", tag_int_out_info + "_cat_axis0"],
axis=1,
add_axis=1,
)
# perform a dot product
Z = model.net.BatchMatMul([T, T], tag_int_out + "_matmul", trans_b=1)
# append dense feature with the interactions (into a row vector)
# approach 1: all
# Zflat = model.net.Flatten(Z, tag_int_out + "_flatten", axis=1)
# approach 2: unique
Zflat_all = model.net.Flatten(Z, tag_int_out + "_flatten_all", axis=1)
Zflat = model.net.BatchGather(
[Zflat_all, tag_int_out + "_tril_indices"],
tag_int_out + "_flatten"
)
R, R_info = model.net.Concat(
x + [Zflat], [tag_int_out, tag_int_out_info], axis=1
)
elif self.arch_interaction_op == "cat":
# concatenation features (into a row vector)
tag_int_out_info = tag_int_out + "_info"
R, R_info = model.net.Concat(
x + ly, [tag_int_out, tag_int_out_info], axis=1
)
else:
sys.exit("ERROR: --arch-interaction-op="
+ self.arch_interaction_op + " is not supported")
return R
def create_sequential_forward_ops(self):
# embeddings
tag = (self.temb, self.tsin, self.tsout)
self.emb_l, self.emb_w = self.create_emb(self.m_spa, self.ln_emb,
self.model, tag)
# bottom mlp
tag = (self.tbot, self.tdin, self.tdout)
self.bot_l, self.bot_w = self.create_mlp(self.ln_bot, self.sigmoid_bot,
self.model, tag)
# interactions
tag = (self.tdout, self.tsout, self.tint)
Z = self.create_interactions([self.bot_l[-1]], self.emb_l, self.model, tag)
# top mlp
tag = (self.ttop, Z, self.tout)
self.top_l, self.top_w = self.create_mlp(self.ln_top, self.sigmoid_top,
self.model, tag)
# debug prints
# print(self.emb_l)
# print(self.bot_l)
# print(self.top_l)
# setup the last output variable
self.last_output = self.top_l[-1]
def create_parallel_forward_ops(self):
# distribute embeddings (model parallelism)
tag = (self.temb, self.tsin, self.tsout)
self.emb_l, self.emb_w = self.create_emb(self.m_spa, self.ln_emb,
self.model, tag)
# replicate mlp (data parallelism)
tag = (self.tbot, self.tdin, self.tdout)
self.bot_l, self.bot_w = self.create_mlp(self.ln_bot, self.sigmoid_bot,
self.model, tag)
# add communication (butterfly shuffle)
t_list = []
for i, emb_output in enumerate(self.emb_l):
# split input
src_d = i % self.ndevices
lo = [emb_output + "_split_" + str(d) for d in range(self.ndevices)]
# approach 1: np and caffe2 operators assume the mini-batch size is
# divisible exactly by the number of available devices
with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, src_d)):
self.model.net.Split(emb_output, lo, axis=0)
"""
# approach 2: np and caffe2 operators do not assume exact divisibility
ls = where_to_split(args.mini_batch_size, self.ndevices, _add_leftover=True)
with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, src_d)):
emb_output_split = self.model.net.Split(
emb_output, lo, split=lp, axis=0
)
"""
# scatter
y = []
for dst_d in range(len(lo)):
src_blob = lo[dst_d]
dst_blob = str(src_blob).replace(
"gpu_" + str(src_d), "gpu_" + str(dst_d), 1
)
if src_blob != dst_blob:
with core.DeviceScope(
core.DeviceOption(workspace.GpuDeviceType, dst_d)
):
blob = self.model.Copy(src_blob, dst_blob)
else:
blob = dst_blob
y.append(blob)
t_list.append(y)
# adjust lists to be ordered per device
x = list(map(lambda x: list(x), zip(*self.bot_l)))
ly = list(map(lambda y: list(y), zip(*t_list)))
# interactions
for d in range(self.ndevices):
on_device = "gpu_" + str(d) + "/"
tag = (on_device + self.tdout, on_device + self.tsout, on_device + self.tint)
with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, d)):
self.create_interactions([x[d][-1]], ly[d], self.model, tag)
# replicate mlp (data parallelism)
tag = (self.ttop, self.tint, self.tout)
self.top_l, self.top_w = self.create_mlp(self.ln_top, self.sigmoid_top,
self.model, tag)
# debug prints
# print(self.model.net.Proto(),end='\n')
# sys.exit("ERROR: debugging")
# setup the last output variable
self.last_output = self.top_l[-1]
def __init__(
self,
m_spa,
ln_emb,
ln_bot,
ln_top,
arch_interaction_op,
arch_interaction_itself=False,
sigmoid_bot=-1,
sigmoid_top=-1,
save_onnx=False,
model=None,
test_net=None,
tag=None,
ndevices=-1,
forward_ops=True,
enable_prof=False,
):
super(DLRM_Net, self).__init__()
# init model
if model is None:
global_init_opt = ["caffe2", "--caffe2_log_level=0"]
if enable_prof:
global_init_opt += [
"--logtostderr=0",
"--log_dir=$HOME",
"--caffe2_logging_print_net_summary=1",
]
workspace.GlobalInit(global_init_opt)
self.set_tags()
self.model = model_helper.ModelHelper(name="DLRM", init_params=True)
self.test_net = None
else:
# WARNING: assume that workspace and tags have been initialized elsewhere
self.set_tags(tag[0], tag[1], tag[2], tag[3], tag[4], tag[5], tag[6],
tag[7], tag[8], tag[9])
self.model = model
self.test_net = test_net
# save arguments
self.m_spa = m_spa
self.ln_emb = ln_emb
self.ln_bot = ln_bot
self.ln_top = ln_top
self.arch_interaction_op = arch_interaction_op
self.arch_interaction_itself = arch_interaction_itself
self.sigmoid_bot = sigmoid_bot
self.sigmoid_top = sigmoid_top
self.save_onnx = save_onnx
self.ndevices = ndevices
# onnx types and shapes dictionary
if self.save_onnx:
self.onnx_tsd = {}
# create forward operators
if forward_ops:
if self.ndevices <= 1:
return self.create_sequential_forward_ops()
else:
return self.create_parallel_forward_ops()
def set_tags(
self,
_tag_layer_top_mlp="top",
_tag_layer_bot_mlp="bot",
_tag_layer_embedding="emb",
_tag_feature_dense_in="dense_in",
_tag_feature_dense_out="dense_out",
_tag_feature_sparse_in="sparse_in",
_tag_feature_sparse_out="sparse_out",
_tag_interaction="interaction",
_tag_dense_output="prob_click",
_tag_dense_target="target",
):
# layer tags
self.ttop = _tag_layer_top_mlp
self.tbot = _tag_layer_bot_mlp
self.temb = _tag_layer_embedding
# dense feature tags
self.tdin = _tag_feature_dense_in
self.tdout = _tag_feature_dense_out
# sparse feature tags
self.tsin = _tag_feature_sparse_in
self.tsout = _tag_feature_sparse_out
# output and target tags
self.tint = _tag_interaction
self.ttar = _tag_dense_target
self.tout = _tag_dense_output
def parameters(self):
return self.model
def get_loss(self):
return self.FetchBlobWrapper(self.loss, reduce_across="add")
def get_output(self):
return self.FetchBlobWrapper(self.last_output, reduce_across="concat")
def create(self, X, S_lengths, S_indices, T):
self.create_input(X, S_lengths, S_indices, T)
self.create_model(X, S_lengths, S_indices, T)
def create_input(self, X, S_lengths, S_indices, T):
# feed input data to blobs
self.FeedBlobWrapper(self.tdin, X, split=True)
        # save the blob shapes for later (only needed if onnx is requested)
if self.save_onnx:
self.onnx_tsd[self.tdin] = (onnx.TensorProto.FLOAT, X.shape)
for i in range(len(self.emb_l)):
# select device
if self.ndevices > 1:
d = i % self.ndevices
else:
d = -1
# create tags
on_device = "" if self.ndevices <= 1 else "gpu_" + str(d) + "/"
len_s = on_device + self.temb + ":::" + "sls" + str(i) + "_l"
ind_s = on_device + self.temb + ":::" + "sls" + str(i) + "_i"
self.FeedBlobWrapper(len_s, np.array(S_lengths[i]), False, device_id=d)
self.FeedBlobWrapper(ind_s, np.array(S_indices[i]), False, device_id=d)
            # save the blob shapes for later (only needed if onnx is requested)
if self.save_onnx:
lshape = (len(S_lengths[i]),) # =args.mini_batch_size
ishape = (len(S_indices[i]),)
self.onnx_tsd[len_s] = (onnx.TensorProto.INT32, lshape)
self.onnx_tsd[ind_s] = (onnx.TensorProto.INT32, ishape)
# feed target data to blobs
if T is not None:
zeros_fp32 = np.zeros(T.shape).astype(np.float32)
self.FeedBlobWrapper(self.ttar, zeros_fp32, split=True)
            # save the blob shapes for later (only needed if onnx is requested)
if self.save_onnx:
self.onnx_tsd[self.ttar] = (onnx.TensorProto.FLOAT, T.shape)
def create_model(self, X, S_lengths, S_indices, T):
#setup tril indices for the interactions
offset = 1 if self.arch_interaction_itself else 0
num_fea = len(self.emb_l) + 1
tril_indices = np.array([j + i * num_fea
for i in range(num_fea) for j in range(i + offset)])
self.FeedBlobWrapper(self.tint + "_tril_indices", tril_indices)
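        # Worked illustration of the index construction above (comment only, no extra input):
        # with num_fea = 3 and offset = 0 the comprehension yields [3, 6, 7], the row-major
        # positions of the strictly-lower-triangular entries Z[1,0], Z[2,0], Z[2,1] of the
        # 3x3 interaction matrix; BatchGather in create_interactions keeps only these terms.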
if self.save_onnx:
tish = tril_indices.shape
self.onnx_tsd[self.tint + "_tril_indices"] = (onnx.TensorProto.INT32, tish)
# create compute graph
if T is not None:
# WARNING: RunNetOnce call is needed only if we use brew and ConstantFill.
# We could use direct calls to self.model functions above to avoid it
workspace.RunNetOnce(self.model.param_init_net)
workspace.CreateNet(self.model.net)
if self.test_net is not None:
workspace.CreateNet(self.test_net)
def run(self, X, S_lengths, S_indices, T, test_net=False, enable_prof=False):
# feed input data to blobs
# dense features
self.FeedBlobWrapper(self.tdin, X, split=True)
# sparse features
for i in range(len(self.emb_l)):
# select device
if self.ndevices > 1:
d = i % self.ndevices
else:
d = -1
# create tags
on_device = "" if self.ndevices <= 1 else "gpu_" + str(d) + "/"
len_s = on_device + self.temb + ":::" + "sls" + str(i) + "_l"
ind_s = on_device + self.temb + ":::" + "sls" + str(i) + "_i"
self.FeedBlobWrapper(len_s, np.array(S_lengths[i]), False, device_id=d)
self.FeedBlobWrapper(ind_s, np.array(S_indices[i]), False, device_id=d)
# feed target data to blobs if needed
if T is not None:
self.FeedBlobWrapper(self.ttar, T, split=True)
# execute compute graph
if test_net:
workspace.RunNet(self.test_net)
else:
if enable_prof:
workspace.C.benchmark_net(self.model.net.Name(), 0, 1, True)
else:
workspace.RunNet(self.model.net)
# debug prints
# print("intermediate")
# print(self.FetchBlobWrapper(self.bot_l[-1]))
# for tag_emb in self.emb_l:
# print(self.FetchBlobWrapper(tag_emb))
# print(self.FetchBlobWrapper(self.tint))
def MSEloss(self, scale=1.0):
# add MSEloss to the model
self.AddLayerWrapper(self.model.SquaredL2Distance, [self.tout, self.ttar], "sd")
self.AddLayerWrapper(self.model.Scale, "sd", "sd2", scale=2.0 * scale)
# WARNING: "loss" is a special tag and should not be changed
self.loss = self.AddLayerWrapper(self.model.AveragedLoss, "sd2", "loss")
def BCEloss(self, scale=1.0, threshold=0.0):
        # add BCEloss to the model
if 0.0 < threshold and threshold < 1.0:
self.AddLayerWrapper(self.model.Clip, self.tout, "tout_c",
min=threshold, max=(1.0 - threshold))
self.AddLayerWrapper(self.model.MakeTwoClass, "tout_c", "tout_2c")
else:
self.AddLayerWrapper(self.model.MakeTwoClass, self.tout, "tout_2c")
self.AddLayerWrapper(self.model.LabelCrossEntropy, ["tout_2c", self.ttar], "sd")
# WARNING: "loss" is a special tag and should not be changed
if scale == 1.0:
self.loss = self.AddLayerWrapper(self.model.AveragedLoss, "sd", "loss")
else:
self.AddLayerWrapper(self.model.Scale, "sd", "sd2", scale=scale)
self.loss = self.AddLayerWrapper(self.model.AveragedLoss, "sd2", "loss")
def sgd_optimizer(self, learning_rate,
T=None, _gradientMap=None, sync_dense_params=True):
# create one, it and lr tags (or use them if already present)
if T is not None:
(tag_one, tag_it, tag_lr) = T
else:
(tag_one, tag_it, tag_lr) = ("const_one", "optim_it", "optim_lr")
# approach 1: feed values directly
# self.FeedBlobWrapper(tag_one, np.ones(1).astype(np.float32))
# self.FeedBlobWrapper(tag_it, np.zeros(1).astype(np.int64))
# it = self.AddLayerWrapper(self.model.Iter, tag_it, tag_it)
# lr = self.AddLayerWrapper(self.model.LearningRate, tag_it, tag_lr,
# base_lr=-1 * learning_rate, policy="fixed")
# approach 2: use brew
self.AddLayerWrapper(self.model.param_init_net.ConstantFill,
[], tag_one, shape=[1], value=1.0)
self.AddLayerWrapper(brew.iter, self.model, tag_it)
self.AddLayerWrapper(self.model.LearningRate, tag_it, tag_lr,
base_lr=-1 * learning_rate, policy="fixed")
        # save the blob shapes for later (only needed if onnx is requested)
if self.save_onnx:
self.onnx_tsd[tag_one] = (onnx.TensorProto.FLOAT, (1,))
self.onnx_tsd[tag_it] = (onnx.TensorProto.INT64, (1,))
# create gradient maps (or use them if already present)
if _gradientMap is not None:
self.gradientMap = _gradientMap
else:
if self.loss.__class__ == list:
self.gradientMap = self.model.AddGradientOperators(self.loss)
else:
self.gradientMap = self.model.AddGradientOperators([self.loss])
# update weights
# approach 1: builtin function
# optimizer.build_sgd(self.model, base_learning_rate=learning_rate)
# approach 2: custom code
# top MLP weight and bias
for w in self.top_w:
# allreduce across devices if needed
if sync_dense_params and self.ndevices > 1:
grad_blobs = [
self.gradientMap["gpu_{}/".format(d) + w]
for d in range(self.ndevices)
]
self.model.NCCLAllreduce(grad_blobs, grad_blobs)
# update weights
self.AddLayerWrapper(self.model.WeightedSum,
[w, tag_one, "", tag_lr], w, reset_grad=True)
# bottom MLP weight and bias
for w in self.bot_w:
# allreduce across devices if needed
if sync_dense_params and self.ndevices > 1:
grad_blobs = [
self.gradientMap["gpu_{}/".format(d) + w]
for d in range(self.ndevices)
]
self.model.NCCLAllreduce(grad_blobs, grad_blobs)
# update weights
self.AddLayerWrapper(self.model.WeightedSum,
[w, tag_one, "", tag_lr], w, reset_grad=True)
# update embeddings
for i, w in enumerate(self.emb_w):
# select device
if self.ndevices > 1:
d = i % self.ndevices
# create tags
on_device = "" if self.ndevices <= 1 else "gpu_" + str(d) + "/"
_tag_one = on_device + tag_one
_tag_lr = on_device + tag_lr
# pickup gradient
w_grad = self.gradientMap[w]
# update weights
if self.ndevices > 1:
with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, d)):
self.model.ScatterWeightedSum([w, _tag_one, w_grad.indices,
w_grad.values, _tag_lr], w)
else:
self.model.ScatterWeightedSum([w, _tag_one, w_grad.indices,
w_grad.values, _tag_lr], w)
def print_all(self):
# approach 1: all
print(workspace.Blobs(), end='\n')
for _, l in enumerate(workspace.Blobs()):
print(l)
print(self.FetchBlobWrapper(l))
# approach 2: only summary
# for param in self.model.params:
# self.model.Summarize(param, [], to_file=1)
# self.model.Summarize(self.model.param_to_grad[param], [], to_file=1)
def print_weights(self):
for _, l in enumerate(self.emb_w):
# print(l)
print(self.FetchBlobWrapper(l, False))
for _, l in enumerate(self.bot_w):
# print(l)
if self.ndevices > 1:
print(self.FetchBlobWrapper(l, False, device_id=0))
else:
print(self.FetchBlobWrapper(l))
for _, l in enumerate(self.top_w):
# print(l)
if self.ndevices > 1:
print(self.FetchBlobWrapper(l, False, device_id=0))
else:
print(self.FetchBlobWrapper(l))
def print_activations(self):
for _, l in enumerate(self.emb_l):
print(l)
print(self.FetchBlobWrapper(l, False))
for _, l in enumerate(self.bot_l):
print(l)
print(self.FetchBlobWrapper(l))
print(self.tint)
print(self.FetchBlobWrapper(self.tint))
for _, l in enumerate(self.top_l):
print(l)
print(self.FetchBlobWrapper(l))
def define_metrics():
metrics = {
'loss': lambda y_true, y_score:
sklearn.metrics.log_loss(
y_true=y_true,
y_pred=y_score,
labels=[0,1]),
'recall': lambda y_true, y_score:
sklearn.metrics.recall_score(
y_true=y_true,
y_pred=np.round(y_score)
),
'precision': lambda y_true, y_score:
sklearn.metrics.precision_score(
y_true=y_true,
y_pred=np.round(y_score)
),
'f1': lambda y_true, y_score:
sklearn.metrics.f1_score(
y_true=y_true,
y_pred=np.round(y_score)
),
'ap': sklearn.metrics.average_precision_score,
'roc_auc': sklearn.metrics.roc_auc_score,
'accuracy': lambda y_true, y_score:
sklearn.metrics.accuracy_score(
y_true=y_true,
y_pred=np.round(y_score)
),
# 'pre_curve' : sklearn.metrics.precision_recall_curve,
# 'roc_curve' : sklearn.metrics.roc_curve,
}
return metrics
def calculate_metrics(targets, scores):
scores = np.concatenate(scores, axis=0)
targets = np.concatenate(targets, axis=0)
metrics = define_metrics()
# print("Compute time for validation metric : ", end="")
# first_it = True
validation_results = {}
for metric_name, metric_function in metrics.items():
# if first_it:
# first_it = False
# else:
# print(", ", end="")
# metric_compute_start = time_wrap(False)
try:
validation_results[metric_name] = metric_function(
targets,
scores
)
except Exception as error :
validation_results[metric_name] = -1
print("{} in calculating {}".format(error, metric_name))
# metric_compute_end = time_wrap(False)
# met_time = metric_compute_end - metric_compute_start
# print("{} {:.4f}".format(metric_name, 1000 * (met_time)),
# end="")
# print(" ms")
return validation_results
if __name__ == "__main__":
### import packages ###
import sys
import argparse
### parse arguments ###
parser = argparse.ArgumentParser(
description="Train Deep Learning Recommendation Model (DLRM)"
)
# model related parameters
parser.add_argument("--arch-sparse-feature-size", type=int, default=2)
parser.add_argument("--arch-embedding-size", type=str, default="4-3-2")
parser.add_argument("--arch-mlp-bot", type=str, default="4-3-2")
parser.add_argument("--arch-mlp-top", type=str, default="4-2-1")
parser.add_argument("--arch-interaction-op", type=str, default="dot")
parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
# activations and loss
parser.add_argument("--activation-function", type=str, default="relu")
parser.add_argument("--loss-function", type=str, default="mse") # or bce
parser.add_argument("--loss-threshold", type=float, default=0.0) # 1.0e-7
parser.add_argument("--round-targets", type=bool, default=False)
# data
parser.add_argument("--data-size", type=int, default=1)
parser.add_argument("--num-batches", type=int, default=0)
parser.add_argument("--data-generation", type=str, default="random") # or synthetic or dataset
parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log")
parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte
parser.add_argument("--raw-data-file", type=str, default="")
parser.add_argument("--processed-data-file", type=str, default="")
parser.add_argument("--data-randomize", type=str, default="total") # or day or none
parser.add_argument("--data-trace-enable-padding", type=bool, default=False)
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
parser.add_argument("--num-indices-per-lookup", type=int, default=10)
parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False)
parser.add_argument("--memory-map", action="store_true", default=False)
# training
parser.add_argument("--mini-batch-size", type=int, default=1)
parser.add_argument("--nepochs", type=int, default=1)
parser.add_argument("--learning-rate", type=float, default=0.01)
parser.add_argument("--print-precision", type=int, default=5)
parser.add_argument("--numpy-rand-seed", type=int, default=123)
parser.add_argument("--sync-dense-params", type=bool, default=True)
parser.add_argument("--caffe2-net-type", type=str, default="")
# inference
parser.add_argument("--inference-only", action="store_true", default=False)
# onnx (or protobuf with shapes)
parser.add_argument("--save-onnx", action="store_true", default=False)
parser.add_argument("--save-proto-types-shapes", action="store_true", default=False)
# gpu
parser.add_argument("--use-gpu", action="store_true", default=False)
# debugging and profiling
parser.add_argument("--print-freq", type=int, default=1)
parser.add_argument("--test-freq", type=int, default=-1)
parser.add_argument("--print-time", action="store_true", default=False)
parser.add_argument("--debug-mode", action="store_true", default=False)
parser.add_argument("--enable-profiling", action="store_true", default=False)
parser.add_argument("--plot-compute-graph", action="store_true", default=False)
# mlperf logging (disables other output and stops early)
parser.add_argument("--mlperf-logging", action="store_true", default=False)
# stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107
parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0)
# stop at target AUC Terabyte (no subsampling) 0.8025
parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0)
args = parser.parse_args()
### some basic setup ###
np.random.seed(args.numpy_rand_seed)
np.set_printoptions(precision=args.print_precision)
use_gpu = args.use_gpu
if use_gpu:
device_opt = core.DeviceOption(workspace.GpuDeviceType, 0)
ngpus = workspace.NumGpuDevices() # 1
print("Using {} GPU(s)...".format(ngpus))
else:
device_opt = core.DeviceOption(caffe2_pb2.CPU)
print("Using CPU...")
### prepare training data ###
ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
if args.data_generation == "dataset":
# input and target from dataset
(nbatches, lX, lS_l, lS_i, lT,
nbatches_test, lX_test, lS_l_test, lS_i_test, lT_test,
ln_emb, m_den) = dc.read_dataset(
args.data_set, args.max_ind_range, args.data_sub_sample_rate,
args.mini_batch_size, args.num_batches, args.data_randomize, "train",
args.raw_data_file, args.processed_data_file, args.memory_map
)
# enforce maximum limit on number of vectors per embedding
if args.max_ind_range > 0:
ln_emb = np.array(list(map(
lambda x: x if x < args.max_ind_range else args.max_ind_range,
ln_emb
)))
ln_bot[0] = m_den
else:
# input and target at random
ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-")
m_den = ln_bot[0]
(nbatches, lX, lS_l, lS_i, lT) = dc.generate_random_data(
m_den, ln_emb, args.data_size, args.num_batches, args.mini_batch_size,
args.num_indices_per_lookup, args.num_indices_per_lookup_fixed,
1, args.round_targets, args.data_generation, args.data_trace_file,
args.data_trace_enable_padding
)
### parse command line arguments ###
m_spa = args.arch_sparse_feature_size
num_fea = ln_emb.size + 1 # num sparse + num dense features
m_den_out = ln_bot[ln_bot.size - 1]
if args.arch_interaction_op == "dot":
# approach 1: all
# num_int = num_fea * num_fea + m_den_out
# approach 2: unique
if args.arch_interaction_itself:
num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out
else:
num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out
elif args.arch_interaction_op == "cat":
num_int = num_fea * m_den_out
else:
sys.exit("ERROR: --arch-interaction-op="
+ args.arch_interaction_op + " is not supported")
arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top
ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
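    # Worked example with the defaults above: --arch-embedding-size "4-3-2" gives 3 sparse
    # features, so num_fea = 3 + 1 = 4; --arch-mlp-bot "4-3-2" gives m_den_out = 2; with the
    # default "dot" interaction and no self-interaction, num_int = 4*3//2 + 2 = 8, so the
    # default --arch-mlp-top "4-2-1" is adjusted to "8-4-2-1".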
# sanity check: feature sizes and mlp dimensions must match
if m_den != ln_bot[0]:
sys.exit("ERROR: arch-dense-feature-size "
+ str(m_den) + " does not match first dim of bottom mlp " + str(ln_bot[0]))
if m_spa != m_den_out:
sys.exit("ERROR: arch-sparse-feature-size "
+ str(m_spa) + " does not match last dim of bottom mlp " + str(m_den_out))
if num_int != ln_top[0]:
sys.exit("ERROR: # of feature interactions "
+ str(num_int) + " does not match first dim of top mlp " + str(ln_top[0]))
# test prints (model arch)
if args.debug_mode:
print("model arch:")
print("mlp top arch " + str(ln_top.size - 1)
+ " layers, with input to output dimensions:")
print(ln_top)
print("# of interactions")
print(num_int)
print("mlp bot arch " + str(ln_bot.size - 1)
+ " layers, with input to output dimensions:")
print(ln_bot)
print("# of features (sparse and dense)")
print(num_fea)
print("dense feature size")
print(m_den)
print("sparse feature size")
print(m_spa)
print("# of embeddings (= # of sparse features) " + str(ln_emb.size)
+ ", with dimensions " + str(m_spa) + "x:")
print(ln_emb)
print("data (inputs and targets):")
for j in range(0, nbatches):
print("mini-batch: %d" % j)
print(lX[j])
print(lS_l[j])
print(lS_i[j])
print(lT[j].astype(np.float32))
### construct the neural network specified above ###
# WARNING: to obtain exactly the same initialization for
# the weights we need to start from the same random seed.
# np.random.seed(args.numpy_rand_seed)
ndevices = min(ngpus, args.mini_batch_size, num_fea - 1) if use_gpu else -1
flag_types_shapes = args.save_onnx or args.save_proto_types_shapes
flag_forward_ops = not (use_gpu and ndevices > 1)
with core.DeviceScope(device_opt):
dlrm = DLRM_Net(
m_spa,
ln_emb,
ln_bot,
ln_top,
args.arch_interaction_op,
arch_interaction_itself=args.arch_interaction_itself,
sigmoid_bot=-1,
sigmoid_top=ln_top.size - 1,
save_onnx=flag_types_shapes,
ndevices=ndevices,
# forward_ops = flag_forward_ops
enable_prof=args.enable_profiling,
)
# load nccl if using multiple devices
if args.sync_dense_params and ndevices > 1:
dyndep.InitOpsLibrary("//caffe2/caffe2/contrib/nccl:nccl_ops")
# set the net type for better performance (dag, async_scheduling, etc)
if args.caffe2_net_type:
dlrm.parameters().net.Proto().type = args.caffe2_net_type
# plot compute graph
if args.plot_compute_graph:
graph = net_drawer.GetPydotGraph(
dlrm.parameters().net,
"dlrm_s_caffe2_graph",
"BT"
)
graph.write_pdf(graph.get_name() + ".pdf")
# test prints
if args.debug_mode:
print("initial parameters (weights and bias):")
dlrm.print_weights()
# add training loss if needed
if not args.inference_only:
with core.DeviceScope(device_opt):
# specify the loss function
nd = 1.0 if dlrm.ndevices <= 1 else 1.0 / dlrm.ndevices # 1
if args.loss_function == "mse":
dlrm.MSEloss(scale=nd)
elif args.loss_function == "bce":
dlrm.BCEloss(scale=nd, threshold=args.loss_threshold)
else:
sys.exit("ERROR: --loss-function=" + args.loss_function
+ " is not supported")
# define test net (as train net without gradients)
dlrm.test_net = core.Net(copy.deepcopy(dlrm.model.net.Proto()))
# specify the optimizer algorithm
dlrm.sgd_optimizer(
args.learning_rate, sync_dense_params=args.sync_dense_params
)
# init/create
dlrm.create(lX[0], lS_l[0], lS_i[0], lT[0])
### main loop ###
best_gA_test = 0
best_auc_test = 0
total_time = 0
total_loss = 0
total_accu = 0
total_iter = 0
total_samp = 0
k = 0
print("time/loss/accuracy (if enabled):")
while k < args.nepochs:
j = 0
while j < nbatches:
'''
# debug prints
print("input and targets")
print(lX[j])
print(lS_l[j])
print(lS_i[j])
print(lT[j].astype(np.float32))
'''
# forward and backward pass, where the latter runs only
# when gradients and loss have been added to the net
time1 = time.time()
dlrm.run(lX[j], lS_l[j], lS_i[j], lT[j]) # args.enable_profiling
time2 = time.time()
total_time += time2 - time1
            # compute loss and accuracy
Z = dlrm.get_output() # numpy array
T = lT[j] # numpy array
'''
# debug prints
print("output and loss")
print(Z)
print(dlrm.get_loss())
'''
mbs = T.shape[0] # = args.mini_batch_size except maybe for last
A = np.sum((np.round(Z, 0) == T).astype(np.uint8))
total_accu += 0 if args.inference_only else A
total_loss += 0 if args.inference_only else dlrm.get_loss() * mbs
total_iter += 1
total_samp += mbs
# print time, loss and accuracy
should_print = ((j + 1) % args.print_freq == 0) or (j + 1 == nbatches)
should_test = (
(args.test_freq > 0)
and (args.data_generation == "dataset")
and (((j + 1) % args.test_freq == 0) or (j + 1 == nbatches))
)
if should_print or should_test:
gT = 1000. * total_time / total_iter if args.print_time else -1
total_time = 0
gA = total_accu / total_samp
total_accu = 0
gL = total_loss / total_samp
total_loss = 0
str_run_type = "inference" if args.inference_only else "training"
print(
"Finished {} it {}/{} of epoch {}, {:.2f} ms/it,".format(
str_run_type, j + 1, nbatches, k, gT
)
+ " loss {:.6f}, accuracy {:3.3f} %".format(gL, gA * 100)
)
total_iter = 0
total_samp = 0
# debug prints
# print(Z)
# print(T)
# testing
if should_test and not args.inference_only:
# don't measure training iter time in a test iteration
if args.mlperf_logging:
previous_iteration_time = None
test_accu = 0
test_loss = 0
test_samp = 0
if args.mlperf_logging:
scores = []
targets = []
for i in range(nbatches_test):
# early exit if nbatches was set by the user and was exceeded
if nbatches > 0 and i >= nbatches:
break
# forward pass
dlrm.run(lX_test[i], lS_l_test[i], lS_i_test[i], lT_test[i], test_net=True)
Z_test = dlrm.get_output()
T_test = lT_test[i]
if args.mlperf_logging:
scores.append(Z_test)
targets.append(T_test)
else:
                        # compute loss and accuracy
L_test = dlrm.get_loss()
mbs_test = T_test.shape[0] # = mini_batch_size except last
A_test = np.sum((np.round(Z_test, 0) == T_test).astype(np.uint8))
test_accu += A_test
test_loss += L_test * mbs_test
test_samp += mbs_test
# compute metrics (after test loop has finished)
if args.mlperf_logging:
validation_results = calculate_metrics(targets, scores)
gA_test = validation_results['accuracy']
gL_test = validation_results['loss']
else:
gA_test = test_accu / test_samp
gL_test = test_loss / test_samp
# print metrics
is_best = gA_test > best_gA_test
if is_best:
best_gA_test = gA_test
if args.mlperf_logging:
is_best = validation_results['roc_auc'] > best_auc_test
if is_best:
best_auc_test = validation_results['roc_auc']
print(
"Testing at - {}/{} of epoch {},".format(j + 1, nbatches, k)
+ " loss {:.6f}, recall {:.4f}, precision {:.4f},".format(
validation_results['loss'],
validation_results['recall'],
validation_results['precision']
)
+ " f1 {:.4f}, ap {:.4f},".format(
validation_results['f1'],
validation_results['ap'],
)
+ " auc {:.4f}, best auc {:.4f},".format(
validation_results['roc_auc'],
best_auc_test
)
+ " accuracy {:3.3f} %, best accuracy {:3.3f} %".format(
validation_results['accuracy'] * 100,
best_gA_test * 100
)
)
else:
print(
"Testing at - {}/{} of epoch {},".format(j + 1, nbatches, 0)
+ " loss {:.6f}, accuracy {:3.3f} %, best {:3.3f} %".format(
gL_test, gA_test * 100, best_gA_test * 100
)
)
# check thresholds
if (args.mlperf_logging
and (args.mlperf_acc_threshold > 0)
and (best_gA_test > args.mlperf_acc_threshold)):
print("MLPerf testing accuracy threshold "
+ str(args.mlperf_acc_threshold)
+ " reached, stop training")
break
if (args.mlperf_logging
and (args.mlperf_auc_threshold > 0)
and (best_auc_test > args.mlperf_auc_threshold)):
print("MLPerf testing auc threshold "
+ str(args.mlperf_auc_threshold)
+ " reached, stop training")
break
j += 1 # nbatches
k += 1 # nepochs
# test prints
if not args.inference_only and args.debug_mode:
print("updated parameters (weights and bias):")
dlrm.print_weights()
# build onnx model from caffe2
if args.save_onnx:
pnet = dlrm.parameters().net.Proto()
inet = dlrm.parameters().param_init_net.Proto()
value_info = dlrm.onnx_tsd # None
# debug prints
# print(value_info)
        # WARNING: the Caffe2 to ONNX net transformation currently does not work because
# ONNX does not support SparseLengthsSum operator directly. A workaround
# could be for the Caffe2 ONNX frontend to indirectly map this operator to
# Gather and ReducedSum ONNX operators, following the PyTorch approach.
c2f = caffe2.python.onnx.frontend.Caffe2Frontend()
dlrm_caffe2_onnx = c2f.caffe2_net_to_onnx_model(pnet, inet, value_info)
# check the onnx model
onnx.checker.check_model(dlrm_caffe2_onnx)
# save model to a file
with open("dlrm_s_caffe2.onnx", "w+") as dlrm_caffe2_onnx_file:
dlrm_caffe2_onnx_file.write(str(dlrm_caffe2_onnx))
# build protobuf with types and shapes
if args.save_proto_types_shapes:
# add types and shapes to protobuf
__TYPE_MAPPING = {
onnx.TensorProto.FLOAT: caffe2_pb2.TensorProto.FLOAT,
onnx.TensorProto.UINT8: caffe2_pb2.TensorProto.UINT8,
onnx.TensorProto.INT8: caffe2_pb2.TensorProto.INT8,
onnx.TensorProto.UINT16: caffe2_pb2.TensorProto.UINT16,
onnx.TensorProto.INT16: caffe2_pb2.TensorProto.INT16,
onnx.TensorProto.INT32: caffe2_pb2.TensorProto.INT32,
onnx.TensorProto.INT64: caffe2_pb2.TensorProto.INT64,
onnx.TensorProto.STRING: caffe2_pb2.TensorProto.STRING,
onnx.TensorProto.BOOL: caffe2_pb2.TensorProto.BOOL,
onnx.TensorProto.FLOAT16: caffe2_pb2.TensorProto.FLOAT16,
onnx.TensorProto.DOUBLE: caffe2_pb2.TensorProto.DOUBLE,
}
pnet = dlrm.parameters().net.Proto()
arg = pnet.arg.add()
arg.name = "input_shape_info"
for i in pnet.external_input:
if i in dlrm.onnx_tsd:
onnx_dtype, shape = dlrm.onnx_tsd[i]
t = arg.tensors.add()
t.name = i
t.data_type = __TYPE_MAPPING[onnx_dtype]
t.dims.extend(shape)
else:
print("Warning: we don't have shape/type info for input: {}".format(i))
# debug print
# print(pnet)
# export the protobuf with types and shapes
with open("dlrm_s_caffe2.proto", "w+") as dlrm_s_proto_file:
dlrm_s_proto_file.write(str(pnet))
"""
# export the protobuf with types and shapes as well as weights
# see https://github.com/pytorch/pytorch/issues/9533
#save
net = dlrm.parameters().net
params = dlrm.parameters().params
init_net, predict_net = mobile_exporter.Export(workspace, net, params)
with open("dlrm_s_caffe2.predict", "wb") as dlrm_s_predict_file:
dlrm_s_predict_file.write(predict_net.SerializeToString())
with open("dlrm_s_caffe2.init", "wb") as dlrm_s_init_file:
dlrm_s_init_file.write(init_net.SerializeToString())
#load
net_def = caffe2_pb2.NetDef()
init_def= caffe2_pb2.NetDef()
with open("dlrm_s_caffe2.predict", "rb") as dlrm_s_predict_file:
net_def.ParseFromString(dlrm_s_predict_file.read())
print(net_def)
with open("dlrm_s_caffe2.init", "rb") as dlrm_s_init_file:
init_def.ParseFromString(dlrm_s_init_file.read())
print(init_def)
"""
|
from __future__ import absolute_import, division, print_function, unicode_literals
# miscellaneous
import builtins
import functools
# import bisect
# import shutil
import time
import json
from typing import Tuple
import sys
# data generation
from . import dlrm_data_pytorch as dp
# numpy
import numpy as np
# pytorch
import torch
import torch.nn as nn
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.scatter_gather import gather, scatter
# quotient-remainder trick
from .tricks.qr_embedding_bag import QREmbeddingBag
# mixed-dimension trick
from .tricks.md_embedding_bag import PrEmbeddingBag, md_solver
from torch.optim.lr_scheduler import _LRScheduler
from .dlrm_s_pytorch import DLRM_Net, LRPolicyScheduler
from argparse import Namespace
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import RECOMMENDATION
class Model(BenchmarkModel):
task = RECOMMENDATION.RECOMMENDATION
DEFAULT_TRAIN_BSIZE = 2048
DEFAULT_EVAL_BSIZE = 2048
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
# Train architecture: use the configuration in the paper.
# Source: https://arxiv.org/pdf/1906.00091.pdf
arch_embedding_size = "1000000-1000000-1000000-1000000-1000000-1000000-1000000-1000000"
arch_sparse_feature_size = 64
arch_mlp_bot = "512-512-64"
arch_mlp_top = "1024-1024-1024-1"
data_generation = "random"
mini_batch_size = self.batch_size
num_batches = 1
        num_indices_per_lookup = 100
self.opt = Namespace(**{
'm_spa' : None,
'ln_emb': None,
'ln_bot': None,
'ln_top': None,
'arch_interaction_op': "dot",
'arch_interaction_itself': False,
'sigmoid_bot': -1,
'sigmoid_top': -1,
'sync_dense_params': True,
'loss_threshold': 0.0,
'ndevices': -1,
'qr_flag': False,
'qr_operation': "mult",
'qr_collisions': 0,
'qr_threshold': 200,
'md_flag': False,
'md_threshold': 200,
'md_temperature': 0.3,
'activation_function': "relu",
'loss_function': "bce",
'loss_weights': "1.0-1.0",
'round_targets': False,
'data_size': 6,
'data_generation': data_generation,
'data_trace_file': "./input/dist_emb_j.log",
'raw_data_file': "",
'processed_data_file': "",
'data_randomize': "total",
'data_trace_enable_padding': False,
'max_ind_range': -1,
'num_workers': 0,
'memory_map': False,
'data_sub_sample_rate': 0.0,
'learning_rate': 0.01,
'lr_num_warmup_steps': 0,
'lr_decay_start_step': 0,
'lr_num_decay_steps': 0,
'arch_embedding_size': arch_embedding_size,
'arch_sparse_feature_size': arch_sparse_feature_size,
'arch_mlp_bot': arch_mlp_bot,
'arch_mlp_top': arch_mlp_top,
'mini_batch_size': mini_batch_size,
'num_batches': num_batches,
            'num_indices_per_lookup': num_indices_per_lookup,
'num_indices_per_lookup_fixed': True,
'numpy_rand_seed': 123,
})
if self.device == "cuda":
torch.cuda.manual_seed_all(self.opt.numpy_rand_seed)
torch.backends.cudnn.deterministic = True
# Prepare training data
self.opt.ln_bot = np.fromstring(self.opt.arch_mlp_bot, dtype=int, sep="-")
# Input and target at random
self.opt.ln_emb = np.fromstring(self.opt.arch_embedding_size, dtype=int, sep="-")
self.opt.m_den = self.opt.ln_bot[0]
train_data, self.train_ld = dp.make_random_data_and_loader(self.opt, self.opt.ln_emb, self.opt.m_den)
self.opt.nbatches = len(self.train_ld)
self.opt.m_spa = self.opt.arch_sparse_feature_size
num_fea = self.opt.ln_emb.size + 1 # num sparse + num dense features
m_den_out = self.opt.ln_bot[self.opt.ln_bot.size - 1]
if self.opt.arch_interaction_op == "dot":
# approach 1: all
# num_int = num_fea * num_fea + m_den_out
# approach 2: unique
if self.opt.arch_interaction_itself:
num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out
else:
num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out
elif self.opt.arch_interaction_op == "cat":
num_int = num_fea * m_den_out
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ self.opt.arch_interaction_op
+ " is not supported"
)
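        # Worked example for the configuration above (illustrative):
        # ln_emb has 8 tables plus 1 dense feature vector, so num_fea = 9;
        # with arch_interaction_itself=False the dot interaction yields
        # 9*8/2 = 36 pairwise terms and m_den_out = 64, hence num_int = 100
        # and the adjusted top MLP below becomes "100-1024-1024-1024-1".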
arch_mlp_top_adjusted = str(num_int) + "-" + self.opt.arch_mlp_top
self.opt.ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
dlrm = DLRM_Net(
self.opt.m_spa,
self.opt.ln_emb,
self.opt.ln_bot,
self.opt.ln_top,
arch_interaction_op=self.opt.arch_interaction_op,
arch_interaction_itself=self.opt.arch_interaction_itself,
sigmoid_bot=self.opt.sigmoid_bot,
sigmoid_top=self.opt.sigmoid_top,
sync_dense_params=self.opt.sync_dense_params,
loss_threshold=self.opt.loss_threshold,
ndevices=self.opt.ndevices,
qr_flag=self.opt.qr_flag,
qr_operation=self.opt.qr_operation,
qr_collisions=self.opt.qr_collisions,
qr_threshold=self.opt.qr_threshold,
md_flag=self.opt.md_flag,
md_threshold=self.opt.md_threshold,
)
# Preparing data
X, lS_o, lS_i, self.targets = next(iter(self.train_ld))
X = X.to(self.device)
lS_i = [S_i.to(self.device) for S_i in lS_i] if isinstance(lS_i, list) \
else lS_i.to(self.device)
lS_o = [S_o.to(self.device) for S_o in lS_o] if isinstance(lS_o, list) \
else lS_o.to(self.device)
self.targets = self.targets.to(self.device)
# Setting Loss Function
if self.opt.loss_function == "mse":
self.loss_fn = torch.nn.MSELoss(reduction="mean")
elif self.opt.loss_function == "bce":
self.loss_fn = torch.nn.BCELoss(reduction="mean")
elif self.opt.loss_function == "wbce":
self.loss_ws = torch.tensor(np.fromstring(self.opt.loss_weights, dtype=float, sep="-"))
self.loss_fn = torch.nn.BCELoss(reduction="none")
else:
sys.exit("ERROR: --loss-function=" + self.opt.loss_function + " is not supported")
self.model = dlrm.to(self.device)
self.example_inputs = (X, lS_o, lS_i)
if test == "train":
self.model.train()
self.loss_fn = torch.nn.MSELoss(reduction="mean")
self.optimizer = torch.optim.SGD(dlrm.parameters(), lr=self.opt.learning_rate)
self.lr_scheduler = LRPolicyScheduler(self.optimizer,
self.opt.lr_num_warmup_steps,
self.opt.lr_decay_start_step,
self.opt.lr_num_decay_steps)
elif test == "eval":
self.model.eval()
def get_module(self):
return self.model, self.example_inputs
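    # Note: this model holds both a self.opt (hyper-parameter Namespace) and a
    # self.optimizer; the accessors below must return/replace the optimizer,
    # never the Namespace.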
def get_optimizer(self):
if hasattr(self, "optimizer"):
return self.optimizer
return None
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self.lr_scheduler = LRPolicyScheduler(self.optimizer,
self.opt.lr_num_warmup_steps,
self.opt.lr_decay_start_step,
self.opt.lr_num_decay_steps)
def eval(self) -> Tuple[torch.Tensor]:
out = self.model(*self.example_inputs)
return (out, )
def train(self):
gen = self.model(*self.example_inputs)
self.optimizer.zero_grad()
loss = self.loss_fn(gen, self.targets)
if self.opt.loss_function == "wbce":
            loss_ws_ = self.loss_ws[self.targets.data.view(-1).long()].view_as(self.targets)
loss = loss_ws_ * loss
loss = loss.mean()
loss.backward()
self.optimizer.step()
self.lr_scheduler.step()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: generate inputs and targets for the DLRM benchmark
#
# Utility function(s) to download and pre-process public data sets
# - Criteo Kaggle Display Advertising Challenge Dataset
# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
# - Criteo Terabyte Dataset
# https://labs.criteo.com/2013/12/download-terabyte-click-logs
#
# After downloading dataset, run:
# getCriteoAdData(
# datafile="<path-to-train.txt>",
# o_filename=kaggleAdDisplayChallenge_processed.npz,
# max_ind_range=-1,
# sub_sample_rate=0.0,
# days=7,
# data_split='train',
# randomize='total',
# criteo_kaggle=True,
# memory_map=False
# )
# getCriteoAdData(
# datafile="<path-to-day_{0,...,23}>",
# o_filename=terabyte_processed.npz,
# max_ind_range=-1,
# sub_sample_rate=0.0,
# days=24,
# data_split='train',
# randomize='total',
# criteo_kaggle=False,
# memory_map=False
# )
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
# import os
from os import path
# import io
# from io import StringIO
# import collections as coll
import numpy as np
def convertUStringToDistinctIntsDict(mat, convertDicts, counts):
# Converts matrix of unicode strings into distinct integers.
#
# Inputs:
# mat (np.array): array of unicode strings to convert
# convertDicts (list): dictionary for each column
# counts (list): number of different categories in each column
#
# Outputs:
# out (np.array): array of output integers
# convertDicts (list): dictionary for each column
# counts (list): number of different categories in each column
# check if convertDicts and counts match correct length of mat
if len(convertDicts) != mat.shape[1] or len(counts) != mat.shape[1]:
print("Length of convertDicts or counts does not match input shape")
print("Generating convertDicts and counts...")
convertDicts = [{} for _ in range(mat.shape[1])]
counts = [0 for _ in range(mat.shape[1])]
# initialize output
out = np.zeros(mat.shape)
for j in range(mat.shape[1]):
for i in range(mat.shape[0]):
# add to convertDict and increment count
if mat[i, j] not in convertDicts[j]:
convertDicts[j][mat[i, j]] = counts[j]
counts[j] += 1
out[i, j] = convertDicts[j][mat[i, j]]
return out, convertDicts, counts
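# Illustrative example (hypothetical input, not part of the pipeline): for
#   mat = np.array([["a", "x"], ["b", "x"], ["a", "y"]])
# a call with empty convertDicts/counts returns
#   out = [[0, 0], [1, 0], [0, 1]],
#   convertDicts = [{"a": 0, "b": 1}, {"x": 0, "y": 1}], counts = [2, 2].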
def convertUStringToDistinctIntsUnique(mat, mat_uni, counts):
    # mat has one row per sample, each row holding the 26 categorical features
# check if mat_unique and counts match correct length of mat
if len(mat_uni) != mat.shape[1] or len(counts) != mat.shape[1]:
print("Length of mat_unique or counts does not match input shape")
print("Generating mat_unique and counts...")
mat_uni = [np.array([]) for _ in range(mat.shape[1])]
counts = [0 for _ in range(mat.shape[1])]
# initialize output
out = np.zeros(mat.shape)
ind_map = [np.array([]) for _ in range(mat.shape[1])]
# find out and assign unique ids to features
for j in range(mat.shape[1]):
m = mat_uni[j].size
mat_concat = np.concatenate((mat_uni[j], mat[:, j]))
mat_uni[j], ind_map[j] = np.unique(mat_concat, return_inverse=True)
out[:, j] = ind_map[j][m:]
counts[j] = mat_uni[j].size
return out, mat_uni, counts
def processCriteoAdData(d_path, d_file, npzfile, split, convertDicts, pre_comp_counts):
# Process Kaggle Display Advertising Challenge or Terabyte Dataset
# by converting unicode strings in X_cat to integers and
# converting negative integer values in X_int.
#
# Loads data in the form "{kaggle|terabyte}_day_i.npz" where i is the day.
#
# Inputs:
# d_path (str): path for {kaggle|terabyte}_day_i.npz files
# split (int): total number of splits in the dataset (typically 7 or 24)
# process data if not all files exist
for i in range(split):
filename_i = npzfile + "_{0}_processed.npz".format(i)
if path.exists(filename_i):
print("Using existing " + filename_i, end="\r")
else:
with np.load(npzfile + "_{0}.npz".format(i)) as data:
# categorical features
'''
# Approach 1a: using empty dictionaries
X_cat, convertDicts, counts = convertUStringToDistinctIntsDict(
data["X_cat"], convertDicts, counts
)
'''
'''
# Approach 1b: using empty np.unique
X_cat, convertDicts, counts = convertUStringToDistinctIntsUnique(
data["X_cat"], convertDicts, counts
)
'''
# Approach 2a: using pre-computed dictionaries
X_cat_t = np.zeros(data["X_cat_t"].shape)
for j in range(26):
for k, x in enumerate(data["X_cat_t"][j, :]):
X_cat_t[j, k] = convertDicts[j][x]
# continuous features
X_int = data["X_int"]
X_int[X_int < 0] = 0
# targets
y = data["y"]
np.savez_compressed(
filename_i,
# X_cat = X_cat,
X_cat=np.transpose(X_cat_t), # transpose of the data
X_int=X_int,
y=y,
)
print("Processed " + filename_i, end="\r")
print("")
# sanity check (applicable only if counts have been pre-computed & are re-computed)
# for j in range(26):
# if pre_comp_counts[j] != counts[j]:
# sys.exit("ERROR: Sanity check on counts has failed")
# print("\nSanity check on counts passed")
return
def concatCriteoAdData(
d_path,
d_file,
npzfile,
trafile,
days,
data_split,
randomize,
total_per_file,
total_count,
memory_map,
o_filename
):
# Concatenates different days and saves the result.
#
# Inputs:
# days (int): total number of days in the dataset (typically 7 or 24)
# d_path (str): path for {kaggle|terabyte}_day_i.npz files
# o_filename (str): output file name
#
# Output:
# o_file (str): output file path
if memory_map:
# dataset break up per fea
# tar_fea = 1 # single target
den_fea = 13 # 13 dense features
spa_fea = 26 # 26 sparse features
# tad_fea = tar_fea + den_fea
# tot_fea = tad_fea + spa_fea
# create offset per file
offset_per_file = np.array([0] + [x for x in total_per_file])
for i in range(days):
offset_per_file[i + 1] += offset_per_file[i]
'''
# Approach 1, 2 and 3 use indices, while Approach 4 does not use them
# create indices
indices = np.arange(total_count)
if data_split == "none":
if randomize == "total":
indices = np.random.permutation(indices)
else:
indices = np.array_split(indices, offset_per_file[1:-1])
# randomize train data (per day)
if randomize == "day": # or randomize == "total":
for i in range(len(indices) - 1):
indices[i] = np.random.permutation(indices[i])
print("Randomized indices per day ...")
train_indices = np.concatenate(indices[:-1])
test_indices = indices[-1]
# randomize train data (across days)
if randomize == "total":
train_indices = np.random.permutation(train_indices)
print("Randomized indices across days ...")
indices = np.concatenate((train_indices, test_indices))
# no reordering
# indices = np.arange(total_count)
'''
'''
# Approach 1: simple and slow (no grouping is used)
# check if data already exists
recreate_flag = False
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
if path.exists(filename_j):
print("Using existing " + filename_j)
else:
recreate_flag = True
# load, reorder and concatenate data (memmap all reordered files per feature)
if recreate_flag:
# init reordered files (.npy appended automatically)
z = np.zeros((total_count))
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered".format(j)
np.save(filename_j, z)
print("Creating " + filename_j)
for i in range(days):
filename_i = d_path + npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
X_cat_t = np.transpose(data["X_cat"])
X_int_t = np.transpose(data["X_int"])
y = data["y"]
size = len(y)
# sanity check
if total_per_file[i] != size:
sys.exit("ERROR: sanity check on number of samples failed")
# setup start and end ranges
start = offset_per_file[i]
end = offset_per_file[i + 1]
# print(filename_i)
# print("start=" + str(start) + " end=" + str(end)
# + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
fj = np.load(filename_j, mmap_mode='r+')
if j < tar_fea:
fj[indices[start:end]] = y
elif tar_fea <= j and j < tad_fea:
fj[indices[start:end]] = X_int_t[j - tar_fea, :]
else:
fj[indices[start:end]] = X_cat_t[j - tad_fea, :]
del fj
else:
print("Reordered fea files already exist, skipping ...")
# check if data already exists
recreate_flag = False
for i in range(days):
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
if path.exists(filename_i):
print("Using existing " + filename_i)
else:
recreate_flag = True
# split reordered data by files (memmap all reordered files per feature)
# on the day boundary del the file object and memmap again
if recreate_flag:
for i in range(days):
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
size = total_per_file[i]
X_int_t = np.zeros((den_fea, size))
X_cat_t = np.zeros((spa_fea, size))
# setup start and end ranges
start = offset_per_file[i]
end = offset_per_file[i + 1]
print("Creating " + filename_i)
# print("start=" + str(start) + " end=" + str(end)
# + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
fj = np.load(filename_j, mmap_mode='r')
if j < tar_fea:
y = fj[start:end]
elif tar_fea <= j and j < tad_fea:
X_int_t[j - tar_fea, :] = fj[start:end]
else:
X_cat_t[j - tad_fea, :] = fj[start:end]
del fj
np.savez_compressed(
filename_i,
X_cat=np.transpose(X_cat_t), # transpose of the data
X_int=np.transpose(X_int_t), # transpose of the data
y=y,
)
else:
print("Reordered day files already exist, skipping ...")
'''
'''
# Approach 2: group days
# check if data already exists
recreate_flag = False
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
if path.exists(filename_j):
print("Using existing " + filename_j)
else:
recreate_flag = True
# load, reorder and concatenate data (memmap all reordered files per feature)
if recreate_flag:
# init reordered files (.npy appended automatically)
z = np.zeros((total_count))
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered".format(j)
np.save(filename_j, z)
print("Creating " + filename_j)
group_day = 3 # e.g. 8, 4 or 3
group_num = days // group_day
file_group = [i*group_day for i in range(group_num)] + [days]
for ii in range(group_num):
# for last may be group_size != group_num, therefore reset it below
group_size = file_group[ii + 1] - file_group[ii]
X_cat_t = [0]*group_size
X_int_t = [0]*group_size
y = [0]*group_size
start = [0]*group_size
end = [0]*group_size
for ig in range(group_size):
i = file_group[ii] + ig
filename_i = d_path + npzfile + "_{0}_processed.npz".format(i)
# setup start and end ranges
start[ig] = offset_per_file[i]
end[ig] = offset_per_file[i + 1]
# print(filename_i)
# load a group of files
with np.load(filename_i) as data:
X_cat_t[ig] = np.transpose(data["X_cat"])
X_int_t[ig] = np.transpose(data["X_int"])
y[ig] = data["y"]
# sanity check
if total_per_file[i] != len(y[ig]):
sys.exit("ERROR: sanity check on number of samples failed")
# print("start=" + str(start) + " end=" + str(end)
# + " diff=" + str(end[ig]-start[ig]) + "=" + str(total_per_file[i]))
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
fj = np.load(filename_j, mmap_mode='r+')
for ig in range(group_size):
if j < tar_fea:
fj[indices[start[ig]:end[ig]]] = y[ig]
elif tar_fea <= j and j < tad_fea:
fj[indices[start[ig]:end[ig]]] = X_int_t[ig][j - tar_fea, :]
else:
fj[indices[start[ig]:end[ig]]] = X_cat_t[ig][j - tad_fea, :]
del fj
else:
print("Reordered fea files already exist, skipping ...")
# check if data already exists
recreate_flag = False
for i in range(days):
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
if path.exists(filename_i):
print("Using existing " + filename_i)
else:
recreate_flag = True
# split reordered data by files (memmap all reordered files per feature)
# on the day boundary del the file object and memmap again
if recreate_flag:
for ii in range(group_num):
# for last may be group_size != group_num, therefore reset it below
group_size = file_group[ii + 1] - file_group[ii]
X_cat_t= []; X_int_t = []
for ig in range(group_size):
i = file_group[ii] + ig
X_int_t.append(np.zeros((den_fea, total_per_file[i])))
X_cat_t.append(np.zeros((spa_fea, total_per_file[i])))
y = [0]*group_size
start = [0]*group_size
end = [0]*group_size
for j in range(tot_fea):
filename_j = trafile + "_{0}_reordered.npy".format(j)
fj = np.load(filename_j, mmap_mode='r')
# load a group of files
for ig in range(group_size):
i = file_group[ii] + ig
# setup start and end ranges
start[ig] = offset_per_file[i]
end[ig] = offset_per_file[i + 1]
# load data for the group of files
if j < tar_fea:
y[ig] = fj[start[ig]:end[ig]]
elif tar_fea <= j and j < tad_fea:
X_int_t[ig][j - tar_fea, :] = fj[start[ig]:end[ig]]
else:
X_cat_t[ig][j - tad_fea, :] = fj[start[ig]:end[ig]]
del fj
for ig in range(group_size):
i = file_group[ii] + ig
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
print("Creating " + filename_i)
np.savez_compressed(
filename_i,
X_cat=np.transpose(X_cat_t[ig]), # transpose of the data
X_int=np.transpose(X_int_t[ig]), # transpose of the data
y=y[ig],
)
else:
print("Reordered day files already exist, skipping ...")
'''
'''
# Approach 3: group features
# check if data already exists
group_fea = 5 # e.g. 8, 5 or 4
group_num = tot_fea // group_fea
if tot_fea % group_fea != 0: # sanity check
sys.exit("ERROR: the group_fea must divided tot_fea evenly.")
recreate_flag = False
for jn in range(group_num):
filename_j = trafile + "_{0}_reordered{1}.npy".format(
jn, group_fea
)
if path.exists(filename_j):
print("Using existing " + filename_j)
else:
recreate_flag = True
# load, reorder and concatenate data (memmap all reordered files per feature)
if recreate_flag:
# init reordered files (.npy appended automatically)
z = np.zeros((group_fea, total_count))
for jn in range(group_num):
filename_j = trafile + "_{0}_reordered{1}".format(
jn, group_fea
)
np.save(filename_j, z)
print("Creating " + filename_j)
for i in range(days):
filename_i = d_path + npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
X_cat_t = np.transpose(data["X_cat"])
X_int_t = np.transpose(data["X_int"])
y = data["y"]
size = len(y)
# sanity check
if total_per_file[i] != size:
sys.exit("ERROR: sanity check on number of samples failed")
# setup start and end ranges
start = offset_per_file[i]
end = offset_per_file[i + 1]
# print(filename_i)
# print("start=" + str(start) + " end=" + str(end)
# + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
for jn in range(group_num):
filename_j = trafile + "_{0}_reordered{1}.npy".format(
jn, group_fea
)
fj = np.load(filename_j, mmap_mode='r+')
for jg in range(group_fea):
j = jn * group_fea + jg
# print("j=" + str(j) + " jn=" + str(jn) + " jg=" + str(jg))
if j < tar_fea:
fj[jg, indices[start:end]] = y
elif tar_fea <= j and j < tad_fea:
fj[jg, indices[start:end]] = X_int_t[j - tar_fea, :]
else:
fj[jg, indices[start:end]] = X_cat_t[j - tad_fea, :]
del fj
else:
print("Reordered fea files already exist, skipping ...")
# check if data already exists
recreate_flag = False
for i in range(days):
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
if path.exists(filename_i):
print("Using existing" + filename_i)
else:
recreate_flag = True
# split reordered data by files (memmap all reordered files per feature)
# on the day boundary del the file object and memmap again
if recreate_flag:
for i in range(days):
filename_i = d_path + npzfile + "_{0}_reordered.npz".format(i)
size = total_per_file[i]
X_int_t = np.zeros((den_fea, size))
X_cat_t = np.zeros((spa_fea, size))
# setup start and end ranges
start = offset_per_file[i]
end = offset_per_file[i + 1]
print("Creating " + filename_i)
# print("start=" + str(start) + " end=" + str(end)
# + " diff=" + str(end - start) + "=" + str(total_per_file[i]))
for jn in range(group_num):
filename_j = trafile + "_{0}_reordered{1}.npy".format(
jn, group_fea
)
fj = np.load(filename_j, mmap_mode='r')
for jg in range(group_fea):
j = jn * group_fea + jg
# print("j=" + str(j) + " jn=" + str(jn) + " jg=" + str(jg))
if j < tar_fea:
y = fj[jg, start:end]
elif tar_fea <= j and j < tad_fea:
X_int_t[j - tar_fea, :] = fj[jg, start:end]
else:
X_cat_t[j - tad_fea, :] = fj[jg, start:end]
del fj
np.savez_compressed(
filename_i,
X_cat=np.transpose(X_cat_t), # transpose of the data
X_int=np.transpose(X_int_t), # transpose of the data
y=y,
)
else:
print("Reordered day files already exist, skipping ...")
'''
# Approach 4: Fisher-Yates-Rao (FYR) shuffle algorithm
# 1st pass of FYR shuffle
# check if data already exists
recreate_flag = False
for j in range(days):
filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j)
filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j)
filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j)
if (
path.exists(filename_j_y)
and path.exists(filename_j_d)
and path.exists(filename_j_s)
):
print(
"Using existing\n"
+ filename_j_y + "\n"
+ filename_j_d + "\n"
+ filename_j_s
)
else:
recreate_flag = True
# reorder across buckets using sampling
if recreate_flag:
# init intermediate files (.npy appended automatically)
for j in range(days):
filename_j_y = npzfile + "_{0}_intermediate_y".format(j)
filename_j_d = npzfile + "_{0}_intermediate_d".format(j)
filename_j_s = npzfile + "_{0}_intermediate_s".format(j)
np.save(filename_j_y, np.zeros((total_per_file[j])))
np.save(filename_j_d, np.zeros((total_per_file[j], den_fea)))
np.save(filename_j_s, np.zeros((total_per_file[j], spa_fea)))
# start processing files
total_counter = [0] * days
for i in range(days):
filename_i = npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
X_cat = data["X_cat"]
X_int = data["X_int"]
y = data["y"]
size = len(y)
# sanity check
if total_per_file[i] != size:
sys.exit("ERROR: sanity check on number of samples failed")
# debug prints
print("Reordering (1st pass) " + filename_i)
# create buckets using sampling of random ints
# from (discrete) uniform distribution
buckets = []
for _j in range(days):
buckets.append([])
counter = [0] * days
days_to_sample = days if data_split == "none" else days - 1
if randomize == "total":
rand_u = np.random.randint(low=0, high=days_to_sample, size=size)
for k in range(size):
# sample and make sure elements per buckets do not overflow
if data_split == "none" or i < days - 1:
# choose bucket
p = rand_u[k]
                    # retry if the bucket is full
while total_counter[p] + counter[p] >= total_per_file[p]:
p = np.random.randint(low=0, high=days_to_sample)
else: # preserve the last day/bucket if needed
p = i
buckets[p].append(k)
counter[p] += 1
else: # randomize is day or none
for k in range(size):
# do not sample, preserve the data in this bucket
p = i
buckets[p].append(k)
counter[p] += 1
# sanity check
if np.sum(counter) != size:
sys.exit("ERROR: sanity check on number of samples failed")
# debug prints
# print(counter)
# print(str(np.sum(counter)) + " = " + str(size))
# print([len(x) for x in buckets])
# print(total_counter)
            # partially fill the buckets
for j in range(days):
filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j)
filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j)
filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j)
start = total_counter[j]
end = total_counter[j] + counter[j]
# target buckets
fj_y = np.load(filename_j_y, mmap_mode='r+')
# print("start=" + str(start) + " end=" + str(end)
# + " end - start=" + str(end - start) + " "
# + str(fj_y[start:end].shape) + " "
# + str(len(buckets[j])))
fj_y[start:end] = y[buckets[j]]
del fj_y
# dense buckets
fj_d = np.load(filename_j_d, mmap_mode='r+')
# print("start=" + str(start) + " end=" + str(end)
# + " end - start=" + str(end - start) + " "
# + str(fj_d[start:end, :].shape) + " "
# + str(len(buckets[j])))
fj_d[start:end, :] = X_int[buckets[j], :]
del fj_d
# sparse buckets
fj_s = np.load(filename_j_s, mmap_mode='r+')
# print("start=" + str(start) + " end=" + str(end)
# + " end - start=" + str(end - start) + " "
# + str(fj_s[start:end, :].shape) + " "
# + str(len(buckets[j])))
fj_s[start:end, :] = X_cat[buckets[j], :]
del fj_s
# update counters for next step
total_counter[j] += counter[j]
# 2nd pass of FYR shuffle
# check if data already exists
for j in range(days):
filename_j = npzfile + "_{0}_reordered.npz".format(j)
if path.exists(filename_j):
print("Using existing " + filename_j)
else:
recreate_flag = True
# reorder within buckets
if recreate_flag:
for j in range(days):
filename_j_y = npzfile + "_{0}_intermediate_y.npy".format(j)
filename_j_d = npzfile + "_{0}_intermediate_d.npy".format(j)
filename_j_s = npzfile + "_{0}_intermediate_s.npy".format(j)
fj_y = np.load(filename_j_y)
fj_d = np.load(filename_j_d)
fj_s = np.load(filename_j_s)
indices = range(total_per_file[j])
if randomize == "day" or randomize == "total":
if data_split == "none" or j < days - 1:
indices = np.random.permutation(range(total_per_file[j]))
filename_r = npzfile + "_{0}_reordered.npz".format(j)
print("Reordering (2nd pass) " + filename_r)
np.savez_compressed(
filename_r,
X_cat=fj_s[indices, :],
X_int=fj_d[indices, :],
y=fj_y[indices],
)
'''
# sanity check (under no reordering norms should be zero)
for i in range(days):
filename_i_o = npzfile + "_{0}_processed.npz".format(i)
print(filename_i_o)
with np.load(filename_i_o) as data_original:
X_cat_o = data_original["X_cat"]
X_int_o = data_original["X_int"]
y_o = data_original["y"]
filename_i_r = npzfile + "_{0}_reordered.npz".format(i)
print(filename_i_r)
with np.load(filename_i_r) as data_reordered:
X_cat_r = data_reordered["X_cat"]
X_int_r = data_reordered["X_int"]
y_r = data_reordered["y"]
print(np.linalg.norm(y_o - y_r))
print(np.linalg.norm(X_int_o - X_int_r))
print(np.linalg.norm(X_cat_o - X_cat_r))
'''
else:
print("Concatenating multiple days into %s.npz file" % str(d_path + o_filename))
# load and concatenate data
for i in range(days):
filename_i = npzfile + "_{0}_processed.npz".format(i)
with np.load(filename_i) as data:
if i == 0:
X_cat = data["X_cat"]
X_int = data["X_int"]
y = data["y"]
else:
X_cat = np.concatenate((X_cat, data["X_cat"]))
X_int = np.concatenate((X_int, data["X_int"]))
y = np.concatenate((y, data["y"]))
print("Loaded day:", i, "y = 1:", len(y[y == 1]), "y = 0:", len(y[y == 0]))
with np.load(d_path + d_file + "_fea_count.npz") as data:
counts = data["counts"]
print("Loaded counts!")
np.savez_compressed(
d_path + o_filename + ".npz",
X_cat=X_cat,
X_int=X_int,
y=y,
counts=counts,
)
return d_path + o_filename + ".npz"
def transformCriteoAdData(X_cat, X_int, y, days, data_split, randomize, total_per_file):
# Transforms Criteo Kaggle or terabyte data by applying log transformation
# on dense features and converting everything to appropriate tensors.
#
# Inputs:
# X_cat (ndarray): array of integers corresponding to preprocessed
# categorical features
# X_int (ndarray): array of integers corresponding to dense features
# y (ndarray): array of bool corresponding to labels
# data_split(str): flag for splitting dataset into training/validation/test
# sets
# randomize (str): determines randomization scheme
# "none": no randomization
# "day": randomizes each day"s data (only works if split = True)
# "total": randomizes total dataset
#
# Outputs:
# if split:
# X_cat_train (tensor): sparse features for training set
# X_int_train (tensor): dense features for training set
# y_train (tensor): labels for training set
# X_cat_val (tensor): sparse features for validation set
# X_int_val (tensor): dense features for validation set
# y_val (tensor): labels for validation set
# X_cat_test (tensor): sparse features for test set
# X_int_test (tensor): dense features for test set
# y_test (tensor): labels for test set
# else:
# X_cat (tensor): sparse features
# X_int (tensor): dense features
# y (tensor): label
# define initial set of indices
indices = np.arange(len(y))
# create offset per file
offset_per_file = np.array([0] + [x for x in total_per_file])
for i in range(days):
offset_per_file[i + 1] += offset_per_file[i]
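    # Illustrative example: total_per_file = [3, 2, 4] yields
    # offset_per_file = [0, 3, 5, 9], i.e. cumulative sample offsets per day.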
# split dataset
if data_split == 'train':
indices = np.array_split(indices, offset_per_file[1:-1])
# randomize train data (per day)
if randomize == "day": # or randomize == "total":
for i in range(len(indices) - 1):
indices[i] = np.random.permutation(indices[i])
print("Randomized indices per day ...")
train_indices = np.concatenate(indices[:-1])
test_indices = indices[-1]
test_indices, val_indices = np.array_split(test_indices, 2)
print("Defined training and testing indices...")
# randomize train data (across days)
if randomize == "total":
train_indices = np.random.permutation(train_indices)
print("Randomized indices across days ...")
# indices = np.concatenate((train_indices, test_indices))
# create training, validation, and test sets
X_cat_train = X_cat[train_indices]
X_int_train = X_int[train_indices]
y_train = y[train_indices]
X_cat_val = X_cat[val_indices]
X_int_val = X_int[val_indices]
y_val = y[val_indices]
X_cat_test = X_cat[test_indices]
X_int_test = X_int[test_indices]
y_test = y[test_indices]
print("Split data according to indices...")
        # use np.int64 explicitly (np.long is no longer available in recent NumPy)
        X_cat_train = X_cat_train.astype(np.int64)
        X_int_train = np.log(X_int_train.astype(np.float32) + 1)
        y_train = y_train.astype(np.float32)
        X_cat_val = X_cat_val.astype(np.int64)
        X_int_val = np.log(X_int_val.astype(np.float32) + 1)
        y_val = y_val.astype(np.float32)
        X_cat_test = X_cat_test.astype(np.int64)
        X_int_test = np.log(X_int_test.astype(np.float32) + 1)
        y_test = y_test.astype(np.float32)
print("Converted to tensors...done!")
return (
X_cat_train,
X_int_train,
y_train,
X_cat_val,
X_int_val,
y_val,
X_cat_test,
X_int_test,
y_test,
)
else:
# randomize data
if randomize == "total":
indices = np.random.permutation(indices)
print("Randomized indices...")
        X_cat = X_cat[indices].astype(np.int64)
X_int = np.log(X_int[indices].astype(np.float32) + 1)
y = y[indices].astype(np.float32)
print("Converted to tensors...done!")
return (X_cat, X_int, y, [], [], [], [], [], [])
def getCriteoAdData(
datafile,
o_filename,
max_ind_range=-1,
sub_sample_rate=0.0,
days=7,
data_split='train',
randomize='total',
criteo_kaggle=True,
memory_map=False
):
# Passes through entire dataset and defines dictionaries for categorical
# features and determines the number of total categories.
#
# Inputs:
# datafile : path to downloaded raw data file
# o_filename (str): saves results under o_filename if filename is not ""
#
# Output:
# o_file (str): output file path
#split the datafile into path and filename
lstr = datafile.split("/")
d_path = "/".join(lstr[0:-1]) + "/"
d_file = lstr[-1].split(".")[0] if criteo_kaggle else lstr[-1]
npzfile = d_path + ((d_file + "_day") if criteo_kaggle else d_file)
trafile = d_path + ((d_file + "_fea") if criteo_kaggle else "fea")
# count number of datapoints in training set
total_file = d_path + d_file + "_day_count.npz"
if path.exists(total_file):
with np.load(total_file) as data:
total_per_file = list(data["total_per_file"])
total_count = np.sum(total_per_file)
print("Skipping counts per file (already exist)")
else:
total_count = 0
total_per_file = []
if criteo_kaggle:
# WARNING: The raw data consists of a single train.txt file
# Each line in the file is a sample, consisting of 13 continuous and
# 26 categorical features (an extra space indicates that feature is
# missing and will be interpreted as 0).
if path.exists(datafile):
print("Reading data from path=%s" % (datafile))
with open(str(datafile)) as f:
for _ in f:
total_count += 1
total_per_file.append(total_count)
# reset total per file due to split
num_data_per_split, extras = divmod(total_count, days)
total_per_file = [num_data_per_split] * days
for j in range(extras):
total_per_file[j] += 1
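            # Illustrative example: total_count = 10 and days = 7 give
            # num_data_per_split = 1, extras = 3, hence
            # total_per_file = [2, 2, 2, 1, 1, 1, 1].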
# split into days (simplifies code later on)
file_id = 0
boundary = total_per_file[file_id]
nf = open(npzfile + "_" + str(file_id), "w")
with open(str(datafile)) as f:
for j, line in enumerate(f):
if j == boundary:
nf.close()
file_id += 1
nf = open(npzfile + "_" + str(file_id), "w")
boundary += total_per_file[file_id]
nf.write(line)
nf.close()
else:
sys.exit("ERROR: Criteo Kaggle Display Ad Challenge Dataset path is invalid; please download from https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset")
else:
# WARNING: The raw data consist of day_0.gz,... ,day_23.gz text files
# Each line in the file is a sample, consisting of 13 continuous and
# 26 categorical features (an extra space indicates that feature is
# missing and will be interpreted as 0).
for i in range(days):
datafile_i = datafile + "_" + str(i) # + ".gz"
if path.exists(str(datafile_i)):
print("Reading data from path=%s" % (str(datafile_i)))
# file day_<number>
total_per_file_count = 0
with open(str(datafile_i)) as f:
for _ in f:
total_per_file_count += 1
total_per_file.append(total_per_file_count)
total_count += total_per_file_count
else:
sys.exit("ERROR: Criteo Terabyte Dataset path is invalid; please download from https://labs.criteo.com/2013/12/download-terabyte-click-logs")
# process a file worth of data and reinitialize data
    # note that a file may contain a single split or multiple splits
def process_one_file(
datfile,
npzfile,
split,
num_data_in_split,
):
with open(str(datfile)) as f:
y = np.zeros(num_data_in_split, dtype="i4") # 4 byte int
X_int = np.zeros((num_data_in_split, 13), dtype="i4") # 4 byte int
X_cat = np.zeros((num_data_in_split, 26), dtype="i4") # 4 byte int
if sub_sample_rate == 0.0:
rand_u = 1.0
else:
rand_u = np.random.uniform(low=0.0, high=1.0, size=num_data_in_split)
i = 0
for k, line in enumerate(f):
# process a line (data point)
line = line.split('\t')
# set missing values to zero
for j in range(len(line)):
if (line[j] == '') or (line[j] == '\n'):
line[j] = '0'
# sub-sample data by dropping zero targets, if needed
target = np.int32(line[0])
if target == 0 and \
(rand_u if sub_sample_rate == 0.0 else rand_u[k]) < sub_sample_rate:
continue
y[i] = target
X_int[i] = np.array(line[1:14], dtype=np.int32)
if max_ind_range > 0:
X_cat[i] = np.array(
list(map(lambda x: int(x, 16) % max_ind_range, line[14:])),
dtype=np.int32
)
else:
X_cat[i] = np.array(
list(map(lambda x: int(x, 16), line[14:])),
dtype=np.int32
)
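                # Illustrative example: a raw categorical field such as "ff"
                # parses as int("ff", 16) = 255; with a (hypothetical)
                # max_ind_range = 100 it would be folded to 255 % 100 = 55.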
# count uniques
for j in range(26):
convertDicts[j][X_cat[i][j]] = 1
# debug prints
print(
"Load %d/%d Split: %d Label True: %d Stored: %d"
% (
i,
num_data_in_split,
split,
target,
y[i],
),
end="\r",
)
i += 1
# store num_data_in_split samples or extras at the end of file
# count uniques
# X_cat_t = np.transpose(X_cat)
# for j in range(26):
# for x in X_cat_t[j,:]:
# convertDicts[j][x] = 1
# store parsed
filename_s = npzfile + "_{0}.npz".format(split)
if path.exists(filename_s):
print("\nSkip existing " + filename_s)
else:
np.savez_compressed(
filename_s,
X_int=X_int[0:i, :],
# X_cat=X_cat[0:i, :],
X_cat_t=np.transpose(X_cat[0:i, :]), # transpose of the data
y=y[0:i],
)
print("\nSaved " + npzfile + "_{0}.npz!".format(split))
return i
# create all splits (reuse existing files if possible)
recreate_flag = False
convertDicts = [{} for _ in range(26)]
    # WARNING: to get reproducible sub-sampling results you must reset the seed below
# np.random.seed(123)
# in this case there is a single split in each day
for i in range(days):
datfile_i = npzfile + "_{0}".format(i) # + ".gz"
npzfile_i = npzfile + "_{0}.npz".format(i)
npzfile_p = npzfile + "_{0}_processed.npz".format(i)
if path.exists(npzfile_i):
print("Skip existing " + npzfile_i)
elif path.exists(npzfile_p):
print("Skip existing " + npzfile_p)
else:
recreate_flag = True
total_per_file[i] = process_one_file(
datfile_i,
npzfile,
i,
total_per_file[i],
)
# report and save total into a file
total_count = np.sum(total_per_file)
if not path.exists(total_file):
np.savez_compressed(total_file, total_per_file=total_per_file)
print("Total number of samples:", total_count)
print("Divided into days/splits:\n", total_per_file)
# dictionary files
counts = np.zeros(26, dtype=np.int32)
if recreate_flag:
# create dictionaries
for j in range(26):
for i, x in enumerate(convertDicts[j]):
convertDicts[j][x] = i
dict_file_j = d_path + d_file + "_fea_dict_{0}.npz".format(j)
if not path.exists(dict_file_j):
np.savez_compressed(
dict_file_j,
unique=np.array(list(convertDicts[j]), dtype=np.int32)
)
counts[j] = len(convertDicts[j])
# store (uniques and) counts
count_file = d_path + d_file + "_fea_count.npz"
if not path.exists(count_file):
np.savez_compressed(count_file, counts=counts)
else:
# create dictionaries (from existing files)
for j in range(26):
with np.load(d_path + d_file + "_fea_dict_{0}.npz".format(j)) as data:
unique = data["unique"]
for i, x in enumerate(unique):
convertDicts[j][x] = i
# load (uniques and) counts
with np.load(d_path + d_file + "_fea_count.npz") as data:
counts = data["counts"]
# process all splits
processCriteoAdData(d_path, d_file, npzfile, days, convertDicts, counts)
o_file = concatCriteoAdData(
d_path,
d_file,
npzfile,
trafile,
days,
data_split,
randomize,
total_per_file,
total_count,
memory_map,
o_filename
)
return o_file
def loadDataset(
dataset,
max_ind_range,
sub_sample_rate,
randomize,
data_split,
raw_path="",
pro_data="",
memory_map=False
):
# dataset
if dataset == "kaggle":
days = 7
o_filename = "kaggleAdDisplayChallenge_processed"
elif dataset == "terabyte":
days = 24
o_filename = "terabyte_processed"
else:
raise(ValueError("Data set option is not supported"))
# split the datafile into path and filename
lstr = raw_path.split("/")
d_path = "/".join(lstr[0:-1]) + "/"
d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1]
npzfile = d_path + ((d_file + "_day") if dataset == "kaggle" else d_file)
# trafile = d_path + ((d_file + "_fea") if dataset == "kaggle" else "fea")
# check if pre-processed data is available
data_ready = True
if memory_map:
for i in range(days):
reo_data = d_path + npzfile + "_{0}_reordered.npz".format(i)
if not path.exists(str(reo_data)):
data_ready = False
else:
if not path.exists(str(pro_data)):
data_ready = False
# pre-process data if needed
    # WARNING: when memory mapping is used we get a collection of files
if data_ready:
print("Reading pre-processed data=%s" % (str(pro_data)))
file = str(pro_data)
else:
print("Reading raw data=%s" % (str(raw_path)))
file = getCriteoAdData(
raw_path,
o_filename,
max_ind_range,
sub_sample_rate,
days,
data_split,
randomize,
dataset == "kaggle",
memory_map
)
return file, days
if __name__ == "__main__":
### import packages ###
import argparse
### parse arguments ###
parser = argparse.ArgumentParser(
description="Preprocess Criteo dataset"
)
# model related parameters
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
parser.add_argument("--data-randomize", type=str, default="total") # or day or none
parser.add_argument("--memory-map", action="store_true", default=False)
parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte
parser.add_argument("--raw-data-file", type=str, default="")
parser.add_argument("--processed-data-file", type=str, default="")
args = parser.parse_args()
loadDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: generate inputs and targets for the dlrm benchmark
# The inputs and targets are generated according to one of the following three options
# 1) random distribution
# 2) synthetic distribution, based on unique accesses and distances between them
# i) R. Hassan, A. Harris, N. Topham and A. Efthymiou "Synthetic Trace-Driven
# Simulation of Cache Memory", IEEE AINAM'07
# 3) public data set
# i) Criteo Kaggle Display Advertising Challenge Dataset
# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
# ii) Criteo Terabyte Dataset
# https://labs.criteo.com/2013/12/download-terabyte-click-logs
from __future__ import absolute_import, division, print_function, unicode_literals
# others
# from os import path
import sys
import bisect
import collections
import data_utils
# numpy
import numpy as np
from numpy import random as ra
# Kaggle Display Advertising Challenge Dataset
# dataset (str): name of dataset (Kaggle or Terabyte)
# randomize (str): determines randomization scheme
# 'none': no randomization
# 'day': randomizes each day's data (only works if split = True)
# 'total': randomizes total dataset
# split (bool) : to split into train, test, validation data-sets
def read_dataset(
dataset,
max_ind_range,
sub_sample_rate,
mini_batch_size,
num_batches,
randomize,
split="train",
raw_data="",
processed_data="",
memory_map=False,
inference_only=False,
):
# split the datafile into path and filename
lstr = raw_data.split("/")
d_path = "/".join(lstr[0:-1]) + "/"
d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1]
# npzfile = d_path + ((d_file + "_day") if dataset == "kaggle" else d_file)
# trafile = d_path + ((d_file + "_fea") if dataset == "kaggle" else "fea")
# load
print("Loading %s dataset..." % dataset)
nbatches = 0
file, days = data_utils.loadDataset(
dataset, max_ind_range, sub_sample_rate, randomize,
split, raw_data, processed_data, memory_map
)
if memory_map:
# WARNING: at this point the data has been reordered and shuffled across files
# e.g. day_<number>_reordered.npz, what remains is simply to read and feed
# the data from each file, going in the order of days file-by-file, to the
# model during training.
sys.exit("ERROR: --memory-map option is not supported for Caffe2 version.")
else:
# load and preprocess data
with np.load(file) as data:
X_int = data["X_int"]
X_cat = data["X_cat"]
y = data["y"]
counts = data["counts"]
# get a number of samples per day
total_file = d_path + d_file + "_day_count.npz"
with np.load(total_file) as data:
total_per_file = data["total_per_file"]
# transform
(X_cat_train, X_int_train, y_train, X_cat_val, X_int_val, y_val,
X_cat_test, X_int_test, y_test) = data_utils.transformCriteoAdData(
X_cat, X_int, y, days, split, randomize, total_per_file
)
ln_emb = counts
m_den = X_int_train.shape[1]
n_emb = len(counts)
print("Sparse features = %d, Dense features = %d" % (n_emb, m_den))
# adjust parameters
def assemble_samples(X_cat, X_int, y, max_ind_range, print_message):
if max_ind_range > 0:
X_cat = X_cat % max_ind_range
nsamples = len(y)
data_size = nsamples
# using floor is equivalent to dropping last mini-batch (drop_last = True)
nbatches = int(np.floor((data_size * 1.0) / mini_batch_size))
print(print_message)
if num_batches != 0 and num_batches < nbatches:
print(
"Limiting to %d batches of the total % d batches"
% (num_batches, nbatches)
)
nbatches = num_batches
else:
print("Total number of batches %d" % nbatches)
# data main loop
lX = []
lS_lengths = []
lS_indices = []
lT = []
for j in range(0, nbatches):
# number of data points in a batch
print("Reading in batch: %d / %d" % (j + 1, nbatches), end="\r")
n = min(mini_batch_size, data_size - (j * mini_batch_size))
# dense feature
idx_start = j * mini_batch_size
lX.append(
(X_int[idx_start : (idx_start + n)]).astype(np.float32)
)
# Targets - outputs
lT.append(
(y[idx_start : idx_start + n])
.reshape(-1, 1)
.astype(np.int32)
)
# sparse feature (sparse indices)
lS_emb_indices = []
# for each embedding generate a list of n lookups,
# where each lookup is composed of multiple sparse indices
for size in range(n_emb):
lS_batch_indices = []
for _b in range(n):
                        # num of sparse indices to be used per embedding; for Criteo
                        # Kaggle data it is 1 because each feature is categorical
# store lengths and indices
lS_batch_indices += (
(X_cat[idx_start + _b][size].reshape(-1))
.astype(np.int32)
).tolist()
lS_emb_indices.append(lS_batch_indices)
lS_indices.append(lS_emb_indices)
                # lengths are all 1 for Criteo Kaggle (one categorical index per lookup)
lS_lengths.append(
[(list(np.ones(n).astype(np.int32))) for _ in range(n_emb)]
)
print("\n")
return nbatches, lX, lS_lengths, lS_indices, lT
# adjust training data
(nbatches, lX, lS_lengths, lS_indices, lT) = assemble_samples(
X_cat_train, X_int_train, y_train, max_ind_range, "Training data"
)
# adjust testing data
(nbatches_t, lX_t, lS_lengths_t, lS_indices_t, lT_t) = assemble_samples(
X_cat_test, X_int_test, y_test, max_ind_range, "Testing data"
)
#end if memory_map
return (
nbatches,
lX,
lS_lengths,
lS_indices,
lT,
nbatches_t,
lX_t,
lS_lengths_t,
lS_indices_t,
lT_t,
ln_emb,
m_den,
)
def generate_random_data(
m_den,
ln_emb,
data_size,
num_batches,
mini_batch_size,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
num_targets=1,
round_targets=False,
data_generation="random",
trace_file="",
enable_padding=False,
):
nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))
if num_batches != 0:
nbatches = num_batches
data_size = nbatches * mini_batch_size
# print("Total number of batches %d" % nbatches)
# inputs and targets
lT = []
lX = []
lS_lengths = []
lS_indices = []
for j in range(0, nbatches):
# number of data points in a batch
n = min(mini_batch_size, data_size - (j * mini_batch_size))
# generate a batch of dense and sparse features
if data_generation == "random":
(Xt, lS_emb_lengths, lS_emb_indices) = generate_uniform_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed
)
elif data_generation == "synthetic":
(Xt, lS_emb_lengths, lS_emb_indices) = generate_synthetic_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
trace_file,
enable_padding
)
else:
sys.exit(
"ERROR: --data-generation=" + data_generation + " is not supported"
)
# dense feature
lX.append(Xt)
# sparse feature (sparse indices)
lS_lengths.append(lS_emb_lengths)
lS_indices.append(lS_emb_indices)
# generate a batch of target (probability of a click)
P = generate_random_output_batch(n, num_targets, round_targets)
lT.append(P)
return (nbatches, lX, lS_lengths, lS_indices, lT)
def generate_random_output_batch(n, num_targets=1, round_targets=False):
# target (probability of a click)
if round_targets:
P = np.round(ra.rand(n, num_targets).astype(np.float32)).astype(np.int32)
else:
P = ra.rand(n, num_targets).astype(np.float32)
return P
# uniform distribution (input data)
def generate_uniform_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
):
# dense feature
Xt = ra.rand(n, m_den).astype(np.float32)
# sparse feature (sparse indices)
lS_emb_lengths = []
lS_emb_indices = []
# for each embedding generate a list of n lookups,
# where each lookup is composed of multiple sparse indices
for size in ln_emb:
lS_batch_lengths = []
lS_batch_indices = []
for _ in range(n):
            # num of sparse indices to be used per embedding: fixed at
            # num_indices_per_lookup, or random in [1, num_indices_per_lookup]
if num_indices_per_lookup_fixed:
sparse_group_size = np.int32(num_indices_per_lookup)
else:
# random between [1,num_indices_per_lookup])
r = ra.random(1)
sparse_group_size = np.int32(
max(1, np.round(r * min(size, num_indices_per_lookup))[0])
)
# sparse indices to be used per embedding
r = ra.random(sparse_group_size)
sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int32))
# reset sparse_group_size in case some index duplicates were removed
sparse_group_size = np.int32(sparse_group.size)
# store lengths and indices
lS_batch_lengths += [sparse_group_size]
lS_batch_indices += sparse_group.tolist()
lS_emb_lengths.append(lS_batch_lengths)
lS_emb_indices.append(lS_batch_indices)
return (Xt, lS_emb_lengths, lS_emb_indices)
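# Illustrative output layout (hypothetical values): with ln_emb = [10, 20],
# n = 2 and a fixed lookup size of 2, the function could return
#   lS_emb_lengths = [[2, 2], [2, 2]]
#   lS_emb_indices = [[3, 7, 1, 4], [5, 12, 0, 9]]
# i.e. per-table lengths plus the per-table indices concatenated across the
# batch (a CSR-style lengths/values layout). Because np.unique drops duplicate
# random indices, individual lengths may come out smaller than requested.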
# synthetic distribution (input data)
def generate_synthetic_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
trace_file,
enable_padding=False,
):
# dense feature
Xt = ra.rand(n, m_den).astype(np.float32)
# sparse feature (sparse indices)
lS_emb_lengths = []
lS_emb_indices = []
# for each embedding generate a list of n lookups,
# where each lookup is composed of multiple sparse indices
for i, size in enumerate(ln_emb):
lS_batch_lengths = []
lS_batch_indices = []
for _ in range(n):
            # num of sparse indices to be used per embedding: fixed at
            # num_indices_per_lookup, or random in [1, num_indices_per_lookup]
if num_indices_per_lookup_fixed:
sparse_group_size = np.int32(num_indices_per_lookup)
else:
# random between [1,num_indices_per_lookup])
r = ra.random(1)
sparse_group_size = np.int32(
max(1, np.round(r * min(size, num_indices_per_lookup))[0])
)
# sparse indices to be used per embedding
file_path = trace_file
line_accesses, list_sd, cumm_sd = read_dist_from_file(
file_path.replace("j", str(i))
)
# debug print
# print('input')
# print(line_accesses); print(list_sd); print(cumm_sd);
# print(sparse_group_size)
# approach 1: rand
# r = trace_generate_rand(
# line_accesses, list_sd, cumm_sd, sparse_group_size, enable_padding
# )
# approach 2: lru
r = trace_generate_lru(
line_accesses, list_sd, cumm_sd, sparse_group_size, enable_padding
)
# WARNING: if the distribution in the file is not consistent with
# embedding table dimensions, below mod guards against out of
# range access
sparse_group = np.unique(r).astype(np.int32)
minsg = np.min(sparse_group)
maxsg = np.max(sparse_group)
if (minsg < 0) or (size <= maxsg):
print(
"WARNING: distribution is inconsistent with embedding "
+ "table size (using mod to recover and continue)"
)
sparse_group = np.mod(sparse_group, size).astype(np.int32)
# sparse_group = np.unique(np.array(np.mod(r, size-1)).astype(np.int32))
# reset sparse_group_size in case some index duplicates were removed
sparse_group_size = np.int32(sparse_group.size)
# store lengths and indices
lS_batch_lengths += [sparse_group_size]
lS_batch_indices += sparse_group.tolist()
lS_emb_lengths.append(lS_batch_lengths)
lS_emb_indices.append(lS_batch_indices)
return (Xt, lS_emb_lengths, lS_emb_indices)
def generate_stack_distance(cumm_val, cumm_dist, max_i, i, enable_padding=False):
u = ra.rand(1)
if i < max_i:
# only generate stack distances up to the number of new references seen so far
j = bisect.bisect(cumm_val, i) - 1
fi = cumm_dist[j]
u *= fi # shrink distribution support to exclude last values
elif enable_padding:
# WARNING: disable generation of new references (once all have been seen)
fi = cumm_dist[0]
u = (1.0 - fi) * u + fi # remap distribution support to exclude first value
for (j, f) in enumerate(cumm_dist):
if u <= f:
return cumm_val[j]
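# Illustrative inverse-CDF lookup (ignoring the truncation/padding branches):
# with cumm_val = [1, 2, 3], cumm_dist = [0.5, 0.8, 1.0] and a uniform draw
# u = 0.6, the loop above returns cumm_val[1] = 2, i.e. stack distances are
# sampled according to the empirical cumulative distribution.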
# WARNING: global define, must be consistent across all synthetic functions
cache_line_size = 1
def trace_generate_lru(
line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False
):
max_sd = list_sd[-1]
l = len(line_accesses)
i = 0
ztrace = []
for _ in range(out_trace_len):
sd = generate_stack_distance(list_sd, cumm_sd, max_sd, i, enable_padding)
mem_ref_within_line = 0 # floor(ra.rand(1)*cache_line_size) #0
# generate memory reference
if sd == 0: # new reference #
line_ref = line_accesses.pop(0)
line_accesses.append(line_ref)
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
i += 1
else: # existing reference #
line_ref = line_accesses[l - sd]
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
line_accesses.pop(l - sd)
line_accesses.append(line_ref)
# save generated memory reference
ztrace.append(mem_ref)
return ztrace
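# Illustrative walk-through (hypothetical inputs): with line_accesses = [1, 2, 3]
# and sampled stack distances [0, 2, 1], trace_generate_lru emits [1, 3, 3]:
# sd = 0 recycles the oldest line (1) to the back of the list, sd = 2
# re-references the line at LRU depth 2 (3) and moves it to the back, and
# sd = 1 re-references the most recently used line (3 again).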
def trace_generate_rand(
line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False
):
max_sd = list_sd[-1]
l = len(line_accesses) # !!!Unique,
i = 0
ztrace = []
for _ in range(out_trace_len):
sd = generate_stack_distance(list_sd, cumm_sd, max_sd, i, enable_padding)
mem_ref_within_line = 0 # floor(ra.rand(1)*cache_line_size) #0
# generate memory reference
if sd == 0: # new reference #
line_ref = line_accesses.pop(0)
line_accesses.append(line_ref)
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
i += 1
else: # existing reference #
line_ref = line_accesses[l - sd]
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
ztrace.append(mem_ref)
return ztrace
def trace_profile(trace, enable_padding=False):
# number of elements in the array (assuming 1D)
# n = trace.size
rstack = [] # S
stack_distances = [] # SDS
line_accesses = [] # L
for x in trace:
r = np.uint64(x / cache_line_size)
l = len(rstack)
try: # found #
i = rstack.index(r)
# WARNING: I believe below is the correct depth in terms of meaning of the
# algorithm, but that is not what seems to be in the paper alg.
# -1 can be subtracted if we defined the distance between
# consecutive accesses (e.g. r, r) as 0 rather than 1.
sd = l - i # - 1
# push r to the end of stack_distances
stack_distances.insert(0, sd)
# remove r from its position and insert to the top of stack
rstack.pop(i) # rstack.remove(r)
rstack.insert(l - 1, r)
except ValueError: # not found #
sd = 0 # -1
# push r to the end of stack_distances/line_accesses
stack_distances.insert(0, sd)
line_accesses.insert(0, r)
# push r to the top of stack
rstack.insert(l, r)
if enable_padding:
# WARNING: notice that as the ratio between the number of samples (l)
# and cardinality (c) of a sample increases the probability of
# generating a sample gets smaller and smaller because there are
# few new samples compared to repeated samples. This means that for a
# long trace with relatively small cardinality it will take longer to
# generate all new samples and therefore obtain full distribution support
# and hence it takes longer for distribution to resemble the original.
# Therefore, we may pad the number of new samples to be on par with
# average number of samples l/c artificially.
l = len(stack_distances)
c = max(stack_distances)
padding = int(np.ceil(l / c))
stack_distances = stack_distances + [0] * padding
return (rstack, stack_distances, line_accesses)
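# Illustrative example (hypothetical trace, enable_padding=False):
# trace_profile([5, 7, 5, 5]) returns stack_distances = [1, 2, 0, 0] and
# line_accesses = [7, 5], both newest-first; sd = 0 marks a first-time
# reference, larger values count how deep in the LRU stack the line was found.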
# auxiliary read/write routines
def read_trace_from_file(file_path):
try:
with open(file_path) as f:
if args.trace_file_binary_type:
array = np.fromfile(f, dtype=np.uint64)
trace = array.astype(np.uint64).tolist()
else:
line = f.readline()
trace = list(map(lambda x: np.uint64(x), line.split(", ")))
return trace
except Exception:
print("ERROR: no input trace file has been provided")
def write_trace_to_file(file_path, trace):
try:
if args.trace_file_binary_type:
with open(file_path, "wb+") as f:
np.array(trace).astype(np.uint64).tofile(f)
else:
with open(file_path, "w+") as f:
s = str(trace)
f.write(s[1 : len(s) - 1])
except Exception:
print("ERROR: no output trace file has been provided")
def read_dist_from_file(file_path):
try:
with open(file_path, "r") as f:
lines = f.read().splitlines()
except Exception:
print("Wrong file or file path")
# read unique accesses
unique_accesses = [int(el) for el in lines[0].split(", ")]
# read cumulative distribution (elements are passed as two separate lists)
list_sd = [int(el) for el in lines[1].split(", ")]
cumm_sd = [float(el) for el in lines[2].split(", ")]
return unique_accesses, list_sd, cumm_sd
def write_dist_to_file(file_path, unique_accesses, list_sd, cumm_sd):
try:
with open(file_path, "w") as f:
            # unique_accesses
s = str(unique_accesses)
f.write(s[1 : len(s) - 1] + "\n")
# list_sd
s = str(list_sd)
f.write(s[1 : len(s) - 1] + "\n")
# cumm_sd
s = str(cumm_sd)
f.write(s[1 : len(s) - 1] + "\n")
except Exception:
print("Wrong file or file path")
if __name__ == "__main__":
import sys
import operator
import argparse
### parse arguments ###
parser = argparse.ArgumentParser(description="Generate Synthetic Distributions")
parser.add_argument("--trace-file", type=str, default="./input/trace.log")
parser.add_argument("--trace-file-binary-type", type=bool, default=False)
parser.add_argument("--trace-enable-padding", type=bool, default=False)
parser.add_argument("--dist-file", type=str, default="./input/dist.log")
parser.add_argument(
"--synthetic-file", type=str, default="./input/trace_synthetic.log"
)
parser.add_argument("--numpy-rand-seed", type=int, default=123)
parser.add_argument("--print-precision", type=int, default=5)
args = parser.parse_args()
### some basic setup ###
np.random.seed(args.numpy_rand_seed)
np.set_printoptions(precision=args.print_precision)
### read trace ###
trace = read_trace_from_file(args.trace_file)
# print(trace)
### profile trace ###
(_, stack_distances, line_accesses) = trace_profile(
trace, args.trace_enable_padding
)
stack_distances.reverse()
line_accesses.reverse()
# print(line_accesses)
# print(stack_distances)
### compute probability distribution ###
# count items
l = len(stack_distances)
dc = sorted(
collections.Counter(stack_distances).items(), key=operator.itemgetter(0)
)
# create a distribution
list_sd = list(map(lambda tuple_x_k: tuple_x_k[0], dc)) # x = tuple_x_k[0]
dist_sd = list(
map(lambda tuple_x_k: tuple_x_k[1] / float(l), dc)
) # k = tuple_x_k[1]
cumm_sd = [] # np.cumsum(dc).tolist() #prefixsum
for i, (_, k) in enumerate(dc):
if i == 0:
cumm_sd.append(k / float(l))
else:
# add the 2nd element of the i-th tuple in the dist_sd list
cumm_sd.append(cumm_sd[i - 1] + (k / float(l)))
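    # Illustrative example: dc = [(0, 2), (1, 1), (2, 1)] with l = 4 gives
    # dist_sd = [0.5, 0.25, 0.25] and cumm_sd = [0.5, 0.75, 1.0].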
### write stack_distance and line_accesses to a file ###
write_dist_to_file(args.dist_file, line_accesses, list_sd, cumm_sd)
    ### generate corresponding synthetic trace ###
# line_accesses, list_sd, cumm_sd = read_dist_from_file(args.dist_file)
synthetic_trace = trace_generate_lru(
line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding
)
# synthetic_trace = trace_generate_rand(
# line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding
# )
write_trace_to_file(args.synthetic_file, synthetic_trace)
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import numpy as np
from torch.utils.data import Dataset
import torch
import time
import math
from tqdm import tqdm
import argparse
class DataLoader:
"""
DataLoader dedicated for the Criteo Terabyte Click Logs dataset
"""
def __init__(
self,
data_filename,
data_directory,
days,
batch_size,
max_ind_range=-1,
split="train",
drop_last_batch=False
):
self.data_filename = data_filename
self.data_directory = data_directory
self.days = days
self.batch_size = batch_size
self.max_ind_range = max_ind_range
total_file = os.path.join(
data_directory,
data_filename + "_day_count.npz"
)
with np.load(total_file) as data:
total_per_file = data["total_per_file"][np.array(days)]
self.length = sum(total_per_file)
if split == "test" or split == "val":
self.length = int(np.ceil(self.length / 2.))
self.split = split
self.drop_last_batch = drop_last_batch
def __iter__(self):
return iter(
_batch_generator(
self.data_filename, self.data_directory, self.days,
self.batch_size, self.split, self.drop_last_batch, self.max_ind_range
)
)
def __len__(self):
if self.drop_last_batch:
return self.length // self.batch_size
else:
return math.ceil(self.length / self.batch_size)
def _transform_features(
x_int_batch, x_cat_batch, y_batch, max_ind_range, flag_input_torch_tensor=False
):
if max_ind_range > 0:
x_cat_batch = x_cat_batch % max_ind_range
if flag_input_torch_tensor:
x_int_batch = torch.log(x_int_batch.clone().detach().type(torch.float) + 1)
x_cat_batch = x_cat_batch.clone().detach().type(torch.long)
y_batch = y_batch.clone().detach().type(torch.float32).view(-1, 1)
else:
x_int_batch = torch.log(torch.tensor(x_int_batch, dtype=torch.float) + 1)
x_cat_batch = torch.tensor(x_cat_batch, dtype=torch.long)
y_batch = torch.tensor(y_batch, dtype=torch.float32).view(-1, 1)
batch_size = x_cat_batch.shape[0]
feature_count = x_cat_batch.shape[1]
lS_o = torch.arange(batch_size).reshape(1, -1).repeat(feature_count, 1)
return x_int_batch, lS_o, x_cat_batch.t(), y_batch.view(-1, 1)
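# For example (illustration only): with a batch of 4 samples and 26 categorical
# features, lS_o above is a (26, 4) tensor whose rows are all [0, 1, 2, 3] and
# x_cat_batch.t() is the matching (26, 4) transpose, so each embedding table
# receives one index per sample; the dense features are log(1 + x) transformed.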
def _batch_generator(
data_filename, data_directory, days, batch_size, split, drop_last, max_ind_range
):
previous_file = None
for day in days:
filepath = os.path.join(
data_directory,
data_filename + "_{}_reordered.npz".format(day)
)
# print('Loading file: ', filepath)
with np.load(filepath) as data:
x_int = data["X_int"]
x_cat = data["X_cat"]
y = data["y"]
samples_in_file = y.shape[0]
batch_start_idx = 0
if split == "test" or split == "val":
length = int(np.ceil(samples_in_file / 2.))
if split == "test":
samples_in_file = length
elif split == "val":
batch_start_idx = samples_in_file - length
while batch_start_idx < samples_in_file - batch_size:
missing_samples = batch_size
if previous_file is not None:
missing_samples -= previous_file['y'].shape[0]
current_slice = slice(batch_start_idx, batch_start_idx + missing_samples)
x_int_batch = x_int[current_slice]
x_cat_batch = x_cat[current_slice]
y_batch = y[current_slice]
if previous_file is not None:
x_int_batch = np.concatenate(
[previous_file['x_int'], x_int_batch],
axis=0
)
x_cat_batch = np.concatenate(
[previous_file['x_cat'], x_cat_batch],
axis=0
)
y_batch = np.concatenate([previous_file['y'], y_batch], axis=0)
previous_file = None
if x_int_batch.shape[0] != batch_size:
raise ValueError('should not happen')
yield _transform_features(x_int_batch, x_cat_batch, y_batch, max_ind_range)
batch_start_idx += missing_samples
if batch_start_idx != samples_in_file:
current_slice = slice(batch_start_idx, samples_in_file)
if previous_file is not None:
previous_file = {
'x_int' : np.concatenate(
[previous_file['x_int'], x_int[current_slice]],
axis=0
),
'x_cat' : np.concatenate(
[previous_file['x_cat'], x_cat[current_slice]],
axis=0
),
'y' : np.concatenate([previous_file['y'], y[current_slice]], axis=0)
}
else:
previous_file = {
'x_int' : x_int[current_slice],
'x_cat' : x_cat[current_slice],
'y' : y[current_slice]
}
if not drop_last:
yield _transform_features(
previous_file['x_int'],
previous_file['x_cat'],
previous_file['y'],
max_ind_range
)
def _test():
generator = _batch_generator(
data_filename='day',
data_directory='/input',
days=range(23),
split="train",
batch_size=2048,
# remaining required arguments, passed with the module's usual defaults
drop_last=False,
max_ind_range=-1
)
t1 = time.time()
for x_int, lS_o, x_cat, y in generator:
t2 = time.time()
time_diff = t2 - t1
t1 = t2
print(
"time {} x_int.shape: {} lS_o.shape: {} x_cat.shape: {} y.shape: {}".format(
time_diff, x_int.shape, lS_o.shape, x_cat.shape, y.shape
)
)
class CriteoBinDataset(Dataset):
"""Binary version of criteo dataset."""
def __init__(self, data_file, counts_file,
batch_size=1, max_ind_range=-1, bytes_per_feature=4):
# dataset
self.tar_fea = 1 # single target
self.den_fea = 13 # 13 dense features
self.spa_fea = 26 # 26 sparse features
self.tad_fea = self.tar_fea + self.den_fea
self.tot_fea = self.tad_fea + self.spa_fea
self.batch_size = batch_size
self.max_ind_range = max_ind_range
self.bytes_per_entry = (bytes_per_feature * self.tot_fea * batch_size)
self.num_entries = math.ceil(os.path.getsize(data_file) / self.bytes_per_entry)
print('data file:', data_file, 'number of batches:', self.num_entries)
self.file = open(data_file, 'rb')
with np.load(counts_file) as data:
self.counts = data["counts"]
# hardcoded for now
self.m_den = 13
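# Worked example of the batch layout (illustration only): each sample stores
# tot_fea = 1 + 13 + 26 = 40 int32 values, so with bytes_per_feature=4 and
# batch_size=2048 one batch occupies 4 * 40 * 2048 = 327,680 bytes;
# __getitem__ below seeks to idx * bytes_per_entry and decodes column 0 as the
# target, columns 1:14 as dense features and columns 14: as sparse indices.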
def __len__(self):
return self.num_entries
def __getitem__(self, idx):
self.file.seek(idx * self.bytes_per_entry, 0)
raw_data = self.file.read(self.bytes_per_entry)
array = np.frombuffer(raw_data, dtype=np.int32)
tensor = torch.from_numpy(array).view((-1, self.tot_fea))
return _transform_features(x_int_batch=tensor[:, 1:14],
x_cat_batch=tensor[:, 14:],
y_batch=tensor[:, 0],
max_ind_range=self.max_ind_range,
flag_input_torch_tensor=True)
def numpy_to_binary(input_files, output_file_path, split='train'):
"""Convert the data to a binary format to be read with CriteoBinDataset."""
# WARNING - both categorical and numerical data must fit into int32 for
# the following code to work correctly
with open(output_file_path, 'wb') as output_file:
if split == 'train':
for input_file in input_files:
print('Processing file: ', input_file)
np_data = np.load(input_file)
np_data = np.concatenate([np_data['y'].reshape(-1, 1),
np_data['X_int'],
np_data['X_cat']], axis=1)
np_data = np_data.astype(np.int32)
output_file.write(np_data.tobytes())
else:
assert len(input_files) == 1
np_data = np.load(input_files[0])
np_data = np.concatenate([np_data['y'].reshape(-1, 1),
np_data['X_int'],
np_data['X_cat']], axis=1)
np_data = np_data.astype(np.int32)
samples_in_file = np_data.shape[0]
midpoint = int(np.ceil(samples_in_file / 2.))
if split == "test":
begin = 0
end = midpoint
elif split == "val":
begin = midpoint
end = samples_in_file
else:
raise ValueError('Unknown split value: ', split)
output_file.write(np_data[begin:end].tobytes())
def _preprocess(args):
train_files = ['{}_{}_reordered.npz'.format(args.input_data_prefix, day) for
day in range(0, 23)]
test_valid_file = args.input_data_prefix + '_23_reordered.npz'
os.makedirs(args.output_directory, exist_ok=True)
for split in ['train', 'val', 'test']:
print('Running preprocessing for split =', split)
output_file = os.path.join(args.output_directory,
'{}_data.bin'.format(split))
input_files = train_files if split == 'train' else [test_valid_file]
numpy_to_binary(input_files=input_files,
output_file_path=output_file,
split=split)
def _test_bin():
parser = argparse.ArgumentParser()
parser.add_argument('--output_directory', required=True)
parser.add_argument('--input_data_prefix', required=True)
parser.add_argument('--split', choices=['train', 'test', 'val'],
required=True)
args = parser.parse_args()
# _preprocess(args)
binary_data_file = os.path.join(args.output_directory,
'{}_data.bin'.format(args.split))
counts_file = os.path.join(args.output_directory, 'day_fea_count.npz')
dataset_binary = CriteoBinDataset(data_file=binary_data_file,
counts_file=counts_file,
batch_size=2048,)
from dlrm_data_pytorch import CriteoDataset, collate_wrapper_criteo
binary_loader = torch.utils.data.DataLoader(
dataset_binary,
batch_size=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
)
original_dataset = CriteoDataset(
dataset='terabyte',
max_ind_range=10 * 1000 * 1000,
sub_sample_rate=1,
randomize=True,
split=args.split,
raw_path=args.input_data_prefix,
pro_data='dummy_string',
memory_map=True
)
original_loader = torch.utils.data.DataLoader(
original_dataset,
batch_size=2048,
shuffle=False,
num_workers=0,
collate_fn=collate_wrapper_criteo,
pin_memory=False,
drop_last=False,
)
assert len(dataset_binary) == len(original_loader)
for i, (old_batch, new_batch) in tqdm(enumerate(zip(original_loader,
binary_loader)),
total=len(dataset_binary)):
for j in range(len(new_batch)):
if not np.array_equal(old_batch[j], new_batch[j]):
raise ValueError('FAILED: Datasets not equal')
if i > len(dataset_binary):
break
print('PASSED')
if __name__ == '__main__':
_test()
_test_bin()
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: generate inputs and targets for the dlrm benchmark
# The inputs and outputs are generated according to one of the following three options
# 1) random distribution
# 2) synthetic distribution, based on unique accesses and distances between them
# i) R. Hassan, A. Harris, N. Topham and A. Efthymiou "Synthetic Trace-Driven
# Simulation of Cache Memory", IEEE AINAM'07
# 3) public data set
# i) Criteo Kaggle Display Advertising Challenge Dataset
# https://labs.criteo.com/2014/02/kaggle-display-advertising-challenge-dataset
# ii) Criteo Terabyte Dataset
# https://labs.criteo.com/2013/12/download-terabyte-click-logs
from __future__ import absolute_import, division, print_function, unicode_literals
# others
from os import path
import bisect
import collections
import sys
from . import data_utils
# numpy
import numpy as np
from numpy import random as ra
# pytorch
import torch
from torch.utils.data import Dataset, RandomSampler
from . import data_loader_terabyte
# Kaggle Display Advertising Challenge Dataset
# dataset (str): name of dataset (Kaggle or Terabyte)
# randomize (str): determines randomization scheme
# "none": no randomization
# "day": randomizes each day"s data (only works if split = True)
# "total": randomizes total dataset
# split (bool) : to split into train, test, validation data-sets
class CriteoDataset(Dataset):
def __init__(
self,
dataset,
max_ind_range,
sub_sample_rate,
randomize,
split="train",
raw_path="",
pro_data="",
memory_map=False
):
# dataset
# tar_fea = 1 # single target
den_fea = 13 # 13 dense features
# spa_fea = 26 # 26 sparse features
# tad_fea = tar_fea + den_fea
# tot_fea = tad_fea + spa_fea
if dataset == "kaggle":
days = 7
out_file = "kaggleAdDisplayChallenge_processed"
elif dataset == "terabyte":
days = 24
out_file = "terabyte_processed"
else:
raise ValueError("Data set option is not supported")
self.max_ind_range = max_ind_range
self.memory_map = memory_map
# split the datafile into path and filename
lstr = raw_path.split("/")
self.d_path = "/".join(lstr[0:-1]) + "/"
self.d_file = lstr[-1].split(".")[0] if dataset == "kaggle" else lstr[-1]
self.npzfile = self.d_path + (
(self.d_file + "_day") if dataset == "kaggle" else self.d_file
)
self.trafile = self.d_path + (
(self.d_file + "_fea") if dataset == "kaggle" else "fea"
)
# check if pre-processed data is available
data_ready = True
if memory_map:
for i in range(days):
reo_data = self.npzfile + "_{0}_reordered.npz".format(i)
if not path.exists(str(reo_data)):
data_ready = False
else:
if not path.exists(str(pro_data)):
data_ready = False
# pre-process data if needed
# WARNING: when memory mapping is used we get a collection of files
if data_ready:
print("Reading pre-processed data=%s" % (str(pro_data)))
file = str(pro_data)
else:
print("Reading raw data=%s" % (str(raw_path)))
file = data_utils.getCriteoAdData(
raw_path,
out_file,
max_ind_range,
sub_sample_rate,
days,
split,
randomize,
dataset == "kaggle",
memory_map
)
# get a number of samples per day
total_file = self.d_path + self.d_file + "_day_count.npz"
with np.load(total_file) as data:
total_per_file = data["total_per_file"]
# compute offsets per file
self.offset_per_file = np.array([0] + [x for x in total_per_file])
for i in range(days):
self.offset_per_file[i + 1] += self.offset_per_file[i]
# print(self.offset_per_file)
# setup data
if memory_map:
# setup the training/testing split
self.split = split
if split == 'none' or split == 'train':
self.day = 0
self.max_day_range = days if split == 'none' else days - 1
elif split == 'test' or split == 'val':
self.day = days - 1
num_samples = self.offset_per_file[days] - \
self.offset_per_file[days - 1]
self.test_size = int(np.ceil(num_samples / 2.))
self.val_size = num_samples - self.test_size
else:
sys.exit("ERROR: dataset split is neither none, nor train or test.")
'''
# text
print("text")
for i in range(days):
fi = self.npzfile + "_{0}".format(i)
with open(fi) as data:
ttt = 0; nnn = 0
for _j, line in enumerate(data):
ttt +=1
if np.int32(line[0]) > 0:
nnn +=1
print("day=" + str(i) + " total=" + str(ttt) + " non-zeros="
+ str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%")
# processed
print("processed")
for i in range(days):
fi = self.npzfile + "_{0}_processed.npz".format(i)
with np.load(fi) as data:
yyy = data["y"]
ttt = len(yyy)
nnn = np.count_nonzero(yyy)
print("day=" + str(i) + " total=" + str(ttt) + " non-zeros="
+ str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%")
# reordered
print("reordered")
for i in range(days):
fi = self.npzfile + "_{0}_reordered.npz".format(i)
with np.load(fi) as data:
yyy = data["y"]
ttt = len(yyy)
nnn = np.count_nonzero(yyy)
print("day=" + str(i) + " total=" + str(ttt) + " non-zeros="
+ str(nnn) + " ratio=" +str((nnn * 100.) / ttt) + "%")
'''
# load unique counts
with np.load(self.d_path + self.d_file + "_fea_count.npz") as data:
self.counts = data["counts"]
self.m_den = den_fea # X_int.shape[1]
self.n_emb = len(self.counts)
print("Sparse features= %d, Dense features= %d" % (self.n_emb, self.m_den))
# Load the test data
# Only a single day is used for testing
if self.split == 'test' or self.split == 'val':
# only a single day is used for testing
fi = self.npzfile + "_{0}_reordered.npz".format(
self.day
)
with np.load(fi) as data:
self.X_int = data["X_int"] # continuous feature
self.X_cat = data["X_cat"] # categorical feature
self.y = data["y"] # target
else:
# load and preprocess data
with np.load(file) as data:
X_int = data["X_int"] # continuous feature
X_cat = data["X_cat"] # categorical feature
y = data["y"] # target
self.counts = data["counts"]
self.m_den = X_int.shape[1] # den_fea
self.n_emb = len(self.counts)
print("Sparse fea = %d, Dense fea = %d" % (self.n_emb, self.m_den))
# create reordering
indices = np.arange(len(y))
if split == "none":
# randomize all data
if randomize == "total":
indices = np.random.permutation(indices)
print("Randomized indices...")
X_int[indices] = X_int
X_cat[indices] = X_cat
y[indices] = y
else:
indices = np.array_split(indices, self.offset_per_file[1:-1])
# randomize train data (per day)
if randomize == "day": # or randomize == "total":
for i in range(len(indices) - 1):
indices[i] = np.random.permutation(indices[i])
print("Randomized indices per day ...")
train_indices = np.concatenate(indices[:-1])
test_indices = indices[-1]
test_indices, val_indices = np.array_split(test_indices, 2)
print("Defined %s indices..." % (split))
# randomize train data (across days)
if randomize == "total":
train_indices = np.random.permutation(train_indices)
print("Randomized indices across days ...")
# create training, validation, and test sets
if split == 'train':
self.X_int = [X_int[i] for i in train_indices]
self.X_cat = [X_cat[i] for i in train_indices]
self.y = [y[i] for i in train_indices]
elif split == 'val':
self.X_int = [X_int[i] for i in val_indices]
self.X_cat = [X_cat[i] for i in val_indices]
self.y = [y[i] for i in val_indices]
elif split == 'test':
self.X_int = [X_int[i] for i in test_indices]
self.X_cat = [X_cat[i] for i in test_indices]
self.y = [y[i] for i in test_indices]
print("Split data according to indices...")
def __getitem__(self, index):
if isinstance(index, slice):
return [
self[idx] for idx in range(
index.start or 0, index.stop or len(self), index.step or 1
)
]
if self.memory_map:
if self.split == 'none' or self.split == 'train':
# check if we need to switch to the next day and load its data
if index == self.offset_per_file[self.day]:
# print("day_boundary switch", index)
self.day_boundary = self.offset_per_file[self.day]
fi = self.npzfile + "_{0}_reordered.npz".format(
self.day
)
# print('Loading file: ', fi)
with np.load(fi) as data:
self.X_int = data["X_int"] # continuous feature
self.X_cat = data["X_cat"] # categorical feature
self.y = data["y"] # target
self.day = (self.day + 1) % self.max_day_range
i = index - self.day_boundary
elif self.split == 'test' or self.split == 'val':
# only a single day is used for testing
i = index + (0 if self.split == 'test' else self.test_size)
else:
sys.exit("ERROR: dataset split is neither none, nor train or test.")
else:
i = index
if self.max_ind_range > 0:
return self.X_int[i], self.X_cat[i] % self.max_ind_range, self.y[i]
else:
return self.X_int[i], self.X_cat[i], self.y[i]
def _default_preprocess(self, X_int, X_cat, y):
X_int = torch.log(torch.tensor(X_int, dtype=torch.float) + 1)
if self.max_ind_range > 0:
X_cat = torch.tensor(X_cat % self.max_ind_range, dtype=torch.long)
else:
X_cat = torch.tensor(X_cat, dtype=torch.long)
y = torch.tensor(y.astype(np.float32))
return X_int, X_cat, y
def __len__(self):
if self.memory_map:
if self.split == 'none':
return self.offset_per_file[-1]
elif self.split == 'train':
return self.offset_per_file[-2]
elif self.split == 'test':
return self.test_size
elif self.split == 'val':
return self.val_size
else:
sys.exit("ERROR: dataset split is neither none, nor train nor test.")
else:
return len(self.y)
def collate_wrapper_criteo(list_of_tuples):
# where each tuple is (X_int, X_cat, y)
transposed_data = list(zip(*list_of_tuples))
X_int = torch.log(torch.tensor(transposed_data[0], dtype=torch.float) + 1)
X_cat = torch.tensor(transposed_data[1], dtype=torch.long)
T = torch.tensor(transposed_data[2], dtype=torch.float32).view(-1, 1)
batchSize = X_cat.shape[0]
featureCnt = X_cat.shape[1]
lS_i = [X_cat[:, i] for i in range(featureCnt)]
lS_o = [torch.tensor(range(batchSize)) for _ in range(featureCnt)]
return X_int, torch.stack(lS_o), torch.stack(lS_i), T
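# Example shapes (illustrative, for the Criteo layout of 13 dense / 26 sparse
# features): collating 128 tuples yields X_int of shape (128, 13),
# lS_o and lS_i of shape (26, 128) each, and targets T of shape (128, 1).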
def ensure_dataset_preprocessed(args, d_path):
_ = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
_ = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"test",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
for split in ['train', 'val', 'test']:
print('Running preprocessing for split =', split)
train_files = ['{}_{}_reordered.npz'.format(args.raw_data_file, day)
for
day in range(0, 23)]
test_valid_file = args.raw_data_file + '_23_reordered.npz'
output_file = d_path + '_{}.bin'.format(split)
input_files = train_files if split == 'train' else [test_valid_file]
data_loader_terabyte.numpy_to_binary(input_files=input_files,
output_file_path=output_file,
split=split)
def make_criteo_data_and_loaders(args):
if args.mlperf_logging and args.memory_map and args.data_set == "terabyte":
# more efficient for larger batches
data_directory = path.dirname(args.raw_data_file)
if args.mlperf_bin_loader:
lstr = args.processed_data_file.split("/")
d_path = "/".join(lstr[0:-1]) + "/" + lstr[-1].split(".")[0]
train_file = d_path + "_train.bin"
test_file = d_path + "_test.bin"
# val_file = d_path + "_val.bin"
counts_file = args.raw_data_file + '_fea_count.npz'
if any(not path.exists(p) for p in [train_file,
test_file,
counts_file]):
ensure_dataset_preprocessed(args, d_path)
train_data = data_loader_terabyte.CriteoBinDataset(
data_file=train_file,
counts_file=counts_file,
batch_size=args.mini_batch_size,
max_ind_range=args.max_ind_range
)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=None,
batch_sampler=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
sampler=RandomSampler(train_data) if args.mlperf_bin_shuffle else None
)
test_data = data_loader_terabyte.CriteoBinDataset(
data_file=test_file,
counts_file=counts_file,
batch_size=args.test_mini_batch_size,
max_ind_range=args.max_ind_range
)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=None,
batch_sampler=None,
shuffle=False,
num_workers=0,
collate_fn=None,
pin_memory=False,
drop_last=False,
)
else:
data_filename = args.raw_data_file.split("/")[-1]
train_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
test_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"test",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
train_loader = data_loader_terabyte.DataLoader(
data_directory=data_directory,
data_filename=data_filename,
days=list(range(23)),
batch_size=args.mini_batch_size,
max_ind_range=args.max_ind_range,
split="train"
)
test_loader = data_loader_terabyte.DataLoader(
data_directory=data_directory,
data_filename=data_filename,
days=[23],
batch_size=args.test_mini_batch_size,
max_ind_range=args.max_ind_range,
split="test"
)
else:
train_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
test_data = CriteoDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"test",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=args.mini_batch_size,
shuffle=False,
num_workers=args.num_workers,
collate_fn=collate_wrapper_criteo,
pin_memory=False,
drop_last=False, # True
)
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=args.test_mini_batch_size,
shuffle=False,
num_workers=args.test_num_workers,
collate_fn=collate_wrapper_criteo,
pin_memory=False,
drop_last=False, # True
)
return train_data, train_loader, test_data, test_loader
# uniform distribution (input data)
class RandomDataset(Dataset):
def __init__(
self,
m_den,
ln_emb,
data_size,
num_batches,
mini_batch_size,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
num_targets=1,
round_targets=False,
data_generation="random",
trace_file="",
enable_padding=False,
reset_seed_on_access=False,
rand_seed=0
):
# compute batch size
nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))
if num_batches != 0:
nbatches = num_batches
data_size = nbatches * mini_batch_size
# print("Total number of batches %d" % nbatches)
# save args (recompute data_size if needed)
self.m_den = m_den
self.ln_emb = ln_emb
self.data_size = data_size
self.num_batches = nbatches
self.mini_batch_size = mini_batch_size
self.num_indices_per_lookup = num_indices_per_lookup
self.num_indices_per_lookup_fixed = num_indices_per_lookup_fixed
self.num_targets = num_targets
self.round_targets = round_targets
self.data_generation = data_generation
self.trace_file = trace_file
self.enable_padding = enable_padding
self.reset_seed_on_access = reset_seed_on_access
self.rand_seed = rand_seed
def reset_numpy_seed(self, numpy_rand_seed):
np.random.seed(numpy_rand_seed)
# torch.manual_seed(numpy_rand_seed)
def __getitem__(self, index):
if isinstance(index, slice):
return [
self[idx] for idx in range(
index.start or 0, index.stop or len(self), index.step or 1
)
]
# WARNING: reset seed on access to first element
# (e.g. if same random samples needed across epochs)
if self.reset_seed_on_access and index == 0:
self.reset_numpy_seed(self.rand_seed)
# number of data points in a batch
n = min(self.mini_batch_size, self.data_size - (index * self.mini_batch_size))
# generate a batch of dense and sparse features
if self.data_generation == "random":
(X, lS_o, lS_i) = generate_uniform_input_batch(
self.m_den,
self.ln_emb,
n,
self.num_indices_per_lookup,
self.num_indices_per_lookup_fixed
)
elif self.data_generation == "synthetic":
(X, lS_o, lS_i) = generate_synthetic_input_batch(
self.m_den,
self.ln_emb,
n,
self.num_indices_per_lookup,
self.num_indices_per_lookup_fixed,
self.trace_file,
self.enable_padding
)
else:
sys.exit(
"ERROR: --data-generation=" + self.data_generation + " is not supported"
)
# generate a batch of target (probability of a click)
T = generate_random_output_batch(n, self.num_targets, self.round_targets)
return (X, lS_o, lS_i, T)
def __len__(self):
# WARNING: note that we produce batches of outputs in __getitem__
# therefore we should use num_batches rather than data_size below
return self.num_batches
def collate_wrapper_random(list_of_tuples):
# where each tuple is (X, lS_o, lS_i, T)
(X, lS_o, lS_i, T) = list_of_tuples[0]
return (X,
torch.stack(lS_o),
lS_i,
T)
def make_random_data_and_loader(args, ln_emb, m_den):
train_data = RandomDataset(
m_den,
ln_emb,
args.data_size,
args.num_batches,
args.mini_batch_size,
args.num_indices_per_lookup,
args.num_indices_per_lookup_fixed,
1, # num_targets
args.round_targets,
args.data_generation,
args.data_trace_file,
args.data_trace_enable_padding,
reset_seed_on_access=True,
rand_seed=args.numpy_rand_seed
) # WARNING: generates a batch of lookups at once
train_loader = torch.utils.data.DataLoader(
train_data,
batch_size=1,
shuffle=False,
num_workers=args.num_workers,
collate_fn=collate_wrapper_random,
pin_memory=False,
drop_last=False, # True
)
return train_data, train_loader
def generate_random_data(
m_den,
ln_emb,
data_size,
num_batches,
mini_batch_size,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
num_targets=1,
round_targets=False,
data_generation="random",
trace_file="",
enable_padding=False,
):
nbatches = int(np.ceil((data_size * 1.0) / mini_batch_size))
if num_batches != 0:
nbatches = num_batches
data_size = nbatches * mini_batch_size
# print("Total number of batches %d" % nbatches)
# inputs
lT = []
lX = []
lS_offsets = []
lS_indices = []
for j in range(0, nbatches):
# number of data points in a batch
n = min(mini_batch_size, data_size - (j * mini_batch_size))
# generate a batch of dense and sparse features
if data_generation == "random":
(Xt, lS_emb_offsets, lS_emb_indices) = generate_uniform_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed
)
elif data_generation == "synthetic":
(Xt, lS_emb_offsets, lS_emb_indices) = generate_synthetic_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
trace_file,
enable_padding
)
else:
sys.exit(
"ERROR: --data-generation=" + data_generation + " is not supported"
)
# dense feature
lX.append(Xt)
# sparse feature (sparse indices)
lS_offsets.append(lS_emb_offsets)
lS_indices.append(lS_emb_indices)
# generate a batch of target (probability of a click)
P = generate_random_output_batch(n, num_targets, round_targets)
lT.append(P)
return (nbatches, lX, lS_offsets, lS_indices, lT)
def generate_random_output_batch(n, num_targets, round_targets=False):
# target (probability of a click)
if round_targets:
P = np.round(ra.rand(n, num_targets).astype(np.float32)).astype(np.float32)
else:
P = ra.rand(n, num_targets).astype(np.float32)
return torch.tensor(P)
# uniform distribution (input data)
def generate_uniform_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
):
# dense feature
Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))
# sparse feature (sparse indices)
lS_emb_offsets = []
lS_emb_indices = []
# for each embedding generate a list of n lookups,
# where each lookup is composed of multiple sparse indices
for size in ln_emb:
lS_batch_offsets = []
lS_batch_indices = []
offset = 0
for _ in range(n):
# num of sparse indices to be used per embedding (between 1 and num_indices_per_lookup)
if num_indices_per_lookup_fixed:
sparse_group_size = np.int64(num_indices_per_lookup)
else:
# random between [1,num_indices_per_lookup])
r = ra.random(1)
sparse_group_size = np.int64(
np.round(max([1.0], r * min(size, num_indices_per_lookup)))
)
# sparse indices to be used per embedding
r = ra.random(sparse_group_size)
sparse_group = np.unique(np.round(r * (size - 1)).astype(np.int64))
# reset sparse_group_size in case some index duplicates were removed
sparse_group_size = np.int64(sparse_group.size)
# store lengths and indices
lS_batch_offsets += [offset]
lS_batch_indices += sparse_group.tolist()
# update offset for next iteration
offset += sparse_group_size
lS_emb_offsets.append(torch.tensor(lS_batch_offsets))
lS_emb_indices.append(torch.tensor(lS_batch_indices))
return (Xt, lS_emb_offsets, lS_emb_indices)
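# Example of the offsets/indices encoding produced above (illustration only):
# for a single table and n = 3 lookups with deduplicated group sizes 2, 1 and 3,
# lS_batch_offsets = [0, 2, 3] and lS_batch_indices holds the 6 indices back to
# back, which is the format expected by nn.EmbeddingBag.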
# synthetic distribution (input data)
def generate_synthetic_input_batch(
m_den,
ln_emb,
n,
num_indices_per_lookup,
num_indices_per_lookup_fixed,
trace_file,
enable_padding=False,
):
# dense feature
Xt = torch.tensor(ra.rand(n, m_den).astype(np.float32))
# sparse feature (sparse indices)
lS_emb_offsets = []
lS_emb_indices = []
# for each embedding generate a list of n lookups,
# where each lookup is composed of multiple sparse indices
for i, size in enumerate(ln_emb):
lS_batch_offsets = []
lS_batch_indices = []
offset = 0
for _ in range(n):
# num of sparse indices to be used per embedding (between 1 and num_indices_per_lookup)
if num_indices_per_lookup_fixed:
sparse_group_size = np.int64(num_indices_per_lookup)
else:
# random between [1,num_indices_per_lookup])
r = ra.random(1)
sparse_group_size = np.int64(
max(1, np.round(r * min(size, num_indices_per_lookup))[0])
)
# sparse indices to be used per embedding
file_path = trace_file
line_accesses, list_sd, cumm_sd = read_dist_from_file(
file_path.replace("j", str(i))
)
# debug prints
# print("input")
# print(line_accesses); print(list_sd); print(cumm_sd);
# print(sparse_group_size)
# approach 1: rand
# r = trace_generate_rand(
# line_accesses, list_sd, cumm_sd, sparse_group_size, enable_padding
# )
# approach 2: lru
r = trace_generate_lru(
line_accesses, list_sd, cumm_sd, sparse_group_size, enable_padding
)
# WARNING: if the distribution in the file is not consistent
# with embedding table dimensions, below mod guards against out
# of range access
sparse_group = np.unique(r).astype(np.int64)
minsg = np.min(sparse_group)
maxsg = np.max(sparse_group)
if (minsg < 0) or (size <= maxsg):
print(
"WARNING: distribution is inconsistent with embedding "
+ "table size (using mod to recover and continue)"
)
sparse_group = np.mod(sparse_group, size).astype(np.int64)
# sparse_group = np.unique(np.array(np.mod(r, size-1)).astype(np.int64))
# reset sparse_group_size in case some index duplicates were removed
sparse_group_size = np.int64(sparse_group.size)
# store lengths and indices
lS_batch_offsets += [offset]
lS_batch_indices += sparse_group.tolist()
# update offset for next iteration
offset += sparse_group_size
lS_emb_offsets.append(torch.tensor(lS_batch_offsets))
lS_emb_indices.append(torch.tensor(lS_batch_indices))
return (Xt, lS_emb_offsets, lS_emb_indices)
def generate_stack_distance(cumm_val, cumm_dist, max_i, i, enable_padding=False):
u = ra.rand(1)
if i < max_i:
# only generate stack distances up to the number of new references seen so far
j = bisect.bisect(cumm_val, i) - 1
fi = cumm_dist[j]
u *= fi # shrink distribution support to exclude last values
elif enable_padding:
# WARNING: disable generation of new references (once all have been seen)
fi = cumm_dist[0]
u = (1.0 - fi) * u + fi # remap distribution support to exclude first value
for (j, f) in enumerate(cumm_dist):
if u <= f:
return cumm_val[j]
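# Example of the inverse-transform sampling above (illustration only): with
# cumm_val = [0, 1, 3], cumm_dist = [0.5, 0.8, 1.0] and u left unscaled,
# a draw of u = 0.6 falls in the second bin and the stack distance 1 is returned;
# when i < max_i, u is first shrunk by cumm_dist[j] so that only distances not
# exceeding the number of unique references seen so far can be drawn.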
# WARNING: global define, must be consistent across all synthetic functions
cache_line_size = 1
def trace_generate_lru(
line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False
):
max_sd = list_sd[-1]
l = len(line_accesses)
i = 0
ztrace = []
for _ in range(out_trace_len):
sd = generate_stack_distance(list_sd, cumm_sd, max_sd, i, enable_padding)
mem_ref_within_line = 0 # floor(ra.rand(1)*cache_line_size) #0
# generate memory reference
if sd == 0: # new reference #
line_ref = line_accesses.pop(0)
line_accesses.append(line_ref)
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
i += 1
else: # existing reference #
line_ref = line_accesses[l - sd]
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
line_accesses.pop(l - sd)
line_accesses.append(line_ref)
# save generated memory reference
ztrace.append(mem_ref)
return ztrace
def trace_generate_rand(
line_accesses, list_sd, cumm_sd, out_trace_len, enable_padding=False
):
max_sd = list_sd[-1]
l = len(line_accesses) # !!!Unique,
i = 0
ztrace = []
for _ in range(out_trace_len):
sd = generate_stack_distance(list_sd, cumm_sd, max_sd, i, enable_padding)
mem_ref_within_line = 0 # floor(ra.rand(1)*cache_line_size) #0
# generate memory reference
if sd == 0: # new reference #
line_ref = line_accesses.pop(0)
line_accesses.append(line_ref)
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
i += 1
else: # existing reference #
line_ref = line_accesses[l - sd]
mem_ref = np.uint64(line_ref * cache_line_size + mem_ref_within_line)
ztrace.append(mem_ref)
return ztrace
def trace_profile(trace, enable_padding=False):
# number of elements in the array (assuming 1D)
# n = trace.size
rstack = [] # S
stack_distances = [] # SDS
line_accesses = [] # L
for x in trace:
r = np.uint64(x / cache_line_size)
l = len(rstack)
try: # found #
i = rstack.index(r)
# WARNING: I believe below is the correct depth in terms of meaning of the
# algorithm, but that is not what seems to be in the paper alg.
# -1 can be subtracted if we defined the distance between
# consecutive accesses (e.g. r, r) as 0 rather than 1.
sd = l - i # - 1
# push r to the end of stack_distances
stack_distances.insert(0, sd)
# remove r from its position and insert to the top of stack
rstack.pop(i) # rstack.remove(r)
rstack.insert(l - 1, r)
except ValueError: # not found #
sd = 0 # -1
# push r to the end of stack_distances/line_accesses
stack_distances.insert(0, sd)
line_accesses.insert(0, r)
# push r to the top of stack
rstack.insert(l, r)
if enable_padding:
# WARNING: notice that as the ratio between the number of samples (l)
# and cardinality (c) of a sample increases the probability of
# generating a sample gets smaller and smaller because there are
# few new samples compared to repeated samples. This means that for a
# long trace with relatively small cardinality it will take longer to
# generate all new samples and therefore obtain full distribution support
# and hence it takes longer for distribution to resemble the original.
# Therefore, we may pad the number of new samples to be on par with
# average number of samples l/c artificially.
l = len(stack_distances)
c = max(stack_distances)
padding = int(np.ceil(l / c))
stack_distances = stack_distances + [0] * padding
return (rstack, stack_distances, line_accesses)
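# Worked example (illustration only): trace_profile([1, 2, 1, 3, 2]) returns
# stack_distances = [3, 0, 2, 0, 0] and line_accesses = [3, 2, 1], both in
# most-recent-first order; reversing them (as done in __main__ below) gives the
# chronological stack distances [0, 0, 2, 0, 3] and unique accesses [1, 2, 3].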
# auxiliary read/write routines
def read_trace_from_file(file_path):
try:
with open(file_path) as f:
if args.trace_file_binary_type:
array = np.fromfile(f, dtype=np.uint64)
trace = array.astype(np.uint64).tolist()
else:
line = f.readline()
trace = list(map(lambda x: np.uint64(x), line.split(", ")))
return trace
except Exception:
print("ERROR: no input trace file has been provided")
def write_trace_to_file(file_path, trace):
try:
if args.trace_file_binary_type:
with open(file_path, "wb+") as f:
np.array(trace).astype(np.uint64).tofile(f)
else:
with open(file_path, "w+") as f:
s = str(trace)
f.write(s[1 : len(s) - 1])
except Exception:
print("ERROR: no output trace file has been provided")
def read_dist_from_file(file_path):
try:
with open(file_path, "r") as f:
lines = f.read().splitlines()
except Exception:
print("Wrong file or file path")
# read unique accesses
unique_accesses = [int(el) for el in lines[0].split(", ")]
# read cumulative distribution (elements are passed as two separate lists)
list_sd = [int(el) for el in lines[1].split(", ")]
cumm_sd = [float(el) for el in lines[2].split(", ")]
return unique_accesses, list_sd, cumm_sd
def write_dist_to_file(file_path, unique_accesses, list_sd, cumm_sd):
try:
with open(file_path, "w") as f:
# unique_accesses
s = str(unique_accesses)
f.write(s[1 : len(s) - 1] + "\n")
# list_sd
s = str(list_sd)
f.write(s[1 : len(s) - 1] + "\n")
# cumm_sd
s = str(cumm_sd)
f.write(s[1 : len(s) - 1] + "\n")
except Exception:
print("Wrong file or file path")
if __name__ == "__main__":
import sys
import operator
import argparse
### parse arguments ###
parser = argparse.ArgumentParser(description="Generate Synthetic Distributions")
parser.add_argument("--trace-file", type=str, default="./input/trace.log")
parser.add_argument("--trace-file-binary-type", type=bool, default=False)
parser.add_argument("--trace-enable-padding", type=bool, default=False)
parser.add_argument("--dist-file", type=str, default="./input/dist.log")
parser.add_argument(
"--synthetic-file", type=str, default="./input/trace_synthetic.log"
)
parser.add_argument("--numpy-rand-seed", type=int, default=123)
parser.add_argument("--print-precision", type=int, default=5)
args = parser.parse_args()
### some basic setup ###
np.random.seed(args.numpy_rand_seed)
np.set_printoptions(precision=args.print_precision)
### read trace ###
trace = read_trace_from_file(args.trace_file)
# print(trace)
### profile trace ###
(_, stack_distances, line_accesses) = trace_profile(
trace, args.trace_enable_padding
)
stack_distances.reverse()
line_accesses.reverse()
# print(line_accesses)
# print(stack_distances)
### compute probability distribution ###
# count items
l = len(stack_distances)
dc = sorted(
collections.Counter(stack_distances).items(), key=operator.itemgetter(0)
)
# create a distribution
list_sd = list(map(lambda tuple_x_k: tuple_x_k[0], dc)) # x = tuple_x_k[0]
dist_sd = list(
map(lambda tuple_x_k: tuple_x_k[1] / float(l), dc)
) # k = tuple_x_k[1]
cumm_sd = [] # np.cumsum(dc).tolist() #prefixsum
for i, (_, k) in enumerate(dc):
if i == 0:
cumm_sd.append(k / float(l))
else:
# add the 2nd element of the i-th tuple in the dist_sd list
cumm_sd.append(cumm_sd[i - 1] + (k / float(l)))
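# Worked example (illustration only): continuing the trace [1, 2, 1, 3, 2] from
# the trace_profile example, the reversed stack distances [0, 0, 2, 0, 3] give
# dc = [(0, 3), (2, 1), (3, 1)], hence list_sd = [0, 2, 3],
# dist_sd = [0.6, 0.2, 0.2] and cumm_sd = [0.6, 0.8, 1.0].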
### write stack_distance and line_accesses to a file ###
write_dist_to_file(args.dist_file, line_accesses, list_sd, cumm_sd)
### generate corresponding synthetic trace ###
# line_accesses, list_sd, cumm_sd = read_dist_from_file(args.dist_file)
synthetic_trace = trace_generate_lru(
line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding
)
# synthetic_trace = trace_generate_rand(
# line_accesses, list_sd, cumm_sd, len(trace), args.trace_enable_padding
# )
write_trace_to_file(args.synthetic_file, synthetic_trace)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: an implementation of a deep learning recommendation model (DLRM)
# The model input consists of dense and sparse features. The former is a vector
# of floating point values. The latter is a list of sparse indices into
# embedding tables, which consist of vectors of floating point values.
# The selected vectors are passed to mlp networks denoted by triangles,
# in some cases the vectors are interacted through operators (Ops).
#
# output:
# vector of values
# model: |
# /\
# /__\
# |
# _____________________> Op <___________________
# / | \
# /\ /\ /\
# /__\ /__\ ... /__\
# | | |
# | Op Op
# | ____/__\_____ ____/__\____
# | |_Emb_|____|__| ... |_Emb_|__|___|
# input:
# [ dense features ] [sparse indices] , ..., [sparse indices]
#
# More precise definition of model layers:
# 1) fully connected layers of an mlp
# z = f(y)
# y = Wx + b
#
# 2) embedding lookup (for a list of sparse indices p=[p1,...,pk])
# z = Op(e1,...,ek)
# obtain vectors e1=E[:,p1], ..., ek=E[:,pk]
#
# 3) Operator Op can be one of the following
# Sum(e1,...,ek) = e1 + ... + ek
# Dot(e1,...,ek) = [e1'e1, ..., e1'ek, ..., ek'e1, ..., ek'ek]
# Cat(e1,...,ek) = [e1', ..., ek']'
# where ' denotes transpose operation
#
# References:
# [1] Maxim Naumov, Dheevatsa Mudigere, Hao-Jun Michael Shi, Jianyu Huang,
# Narayanan Sundaram, Jongsoo Park, Xiaodong Wang, Udit Gupta, Carole-Jean Wu,
# Alisson G. Azzolini, Dmytro Dzhulgakov, Andrey Mallevich, Ilia Cherniavskii,
# Yinghai Lu, Raghuraman Krishnamoorthi, Ansha Yu, Volodymyr Kondratenko,
# Stephanie Pereira, Xianjie Chen, Wenlin Chen, Vijay Rao, Bill Jia, Liang Xiong,
# Misha Smelyanskiy, "Deep Learning Recommendation Model for Personalization and
# Recommendation Systems", CoRR, arXiv:1906.00091, 2019
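#
# A small numerical example of the operators above (illustration only): for two
# embedding vectors e1 = [1, 2] and e2 = [3, 4],
#   Sum(e1, e2) = [4, 6]
#   Dot(e1, e2) = [e1'e1, e1'e2, e2'e1, e2'e2] = [5, 11, 11, 25]
#   Cat(e1, e2) = [1, 2, 3, 4]
# (the implementation below keeps only the distinct pairs of the dot products).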
from __future__ import absolute_import, division, print_function, unicode_literals
# miscellaneous
import builtins
import functools
# import bisect
# import shutil
import sys
import time
import json
# data generation
from . import dlrm_data_pytorch as dp
# numpy
import numpy as np
# onnx
# The onnx import causes deprecation warnings every time workers
# are spawned during testing. So, we filter out those warnings.
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import onnx
# pytorch
import torch
import torch.nn as nn
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.scatter_gather import gather, scatter
# quotient-remainder trick
from .tricks.qr_embedding_bag import QREmbeddingBag
# mixed-dimension trick
from .tricks.md_embedding_bag import PrEmbeddingBag, md_solver
import sklearn.metrics
# from torchviz import make_dot
# import torch.nn.functional as Functional
# from torch.nn.parameter import Parameter
from torch.optim.lr_scheduler import _LRScheduler
exc = getattr(builtins, "IOError", "FileNotFoundError")
class LRPolicyScheduler(_LRScheduler):
def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps):
self.num_warmup_steps = num_warmup_steps
self.decay_start_step = decay_start_step
self.decay_end_step = decay_start_step + num_decay_steps
self.num_decay_steps = num_decay_steps
if self.decay_start_step < self.num_warmup_steps:
sys.exit("Learning rate warmup must finish before the decay starts")
super(LRPolicyScheduler, self).__init__(optimizer)
def get_lr(self):
step_count = self._step_count
if step_count < self.num_warmup_steps:
# warmup
scale = 1.0 - (self.num_warmup_steps - step_count) / self.num_warmup_steps
lr = [base_lr * scale for base_lr in self.base_lrs]
self.last_lr = lr
elif self.decay_start_step <= step_count and step_count < self.decay_end_step:
# decay
decayed_steps = step_count - self.decay_start_step
scale = ((self.num_decay_steps - decayed_steps) / self.num_decay_steps) ** 2
min_lr = 0.0000001
lr = [max(min_lr, base_lr * scale) for base_lr in self.base_lrs]
self.last_lr = lr
else:
if self.num_decay_steps > 0:
# freeze at last, either because we're after decay
# or because we're between warmup and decay
lr = self.last_lr
else:
# do not adjust
lr = self.base_lrs
return lr
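# Example of the schedule above (illustration only): with base_lr = 0.1,
# warm-up scales the rate linearly, e.g. halfway through warm-up the rate is
# 0.05; during decay the scale is
# ((num_decay_steps - decayed_steps) / num_decay_steps) ** 2,
# e.g. halfway through decay 0.1 * 0.5 ** 2 = 0.025, after which the rate is
# frozen at its last decayed value.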
### define dlrm in PyTorch ###
class DLRM_Net(nn.Module):
def create_mlp(self, ln, sigmoid_layer):
# build MLP layer by layer
layers = nn.ModuleList()
for i in range(0, ln.size - 1):
n = ln[i]
m = ln[i + 1]
# construct fully connected operator
LL = nn.Linear(int(n), int(m), bias=True)
# initialize the weights
# with torch.no_grad():
# custom Xavier input, output or two-sided fill
mean = 0.0 # std_dev = np.sqrt(variance)
std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n)
W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1))
bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
# approach 1
LL.weight.data = torch.tensor(W, requires_grad=True)
LL.bias.data = torch.tensor(bt, requires_grad=True)
# approach 2
# LL.weight.data.copy_(torch.tensor(W))
# LL.bias.data.copy_(torch.tensor(bt))
# approach 3
# LL.weight = Parameter(torch.tensor(W),requires_grad=True)
# LL.bias = Parameter(torch.tensor(bt),requires_grad=True)
layers.append(LL)
# construct sigmoid or relu operator
if i == sigmoid_layer:
layers.append(nn.Sigmoid())
else:
layers.append(nn.ReLU())
# approach 1: use ModuleList
# return layers
# approach 2: use Sequential container to wrap all layers
return torch.nn.Sequential(*layers)
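# Example of the initialization above (illustration only): for a layer with
# n = 13 inputs and m = 512 outputs, the weight std is sqrt(2 / (13 + 512)),
# roughly 0.062, and the bias std is sqrt(1 / 512), roughly 0.044.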
def create_emb(self, m, ln):
emb_l = nn.ModuleList()
for i in range(0, ln.size):
n = ln[i]
# construct embedding operator
if self.qr_flag and n > self.qr_threshold:
EE = QREmbeddingBag(n, m, self.qr_collisions,
operation=self.qr_operation, mode="sum", sparse=True)
elif self.md_flag:
base = max(m)
_m = m[i] if n > self.md_threshold else base
EE = PrEmbeddingBag(n, _m, base)
# use np initialization as below for consistency...
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, _m)
).astype(np.float32)
EE.embs.weight.data = torch.tensor(W, requires_grad=True)
else:
EE = nn.EmbeddingBag(n, m, mode="sum", sparse=True)
# initialize embeddings
# nn.init.uniform_(EE.weight, a=-np.sqrt(1 / n), b=np.sqrt(1 / n))
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m)
).astype(np.float32)
# approach 1
EE.weight.data = torch.tensor(W, requires_grad=True)
# approach 2
# EE.weight.data.copy_(torch.tensor(W))
# approach 3
# EE.weight = Parameter(torch.tensor(W),requires_grad=True)
emb_l.append(EE)
return emb_l
def __init__(
self,
m_spa=None,
ln_emb=None,
ln_bot=None,
ln_top=None,
arch_interaction_op=None,
arch_interaction_itself=False,
sigmoid_bot=-1,
sigmoid_top=-1,
sync_dense_params=True,
loss_threshold=0.0,
ndevices=-1,
qr_flag=False,
qr_operation="mult",
qr_collisions=0,
qr_threshold=200,
md_flag=False,
md_threshold=200,
):
super(DLRM_Net, self).__init__()
if (
(m_spa is not None)
and (ln_emb is not None)
and (ln_bot is not None)
and (ln_top is not None)
and (arch_interaction_op is not None)
):
# save arguments
self.ndevices = ndevices
self.output_d = 0
self.parallel_model_batch_size = -1
self.parallel_model_is_not_prepared = True
self.arch_interaction_op = arch_interaction_op
self.arch_interaction_itself = arch_interaction_itself
self.sync_dense_params = sync_dense_params
self.loss_threshold = loss_threshold
# create variables for QR embedding if applicable
self.qr_flag = qr_flag
if self.qr_flag:
self.qr_collisions = qr_collisions
self.qr_operation = qr_operation
self.qr_threshold = qr_threshold
# create variables for MD embedding if applicable
self.md_flag = md_flag
if self.md_flag:
self.md_threshold = md_threshold
# create operators
if ndevices <= 1:
self.emb_l = self.create_emb(m_spa, ln_emb)
self.bot_l = self.create_mlp(ln_bot, sigmoid_bot)
self.top_l = self.create_mlp(ln_top, sigmoid_top)
def apply_mlp(self, x, layers):
# approach 1: use ModuleList
# for layer in layers:
# x = layer(x)
# return x
# approach 2: use Sequential container to wrap all layers
return layers(x)
def apply_emb(self, lS_o, lS_i, emb_l):
# WARNING: notice that we are processing the batch at once. We implicitly
# assume that the data is laid out such that:
# 1. each embedding is indexed with a group of sparse indices,
# corresponding to a single lookup
# 2. for each embedding the lookups are further organized into a batch
# 3. for a list of embedding tables there is a list of batched lookups
ly = []
# for k, sparse_index_group_batch in enumerate(lS_i):
for k in range(len(lS_i)):
sparse_index_group_batch = lS_i[k]
sparse_offset_group_batch = lS_o[k]
# embedding lookup
# We are using EmbeddingBag, which implicitly uses sum operator.
# The embeddings are represented as tall matrices, with sum
# happening vertically across 0 axis, resulting in a row vector
E = emb_l[k]
V = E(sparse_index_group_batch, sparse_offset_group_batch)
ly.append(V)
# print(ly)
return ly
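# Example (illustration only): for one table, offsets [0, 2, 3] and six indices
# describe a mini-batch of 3 lookups with group sizes 2, 1 and 3; EmbeddingBag
# with mode="sum" pools the looked-up rows per group, so V has shape
# (3, embedding_dim), i.e. one row vector per sample.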
def interact_features(self, x, ly):
if self.arch_interaction_op == "dot":
# concatenate dense and sparse features
(batch_size, d) = x.shape
T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))
# perform a dot product
Z = torch.bmm(T, torch.transpose(T, 1, 2))
# append dense feature with the interactions (into a row vector)
# approach 1: all
# Zflat = Z.view((batch_size, -1))
# approach 2: unique
_, ni, nj = Z.shape
# approach 1: tril_indices
# offset = 0 if self.arch_interaction_itself else -1
# li, lj = torch.tril_indices(ni, nj, offset=offset)
# approach 2: custom
offset = 1 if self.arch_interaction_itself else 0
li = torch.tensor([i for i in range(ni) for j in range(i + offset)], device=x.device)
lj = torch.tensor([j for i in range(nj) for j in range(i + offset)], device=x.device)
Zflat = Z[:, li, lj]
# concatenate dense features and interactions
R = torch.cat([x] + [Zflat], dim=1)
elif self.arch_interaction_op == "cat":
# concatenation features (into a row vector)
R = torch.cat([x] + ly, dim=1)
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ self.arch_interaction_op
+ " is not supported"
)
return R
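# Shape walk-through of the dot interaction above (illustration only): with
# batch size B, bottom-MLP output x of shape (B, d) and k embedding outputs of
# shape (B, d) each, T is (B, k + 1, d) and Z is (B, k + 1, k + 1); with
# offset = 0 only the strictly lower triangle is kept, i.e. (k + 1) * k / 2
# values, so for d = 16 and k = 26 the output R has 16 + 351 = 367 columns.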
def forward(self, dense_x, lS_o, lS_i):
if self.ndevices <= 1:
return self.sequential_forward(dense_x, lS_o, lS_i)
else:
return self.parallel_forward(dense_x, lS_o, lS_i)
def sequential_forward(self, dense_x, lS_o, lS_i):
# process dense features (using bottom mlp), resulting in a row vector
x = self.apply_mlp(dense_x, self.bot_l)
# debug prints
# print("intermediate")
# print(x.detach().cpu().numpy())
# process sparse features(using embeddings), resulting in a list of row vectors
ly = self.apply_emb(lS_o, lS_i, self.emb_l)
# for y in ly:
# print(y.detach().cpu().numpy())
# interact features (dense and sparse)
z = self.interact_features(x, ly)
# print(z.detach().cpu().numpy())
# obtain probability of a click (using top mlp)
p = self.apply_mlp(z, self.top_l)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
else:
z = p
return z
def parallel_forward(self, dense_x, lS_o, lS_i):
### prepare model (overwrite) ###
# WARNING: # of devices must be >= batch size in parallel_forward call
batch_size = dense_x.size()[0]
ndevices = min(self.ndevices, batch_size, len(self.emb_l))
device_ids = range(ndevices)
# WARNING: must redistribute the model if mini-batch size changes(this is common
# for last mini-batch, when # of elements in the dataset/batch size is not even
if self.parallel_model_batch_size != batch_size:
self.parallel_model_is_not_prepared = True
if self.parallel_model_is_not_prepared or self.sync_dense_params:
# replicate mlp (data parallelism)
self.bot_l_replicas = replicate(self.bot_l, device_ids)
self.top_l_replicas = replicate(self.top_l, device_ids)
self.parallel_model_batch_size = batch_size
if self.parallel_model_is_not_prepared:
# distribute embeddings (model parallelism)
t_list = []
for k, emb in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
emb.to(d)
t_list.append(emb.to(d))
self.emb_l = nn.ModuleList(t_list)
self.parallel_model_is_not_prepared = False
### prepare input (overwrite) ###
# scatter dense features (data parallelism)
# print(dense_x.device)
dense_x = scatter(dense_x, device_ids, dim=0)
# distribute sparse features (model parallelism)
if (len(self.emb_l) != len(lS_o)) or (len(self.emb_l) != len(lS_i)):
sys.exit("ERROR: corrupted model input detected in parallel_forward call")
t_list = []
i_list = []
for k, _ in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
t_list.append(lS_o[k].to(d))
i_list.append(lS_i[k].to(d))
lS_o = t_list
lS_i = i_list
### compute results in parallel ###
# bottom mlp
# WARNING: Note that the self.bot_l is a list of bottom mlp modules
# that have been replicated across devices, while dense_x is a tuple of dense
# inputs that has been scattered across devices on the first (batch) dimension.
# The output is a list of tensors scattered across devices according to the
# distribution of dense_x.
x = parallel_apply(self.bot_l_replicas, dense_x, None, device_ids)
# debug prints
# print(x)
# embeddings
ly = self.apply_emb(lS_o, lS_i, self.emb_l)
# debug prints
# print(ly)
# butterfly shuffle (implemented inefficiently for now)
# WARNING: Note that at this point we have the result of the embedding lookup
# for the entire batch on each device. We would like to obtain partial results
# corresponding to all embedding lookups, but part of the batch on each device.
# Therefore, matching the distribution of output of bottom mlp, so that both
# could be used for subsequent interactions on each device.
if len(self.emb_l) != len(ly):
sys.exit("ERROR: corrupted intermediate result in parallel_forward call")
t_list = []
for k, _ in enumerate(self.emb_l):
d = torch.device("cuda:" + str(k % ndevices))
y = scatter(ly[k], device_ids, dim=0)
t_list.append(y)
# adjust the list to be ordered per device
ly = list(map(lambda y: list(y), zip(*t_list)))
# debug prints
# print(ly)
# interactions
z = []
for k in range(ndevices):
zk = self.interact_features(x[k], ly[k])
z.append(zk)
# debug prints
# print(z)
# top mlp
# WARNING: Note that the self.top_l is a list of top mlp modules that
# have been replicated across devices, while z is a list of interaction results
# that by construction are scattered across devices on the first (batch) dim.
# The output is a list of tensors scattered across devices according to the
# distribution of z.
p = parallel_apply(self.top_l_replicas, z, None, device_ids)
### gather the distributed results ###
p0 = gather(p, self.output_d, dim=0)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z0 = torch.clamp(
p0, min=self.loss_threshold, max=(1.0 - self.loss_threshold)
)
else:
z0 = p0
return z0
def dash_separated_ints(value):
vals = value.split('-')
for val in vals:
try:
int(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of ints" % value)
return value
def dash_separated_floats(value):
vals = value.split('-')
for val in vals:
try:
float(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of floats" % value)
return value
if __name__ == "__main__":
### import packages ###
import sys
import argparse
### parse arguments ###
parser = argparse.ArgumentParser(
description="Train Deep Learning Recommendation Model (DLRM)"
)
# model related parameters
parser.add_argument("--arch-sparse-feature-size", type=int, default=2)
parser.add_argument(
"--arch-embedding-size", type=dash_separated_ints, default="4-3-2")
# j will be replaced with the table number
parser.add_argument(
"--arch-mlp-bot", type=dash_separated_ints, default="4-3-2")
parser.add_argument(
"--arch-mlp-top", type=dash_separated_ints, default="4-2-1")
parser.add_argument(
"--arch-interaction-op", type=str, choices=['dot', 'cat'], default="dot")
parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
# embedding table options
parser.add_argument("--md-flag", action="store_true", default=False)
parser.add_argument("--md-threshold", type=int, default=200)
parser.add_argument("--md-temperature", type=float, default=0.3)
parser.add_argument("--md-round-dims", action="store_true", default=False)
parser.add_argument("--qr-flag", action="store_true", default=False)
parser.add_argument("--qr-threshold", type=int, default=200)
parser.add_argument("--qr-operation", type=str, default="mult")
parser.add_argument("--qr-collisions", type=int, default=4)
# activations and loss
parser.add_argument("--activation-function", type=str, default="relu")
parser.add_argument("--loss-function", type=str, default="mse") # or bce or wbce
parser.add_argument(
"--loss-weights", type=dash_separated_floats, default="1.0-1.0") # for wbce
parser.add_argument("--loss-threshold", type=float, default=0.0) # 1.0e-7
parser.add_argument("--round-targets", type=bool, default=False)
# data
parser.add_argument("--data-size", type=int, default=1)
parser.add_argument("--num-batches", type=int, default=0)
parser.add_argument(
"--data-generation", type=str, default="random"
) # synthetic or dataset
parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log")
parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte
parser.add_argument("--raw-data-file", type=str, default="")
parser.add_argument("--processed-data-file", type=str, default="")
parser.add_argument("--data-randomize", type=str, default="total") # or day or none
parser.add_argument("--data-trace-enable-padding", type=bool, default=False)
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
parser.add_argument("--num-indices-per-lookup", type=int, default=10)
parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False)
parser.add_argument("--num-workers", type=int, default=0)
parser.add_argument("--memory-map", action="store_true", default=False)
# training
parser.add_argument("--mini-batch-size", type=int, default=1)
parser.add_argument("--nepochs", type=int, default=1)
parser.add_argument("--learning-rate", type=float, default=0.01)
parser.add_argument("--print-precision", type=int, default=5)
parser.add_argument("--numpy-rand-seed", type=int, default=123)
parser.add_argument("--sync-dense-params", type=bool, default=True)
# inference
parser.add_argument("--inference-only", action="store_true", default=False)
# onnx
parser.add_argument("--save-onnx", action="store_true", default=False)
# gpu
parser.add_argument("--use-gpu", action="store_true", default=False)
# debugging and profiling
parser.add_argument("--print-freq", type=int, default=1)
parser.add_argument("--test-freq", type=int, default=-1)
parser.add_argument("--test-mini-batch-size", type=int, default=-1)
parser.add_argument("--test-num-workers", type=int, default=-1)
parser.add_argument("--print-time", action="store_true", default=False)
parser.add_argument("--debug-mode", action="store_true", default=False)
parser.add_argument("--enable-profiling", action="store_true", default=False)
parser.add_argument("--plot-compute-graph", action="store_true", default=False)
# store/load model
parser.add_argument("--save-model", type=str, default="")
parser.add_argument("--load-model", type=str, default="")
# mlperf logging (disables other output and stops early)
parser.add_argument("--mlperf-logging", action="store_true", default=False)
# stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107
parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0)
# stop at target AUC Terabyte (no subsampling) 0.8025
parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0)
parser.add_argument("--mlperf-bin-loader", action='store_true', default=False)
parser.add_argument("--mlperf-bin-shuffle", action='store_true', default=False)
# LR policy
parser.add_argument("--lr-num-warmup-steps", type=int, default=0)
parser.add_argument("--lr-decay-start-step", type=int, default=0)
parser.add_argument("--lr-num-decay-steps", type=int, default=0)
args = parser.parse_args()
if args.mlperf_logging:
print('command line args: ', json.dumps(vars(args)))
### some basic setup ###
np.random.seed(args.numpy_rand_seed)
np.set_printoptions(precision=args.print_precision)
torch.set_printoptions(precision=args.print_precision)
torch.manual_seed(args.numpy_rand_seed)
if (args.test_mini_batch_size < 0):
# if the parameter is not set, use the training batch size
args.test_mini_batch_size = args.mini_batch_size
if (args.test_num_workers < 0):
# if the parameter is not set, use the same parameter for training
args.test_num_workers = args.num_workers
use_gpu = args.use_gpu and torch.cuda.is_available()
if use_gpu:
torch.cuda.manual_seed_all(args.numpy_rand_seed)
torch.backends.cudnn.deterministic = True
device = torch.device("cuda", 0)
ngpus = torch.cuda.device_count() # 1
print("Using {} GPU(s)...".format(ngpus))
else:
device = torch.device("cpu")
print("Using CPU...")
### prepare training data ###
ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
# input data
if (args.data_generation == "dataset"):
train_data, train_ld, test_data, test_ld = \
dp.make_criteo_data_and_loaders(args)
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
nbatches_test = len(test_ld)
ln_emb = train_data.counts
# enforce maximum limit on number of vectors per embedding
if args.max_ind_range > 0:
ln_emb = np.array(list(map(
lambda x: x if x < args.max_ind_range else args.max_ind_range,
ln_emb
)))
m_den = train_data.m_den
ln_bot[0] = m_den
else:
# input and target at random
ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-")
m_den = ln_bot[0]
train_data, train_ld = dp.make_random_data_and_loader(args, ln_emb, m_den)
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
### parse command line arguments ###
m_spa = args.arch_sparse_feature_size
num_fea = ln_emb.size + 1 # num sparse + num dense features
m_den_out = ln_bot[ln_bot.size - 1]
if args.arch_interaction_op == "dot":
# approach 1: all
# num_int = num_fea * num_fea + m_den_out
# approach 2: unique
if args.arch_interaction_itself:
num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out
else:
num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out
elif args.arch_interaction_op == "cat":
num_int = num_fea * m_den_out
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ args.arch_interaction_op
+ " is not supported"
)
arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top
ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
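# Worked example (illustrative, using the Criteo Kaggle sizes that appear elsewhere
# in this repo): 26 sparse + 1 dense feature gives num_fea = 27, and a bottom MLP
# ending in m_den_out = 16 yields num_int = 27 * 26 // 2 + 16 = 367 pairwise "dot"
# interactions, so the adjusted top MLP spec becomes "367-512-256-1".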
# sanity check: feature sizes and mlp dimensions must match
if m_den != ln_bot[0]:
sys.exit(
"ERROR: arch-dense-feature-size "
+ str(m_den)
+ " does not match first dim of bottom mlp "
+ str(ln_bot[0])
)
if args.qr_flag:
if args.qr_operation == "concat" and 2 * m_spa != m_den_out:
sys.exit(
"ERROR: 2 arch-sparse-feature-size "
+ str(2 * m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
+ " (note that the last dim of bottom mlp must be 2x the embedding dim)"
)
if args.qr_operation != "concat" and m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
else:
if m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
if num_int != ln_top[0]:
sys.exit(
"ERROR: # of feature interactions "
+ str(num_int)
+ " does not match first dimension of top mlp "
+ str(ln_top[0])
)
# assign mixed dimensions if applicable
if args.md_flag:
m_spa = md_solver(
torch.tensor(ln_emb),
args.md_temperature, # alpha
d0=m_spa,
round_dim=args.md_round_dims
).tolist()
# test prints (model arch)
if args.debug_mode:
print("model arch:")
print(
"mlp top arch "
+ str(ln_top.size - 1)
+ " layers, with input to output dimensions:"
)
print(ln_top)
print("# of interactions")
print(num_int)
print(
"mlp bot arch "
+ str(ln_bot.size - 1)
+ " layers, with input to output dimensions:"
)
print(ln_bot)
print("# of features (sparse and dense)")
print(num_fea)
print("dense feature size")
print(m_den)
print("sparse feature size")
print(m_spa)
print(
"# of embeddings (= # of sparse features) "
+ str(ln_emb.size)
+ ", with dimensions "
+ str(m_spa)
+ "x:"
)
print(ln_emb)
print("data (inputs and targets):")
for j, (X, lS_o, lS_i, T) in enumerate(train_ld):
# early exit if nbatches was set by the user and has been exceeded
if nbatches > 0 and j >= nbatches:
break
print("mini-batch: %d" % j)
print(X.detach().cpu().numpy())
# transform offsets to lengths when printing
print(
[
np.diff(
S_o.detach().cpu().tolist() + list(lS_i[i].shape)
).tolist()
for i, S_o in enumerate(lS_o)
]
)
print([S_i.detach().cpu().tolist() for S_i in lS_i])
print(T.detach().cpu().numpy())
ndevices = min(ngpus, args.mini_batch_size, num_fea - 1) if use_gpu else -1
### construct the neural network specified above ###
# WARNING: to obtain exactly the same initialization for
# the weights we need to start from the same random seed.
# np.random.seed(args.numpy_rand_seed)
dlrm = DLRM_Net(
m_spa,
ln_emb,
ln_bot,
ln_top,
arch_interaction_op=args.arch_interaction_op,
arch_interaction_itself=args.arch_interaction_itself,
sigmoid_bot=-1,
sigmoid_top=ln_top.size - 2,
sync_dense_params=args.sync_dense_params,
loss_threshold=args.loss_threshold,
ndevices=ndevices,
qr_flag=args.qr_flag,
qr_operation=args.qr_operation,
qr_collisions=args.qr_collisions,
qr_threshold=args.qr_threshold,
md_flag=args.md_flag,
md_threshold=args.md_threshold,
)
# test prints
if args.debug_mode:
print("initial parameters (weights and bias):")
for param in dlrm.parameters():
print(param.detach().cpu().numpy())
# print(dlrm)
if use_gpu:
# Custom Model-Data Parallel
# the mlps are replicated and use data parallelism, while
# the embeddings are distributed and use model parallelism
dlrm = dlrm.to(device) # .cuda()
if dlrm.ndevices > 1:
dlrm.emb_l = dlrm.create_emb(m_spa, ln_emb)
# specify the loss function
if args.loss_function == "mse":
loss_fn = torch.nn.MSELoss(reduction="mean")
elif args.loss_function == "bce":
loss_fn = torch.nn.BCELoss(reduction="mean")
elif args.loss_function == "wbce":
loss_ws = torch.tensor(np.fromstring(args.loss_weights, dtype=float, sep="-"))
loss_fn = torch.nn.BCELoss(reduction="none")
else:
sys.exit("ERROR: --loss-function=" + args.loss_function + " is not supported")
if not args.inference_only:
# specify the optimizer algorithm
optimizer = torch.optim.SGD(dlrm.parameters(), lr=args.learning_rate)
lr_scheduler = LRPolicyScheduler(optimizer, args.lr_num_warmup_steps, args.lr_decay_start_step,
args.lr_num_decay_steps)
### main loop ###
def time_wrap(use_gpu):
if use_gpu:
torch.cuda.synchronize()
return time.time()
def dlrm_wrap(X, lS_o, lS_i, use_gpu, device):
if use_gpu: # .cuda()
# lS_i can be either a list of tensors or a stacked tensor.
# Handle each case below:
lS_i = [S_i.to(device) for S_i in lS_i] if isinstance(lS_i, list) \
else lS_i.to(device)
lS_o = [S_o.to(device) for S_o in lS_o] if isinstance(lS_o, list) \
else lS_o.to(device)
return dlrm(
X.to(device),
lS_o,
lS_i
)
else:
return dlrm(X, lS_o, lS_i)
def loss_fn_wrap(Z, T, use_gpu, device):
if args.loss_function == "mse" or args.loss_function == "bce":
if use_gpu:
return loss_fn(Z, T.to(device))
else:
return loss_fn(Z, T)
elif args.loss_function == "wbce":
if use_gpu:
loss_ws_ = loss_ws[T.data.view(-1).long()].view_as(T).to(device)
loss_fn_ = loss_fn(Z, T.to(device))
else:
loss_ws_ = loss_ws[T.data.view(-1).long()].view_as(T)
loss_fn_ = loss_fn(Z, T)
loss_sc_ = loss_ws_ * loss_fn_
# debug prints
# print(loss_ws_)
# print(loss_fn_)
return loss_sc_.mean()
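# Illustrative note: for --loss-function=wbce with e.g. --loss-weights="1.0-3.0"
# (hypothetical values), targets of 0 select loss_ws[0] = 1.0 and targets of 1 select
# loss_ws[1] = 3.0, so positive samples contribute three times as much to the mean loss.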
# training or inference
best_gA_test = 0
best_auc_test = 0
skip_upto_epoch = 0
skip_upto_batch = 0
total_time = 0
total_loss = 0
total_accu = 0
total_iter = 0
total_samp = 0
k = 0
# Load model if specified
if not (args.load_model == ""):
print("Loading saved model {}".format(args.load_model))
if use_gpu:
if dlrm.ndevices > 1:
# NOTE: when targeting inference on multiple GPUs,
# load the model as is on CPU or GPU, with the move
# to multiple GPUs to be done in parallel_forward
ld_model = torch.load(args.load_model)
else:
# NOTE: when targeting inference on single GPU,
# note that the call to .to(device) has already happened
ld_model = torch.load(
args.load_model,
map_location=torch.device('cuda')
# map_location=lambda storage, loc: storage.cuda(0)
)
else:
# when targeting inference on CPU
ld_model = torch.load(args.load_model, map_location=torch.device('cpu'))
dlrm.load_state_dict(ld_model["state_dict"])
ld_j = ld_model["iter"]
ld_k = ld_model["epoch"]
ld_nepochs = ld_model["nepochs"]
ld_nbatches = ld_model["nbatches"]
ld_nbatches_test = ld_model["nbatches_test"]
ld_gA = ld_model["train_acc"]
ld_gL = ld_model["train_loss"]
ld_total_loss = ld_model["total_loss"]
ld_total_accu = ld_model["total_accu"]
ld_gA_test = ld_model["test_acc"]
ld_gL_test = ld_model["test_loss"]
if not args.inference_only:
optimizer.load_state_dict(ld_model["opt_state_dict"])
best_gA_test = ld_gA_test
total_loss = ld_total_loss
total_accu = ld_total_accu
skip_upto_epoch = ld_k # epochs
skip_upto_batch = ld_j # batches
else:
args.print_freq = ld_nbatches
args.test_freq = 0
print(
"Saved at: epoch = {:d}/{:d}, batch = {:d}/{:d}, ntbatch = {:d}".format(
ld_k, ld_nepochs, ld_j, ld_nbatches, ld_nbatches_test
)
)
print(
"Training state: loss = {:.6f}, accuracy = {:3.3f} %".format(
ld_gL, ld_gA * 100
)
)
print(
"Testing state: loss = {:.6f}, accuracy = {:3.3f} %".format(
ld_gL_test, ld_gA_test * 100
)
)
print("time/loss/accuracy (if enabled):")
with torch.autograd.profiler.profile(args.enable_profiling, use_gpu) as prof:
while k < args.nepochs:
if k < skip_upto_epoch:
    k += 1  # advance past epochs already completed in the loaded checkpoint
    continue
accum_time_begin = time_wrap(use_gpu)
if args.mlperf_logging:
previous_iteration_time = None
for j, (X, lS_o, lS_i, T) in enumerate(train_ld):
if j == 0 and args.save_onnx:
(X_onnx, lS_o_onnx, lS_i_onnx) = (X, lS_o, lS_i)
if j < skip_upto_batch:
continue
if args.mlperf_logging:
current_time = time_wrap(use_gpu)
if previous_iteration_time:
iteration_time = current_time - previous_iteration_time
else:
iteration_time = 0
previous_iteration_time = current_time
else:
t1 = time_wrap(use_gpu)
# early exit if nbatches was set by the user and has been exceeded
if nbatches > 0 and j >= nbatches:
break
'''
# debug prints
print("input and targets")
print(X.detach().cpu().numpy())
print([np.diff(S_o.detach().cpu().tolist()
+ list(lS_i[i].shape)).tolist() for i, S_o in enumerate(lS_o)])
print([S_i.detach().cpu().numpy().tolist() for S_i in lS_i])
print(T.detach().cpu().numpy())
'''
# forward pass
Z = dlrm_wrap(X, lS_o, lS_i, use_gpu, device)
# loss
E = loss_fn_wrap(Z, T, use_gpu, device)
'''
# debug prints
print("output and loss")
print(Z.detach().cpu().numpy())
print(E.detach().cpu().numpy())
'''
# compute loss and accuracy
L = E.detach().cpu().numpy() # numpy array
S = Z.detach().cpu().numpy() # numpy array
T = T.detach().cpu().numpy() # numpy array
mbs = T.shape[0] # = args.mini_batch_size except maybe for last
A = np.sum((np.round(S, 0) == T).astype(np.uint8))
if not args.inference_only:
# scaled error gradient propagation
# (where we do not accumulate gradients across mini-batches)
optimizer.zero_grad()
# backward pass
E.backward()
# debug prints (check gradient norm)
# for l in mlp.layers:
# if hasattr(l, 'weight'):
# print(l.weight.grad.norm().item())
# optimizer
optimizer.step()
lr_scheduler.step()
if args.mlperf_logging:
total_time += iteration_time
else:
t2 = time_wrap(use_gpu)
total_time += t2 - t1
total_accu += A
total_loss += L * mbs
total_iter += 1
total_samp += mbs
should_print = ((j + 1) % args.print_freq == 0) or (j + 1 == nbatches)
should_test = (
(args.test_freq > 0)
and (args.data_generation == "dataset")
and (((j + 1) % args.test_freq == 0) or (j + 1 == nbatches))
)
# print time, loss and accuracy
if should_print or should_test:
gT = 1000.0 * total_time / total_iter if args.print_time else -1
total_time = 0
gA = total_accu / total_samp
total_accu = 0
gL = total_loss / total_samp
total_loss = 0
str_run_type = "inference" if args.inference_only else "training"
print(
"Finished {} it {}/{} of epoch {}, {:.2f} ms/it, ".format(
str_run_type, j + 1, nbatches, k, gT
)
+ "loss {:.6f}, accuracy {:3.3f} %".format(gL, gA * 100)
)
# Uncomment the line below to print out the total time with overhead
# print("Accumulated time so far: {}" \
# .format(time_wrap(use_gpu) - accum_time_begin))
total_iter = 0
total_samp = 0
# testing
if should_test and not args.inference_only:
# don't measure training iter time in a test iteration
if args.mlperf_logging:
previous_iteration_time = None
test_accu = 0
test_loss = 0
test_samp = 0
accum_test_time_begin = time_wrap(use_gpu)
if args.mlperf_logging:
scores = []
targets = []
for i, (X_test, lS_o_test, lS_i_test, T_test) in enumerate(test_ld):
# early exit if nbatches was set by the user and was exceeded
if nbatches > 0 and i >= nbatches:
break
t1_test = time_wrap(use_gpu)
# forward pass
Z_test = dlrm_wrap(
X_test, lS_o_test, lS_i_test, use_gpu, device
)
if args.mlperf_logging:
S_test = Z_test.detach().cpu().numpy() # numpy array
T_test = T_test.detach().cpu().numpy() # numpy array
scores.append(S_test)
targets.append(T_test)
else:
# loss
E_test = loss_fn_wrap(Z_test, T_test, use_gpu, device)
# compute loss and accuracy
L_test = E_test.detach().cpu().numpy() # numpy array
S_test = Z_test.detach().cpu().numpy() # numpy array
T_test = T_test.detach().cpu().numpy() # numpy array
mbs_test = T_test.shape[0] # = mini_batch_size except last
A_test = np.sum((np.round(S_test, 0) == T_test).astype(np.uint8))
test_accu += A_test
test_loss += L_test * mbs_test
test_samp += mbs_test
t2_test = time_wrap(use_gpu)
if args.mlperf_logging:
scores = np.concatenate(scores, axis=0)
targets = np.concatenate(targets, axis=0)
metrics = {
'loss' : sklearn.metrics.log_loss,
'recall' : lambda y_true, y_score:
sklearn.metrics.recall_score(
y_true=y_true,
y_pred=np.round(y_score)
),
'precision' : lambda y_true, y_score:
sklearn.metrics.precision_score(
y_true=y_true,
y_pred=np.round(y_score)
),
'f1' : lambda y_true, y_score:
sklearn.metrics.f1_score(
y_true=y_true,
y_pred=np.round(y_score)
),
'ap' : sklearn.metrics.average_precision_score,
'roc_auc' : sklearn.metrics.roc_auc_score,
'accuracy' : lambda y_true, y_score:
sklearn.metrics.accuracy_score(
y_true=y_true,
y_pred=np.round(y_score)
),
# 'pre_curve' : sklearn.metrics.precision_recall_curve,
# 'roc_curve' : sklearn.metrics.roc_curve,
}
# print("Compute time for validation metric : ", end="")
# first_it = True
validation_results = {}
for metric_name, metric_function in metrics.items():
# if first_it:
# first_it = False
# else:
# print(", ", end="")
# metric_compute_start = time_wrap(False)
validation_results[metric_name] = metric_function(
targets,
scores
)
# metric_compute_end = time_wrap(False)
# met_time = metric_compute_end - metric_compute_start
# print("{} {:.4f}".format(metric_name, 1000 * (met_time)),
# end="")
# print(" ms")
gA_test = validation_results['accuracy']
gL_test = validation_results['loss']
else:
gA_test = test_accu / test_samp
gL_test = test_loss / test_samp
is_best = gA_test > best_gA_test
if is_best:
best_gA_test = gA_test
if not (args.save_model == ""):
print("Saving model to {}".format(args.save_model))
torch.save(
{
"epoch": k,
"nepochs": args.nepochs,
"nbatches": nbatches,
"nbatches_test": nbatches_test,
"iter": j + 1,
"state_dict": dlrm.state_dict(),
"train_acc": gA,
"train_loss": gL,
"test_acc": gA_test,
"test_loss": gL_test,
"total_loss": total_loss,
"total_accu": total_accu,
"opt_state_dict": optimizer.state_dict(),
},
args.save_model,
)
if args.mlperf_logging:
is_best = validation_results['roc_auc'] > best_auc_test
if is_best:
best_auc_test = validation_results['roc_auc']
print(
"Testing at - {}/{} of epoch {},".format(j + 1, nbatches, k)
+ " loss {:.6f}, recall {:.4f}, precision {:.4f},".format(
validation_results['loss'],
validation_results['recall'],
validation_results['precision']
)
+ " f1 {:.4f}, ap {:.4f},".format(
validation_results['f1'],
validation_results['ap'],
)
+ " auc {:.4f}, best auc {:.4f},".format(
validation_results['roc_auc'],
best_auc_test
)
+ " accuracy {:3.3f} %, best accuracy {:3.3f} %".format(
validation_results['accuracy'] * 100,
best_gA_test * 100
)
)
else:
print(
"Testing at - {}/{} of epoch {},".format(j + 1, nbatches, 0)
+ " loss {:.6f}, accuracy {:3.3f} %, best {:3.3f} %".format(
gL_test, gA_test * 100, best_gA_test * 100
)
)
# Uncomment the line below to print out the total time with overhead
# print("Total test time for this group: {}" \
# .format(time_wrap(use_gpu) - accum_test_time_begin))
if (args.mlperf_logging
and (args.mlperf_acc_threshold > 0)
and (best_gA_test > args.mlperf_acc_threshold)):
print("MLPerf testing accuracy threshold "
+ str(args.mlperf_acc_threshold)
+ " reached, stop training")
break
if (args.mlperf_logging
and (args.mlperf_auc_threshold > 0)
and (best_auc_test > args.mlperf_auc_threshold)):
print("MLPerf testing auc threshold "
+ str(args.mlperf_auc_threshold)
+ " reached, stop training")
break
k += 1 # nepochs
# profiling
if args.enable_profiling:
with open("dlrm_s_pytorch.prof", "w") as prof_f:
prof_f.write(prof.key_averages().table(sort_by="cpu_time_total"))
prof.export_chrome_trace("./dlrm_s_pytorch.json")
# print(prof.key_averages().table(sort_by="cpu_time_total"))
# plot compute graph
if args.plot_compute_graph:
sys.exit(
"ERROR: Please install pytorchviz package in order to use the"
+ " visualization. Then, uncomment its import above as well as"
+ " three lines below and run the code again."
)
# V = Z.mean() if args.inference_only else E
# dot = make_dot(V, params=dict(dlrm.named_parameters()))
# dot.render('dlrm_s_pytorch_graph') # write .pdf file
# test prints
if not args.inference_only and args.debug_mode:
print("updated parameters (weights and bias):")
for param in dlrm.parameters():
print(param.detach().cpu().numpy())
# export the model in onnx
if args.save_onnx:
dlrm_pytorch_onnx_file = "dlrm_s_pytorch.onnx"
batch_size = X_onnx.shape[0]
# debug prints
# print("batch_size", batch_size)
# print("inputs", X_onnx, lS_o_onnx, lS_i_onnx)
# print("output", dlrm_wrap(X_onnx, lS_o_onnx, lS_i_onnx, use_gpu, device))
# force list conversion
# if torch.is_tensor(lS_o_onnx):
# lS_o_onnx = [lS_o_onnx[j] for j in range(len(lS_o_onnx))]
# if torch.is_tensor(lS_i_onnx):
# lS_i_onnx = [lS_i_onnx[j] for j in range(len(lS_i_onnx))]
# force tensor conversion
# if isinstance(lS_o_onnx, list):
# lS_o_onnx = torch.stack(lS_o_onnx)
# if isinstance(lS_i_onnx, list):
# lS_i_onnx = torch.stack(lS_i_onnx)
# debug prints
print("X_onnx.shape", X_onnx.shape)
if torch.is_tensor(lS_o_onnx):
print("lS_o_onnx.shape", lS_o_onnx.shape)
else:
for oo in lS_o_onnx:
print("oo.shape", oo.shape)
if torch.is_tensor(lS_i_onnx):
print("lS_i_onnx.shape", lS_i_onnx.shape)
else:
for ii in lS_i_onnx:
print("ii.shape", ii.shape)
# name inputs and outputs
o_inputs = ["offsets"] if torch.is_tensor(lS_o_onnx) else ["offsets_"+str(i) for i in range(len(lS_o_onnx))]
i_inputs = ["indices"] if torch.is_tensor(lS_i_onnx) else ["indices_"+str(i) for i in range(len(lS_i_onnx))]
all_inputs = ["dense_x"] + o_inputs + i_inputs
#debug prints
print("inputs", all_inputs)
# create dynamic_axis dictionaries
do_inputs = [{'offsets': {1: 'batch_size'}}] if torch.is_tensor(lS_o_onnx) else [{"offsets_"+str(i): {0: 'batch_size'}} for i in range(len(lS_o_onnx))]
di_inputs = [{'indices': {1: 'batch_size'}}] if torch.is_tensor(lS_i_onnx) else [{"indices_"+str(i): {0: 'batch_size'}} for i in range(len(lS_i_onnx))]
dynamic_axes = {'dense_x': {0: 'batch_size'}, 'pred': {0: 'batch_size'}}
for do in do_inputs:
dynamic_axes.update(do)
for di in di_inputs:
dynamic_axes.update(di)
# debug prints
print(dynamic_axes)
# export model
torch.onnx.export(
dlrm, (X_onnx, lS_o_onnx, lS_i_onnx), dlrm_pytorch_onnx_file, verbose=True, use_external_data_format=True, opset_version=11, input_names=all_inputs, output_names=["pred"], dynamic_axes=dynamic_axes
)
# recover the model back
dlrm_pytorch_onnx = onnx.load(dlrm_pytorch_onnx_file)
# check the onnx model
onnx.checker.check_model(dlrm_pytorch_onnx)
'''
# run model using onnxruntime
import onnxruntime as rt
dict_inputs = {}
dict_inputs["dense_x"] = X_onnx.numpy().astype(np.float32)
if torch.is_tensor(lS_o_onnx):
dict_inputs["offsets"] = lS_o_onnx.numpy().astype(np.int64)
else:
for i in range(len(lS_o_onnx)):
dict_inputs["offsets_"+str(i)] = lS_o_onnx[i].numpy().astype(np.int64)
if torch.is_tensor(lS_i_onnx):
dict_inputs["indices"] = lS_i_onnx.numpy().astype(np.int64)
else:
for i in range(len(lS_i_onnx)):
dict_inputs["indices_"+str(i)] = lS_i_onnx[i].numpy().astype(np.int64)
print("dict_inputs", dict_inputs)
sess = rt.InferenceSession(dlrm_pytorch_onnx_file, rt.SessionOptions())
prediction = sess.run(output_names=["pred"], input_feed=dict_inputs)
print("prediction", prediction)
'''
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#
# This script performs the visualization of the embedding tables created in
# DLRM during the training procedure. We use two popular techniques for
# visualization: umap (https://umap-learn.readthedocs.io/en/latest/) and
# tsne (https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html).
# These links also provide instructions on how to install these packages
# in different environments.
#
# Warning: the size of the data to be visualized depends on the RAM on your machine.
#
#
# Command line examples:
#
# Full analysis of embeddings and data representations for Criteo Kaggle data:
# $python ./tools/visualize.py --data-set=kaggle --load-model=../dlrm-2020-05-25/criteo.pytorch-e-0-i-110591
# --raw-data-file=../../criteo/input/train.txt --skip-categorical-analysis
# --processed-data-file=../../criteo/input/kaggleAdDisplayChallenge_processed.npz
#
#
# To run just the analysis of categorical data for the Criteo Kaggle data set:
# $python ./tools/visualize.py --data-set=kaggle --load-model=../dlrm-2020-05-25/criteo.pytorch-e-0-i-110591 \
# --raw-data-file=../../criteo/input/train.txt --data-randomize=none --processed-data-file=../../criteo/input/kaggleAdDisplayChallenge_processed.npz \
# --skip-embedding --skip-data-plots
#
#
# The following command line arguments are available to the user:
#
# --load-model - DLRM model file
# --data-set - one of ["kaggle", "terabyte"]
# --max-ind-range - max index range used during the training
# --output-dir - output directory; if not specified, it is derived from the model and dataset names
# --max-umap-size - max number of points to visualize using UMAP, default=50000
# --use-tsne - use T-SNE
# --max-tsne-size - max number of points to visualize using T-SNE, default=1000)
# --skip-embedding - skips analysis of embedding tables
# --umap-metric - metric for UMAP
# --skip-data-plots - skips data plots
# --skip-categorical-analysis - skips categorical analysis
#
# # data file related
# --raw-data-file
# --processed-data-file
# --data-sub-sample-rate
# --data-randomize
# --memory-map
# --mini-batch-size
# --num-workers
# --test-mini-batch-size
# --test-num-workers
# --num-batches
# --mlperf-logging
import os
import sys
import argparse
import numpy as np
import umap
import hdbscan
import json
import torch
import math
import matplotlib
import matplotlib.pyplot as plt
import collections
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn import manifold
import dlrm_data_pytorch as dp
from dlrm_s_pytorch import DLRM_Net
def visualize_embeddings_umap(emb_l,
output_dir = "",
max_size = 500000,
umap_metric = "euclidean",
cat_counts = None,
use_max_count = True):
for k in range(0, len(emb_l)):
E = emb_l[k].weight.detach().cpu().numpy()
print("umap", E.shape)
# create histogram of norms
bins = 50
norms = [np.linalg.norm(E[i], ord=2) for i in range(0,E.shape[0])]
# plt.hist(norms, bins = bins)
# plt.title("Cat norm hist var. "+str(k))
hist, bins = np.histogram(norms, bins=bins)
logbins = np.logspace(np.log10(bins[0]),np.log10(bins[-1]),len(bins))
plt.figure(figsize=(8,8))
plt.title("Categorical norms: " + str(k) + " cardinality " + str(len(cat_counts[k])))
plt.hist(norms, bins=logbins)
plt.xscale("log")
# plt.legend()
plt.savefig(output_dir+"/cat-norm-histogram-"+str(k)+".png")
plt.close()
if E.shape[0] < 20:
print("Skipping small embedding")
continue
n_vis = min(max_size, E.shape[0])
min_cnt = 0
# reducer = umap.UMAP(random_state=42, n_neighbors=25, min_dist=0.1)
reducer = umap.UMAP(random_state=42, metric=umap_metric)
if use_max_count is False or n_vis == E.shape[0]:
Y = reducer.fit_transform(E[:n_vis,:])
else:
# select only values with counts > min_cnt (raised until at most max_size remain)
done = False
min_cnt = 1
while done == False:
el_cnt = (cat_counts[k] > min_cnt).sum()
if el_cnt <= max_size:
done = True
else:
min_cnt = min_cnt+1
E1= []
for i in range(0, E.shape[0]):
if cat_counts[k][i] > min_cnt:
E1.append(E[i,:])
print("max_count_len", len(E1), "mincount", min_cnt)
Y = reducer.fit_transform(np.array(E1))
n_vis = len(E1)
plt.figure(figsize=(8,8))
linewidth = 0
size = 1
if Y.shape[0] < 2500:
linewidth = 1
size = 5
if cat_counts is None:
plt.scatter(-Y[:,0], -Y[:,1], s=size, marker=".", linewidth=linewidth)
else:
#print(cat_counts[k])
n_disp = min(len(cat_counts[k]), Y.shape[0])
cur_max = math.log(max(cat_counts[k]))
norm_cat_count = [math.log(cat_counts[k][i]+1)/cur_max for i in range(0, len(cat_counts[k]))]
plt.scatter(-Y[0:n_disp,0], -Y[0:n_disp,1], s=size, marker=".", linewidth=linewidth, c=np.array(norm_cat_count)[0:n_disp], cmap="viridis")
plt.colorbar()
plt.title("UMAP: categorical var. " + str(k) + " (" + str(n_vis) + " of " + str(E.shape[0]) + ", min count " + str(min_cnt) + ")")
plt.savefig(output_dir + "/cat-" + str(k) + "-" + str(n_vis) + "-of-" + str(E.shape[0]) + "-umap.png")
plt.close()
def visualize_embeddings_tsne(emb_l,
output_dir = "",
max_size = 10000):
for k in range(0, len(emb_l)):
E = emb_l[k].weight.detach().cpu()
print("tsne", E.shape)
if E.shape[0] < 20:
print("Skipping small embedding")
continue
n_vis = min(max_size, E.shape[0])
tsne = manifold.TSNE(init="pca", random_state=0, method="exact")
Y = tsne.fit_transform(E[:n_vis,:])
plt.figure(figsize=(8, 8))
linewidth = 0
if Y.shape[0] < 5000:
linewidth = 1
plt.scatter(-Y[:,0], -Y[:,1], s=1, marker=".", linewidth=linewidth)
plt.title("TSNE: categorical var. " + str(k) + " (" + str(n_vis) + " of " + str(E.shape[0]) + ")")
plt.savefig(output_dir + "/cat-" + str(k) + "-" + str(n_vis) + "-of-" + str(E.shape[0]) + "-tsne.png")
plt.close()
def analyse_categorical_data(X_cat, n_days=10, output_dir=""):
# analyse categorical variables
n_vec = len(X_cat)
n_cat = len(X_cat[0])
n_days = n_days
print("n_vec", n_vec, "n_cat", n_cat)
# for c in train_data.X_cat:
# print(n_cat, c)
all_cat = np.array(X_cat)
print("all_cat.shape", all_cat.shape)
day_size = all_cat.shape[0]/n_days
for i in range(0,n_cat):
l_d = []
l_s1 = []
l_s2 = []
l_int = []
l_rem = []
cat = all_cat[:,i]
print("cat", i, cat.shape)
for d in range(1,n_days):
offset = int(d*day_size)
#print(offset)
cat1 = cat[:offset]
cat2 = cat[offset:]
s1 = set(cat1)
s2 = set(cat2)
intersect = list(s1 & s2)
#print(intersect)
l_d.append(d)
l_s1.append(len(s1))
l_s2.append(len(s2))
l_int.append(len(intersect))
l_rem.append((len(s1)-len(intersect)))
print(d, ",", len(s1), ",", len(s2), ",", len(intersect), ",", (len(s1)-len(intersect)))
print("spit", l_d)
print("before", l_s1)
print("after", l_s2)
print("inters.", l_int)
print("removed", l_rem)
plt.figure(figsize=(8,8))
plt.plot(l_d, l_s1, "g", label="before")
plt.plot(l_d, l_s2, "r", label="after")
plt.plot(l_d, l_int, "b", label="intersect")
plt.plot(l_d, l_rem, "y", label="removed")
plt.title("categorical var. "+str(i))
plt.legend()
plt.savefig(output_dir+"/cat-"+str(i).zfill(3)+".png")
plt.close()
def analyse_categorical_counts(X_cat, emb_l=None, output_dir=""):
# analyse categorical variables
n_vec = len(X_cat)
n_cat = len(X_cat[0])
print("n_vec", n_vec, "n_cat", n_cat)
# for c in train_data.X_cat:
# print(n_cat, c)
all_cat = np.array(X_cat)
print("all_cat.shape", all_cat.shape)
all_counts = []
for i in range(0,n_cat):
cat = all_cat[:,i]
if emb_l is None:
s = set(cat)
counts = np.zeros((len(s)))
print("cat", i, cat.shape, len(s))
else:
s = emb_l[i].weight.detach().cpu().shape[0]
counts = np.zeros((s))
print("cat", i, cat.shape, s)
for d in range(0,n_vec):
cv = int(cat[d])
counts[cv] = counts[cv]+1
all_counts.append(counts)
if emb_l is None:
plt.figure(figsize=(8,8))
plt.plot(counts)
plt.title("Categorical var "+str(i) + " cardinality " + str(len(counts)))
# plt.legend()
else:
E = emb_l[i].weight.detach().cpu().numpy()
norms = [np.linalg.norm(E[i], ord=2) for i in range(0,E.shape[0])]
fig, (ax0, ax1) = plt.subplots(2, 1)
fig.suptitle("Categorical variable: " + str(i)+" cardinality "+str(len(counts)))
ax0.plot(counts)
ax0.set_yscale("log")
ax0.set_title("Counts", fontsize=10)
ax1.plot(norms)
ax1.set_title("Norms", fontsize=10)
plt.savefig(output_dir+"/cat_counts-"+str(i).zfill(3)+".png")
plt.close()
return all_counts
def dlrm_output_wrap(dlrm, X, lS_o, lS_i, T):
all_feat_vec = []
all_cat_vec = []
x_vec = None
t_out = None
c_out = None
z_out = []
p_out = None
z_size = len(dlrm.top_l)
x = dlrm.apply_mlp(X, dlrm.bot_l)
# debug prints
#print("intermediate")
#print(x[0].detach().cpu().numpy())
x_vec = x[0].detach().cpu().numpy()
all_feat_vec.append(x_vec)
# all_X.append(x[0].detach().cpu().numpy())
# process sparse features(using embeddings), resulting in a list of row vectors
ly = dlrm.apply_emb(lS_o, lS_i, dlrm.emb_l)
for e in ly:
#print(e.detach().cpu().numpy())
all_feat_vec.append(e[0].detach().cpu().numpy())
all_cat_vec.append(e[0].detach().cpu().numpy())
all_feat_vec= np.concatenate(all_feat_vec, axis=0)
all_cat_vec= np.concatenate(all_cat_vec, axis=0)
# all_features.append(all_feat_vec)
# all_cat.append(all_cat_vec)
t_out = int(T.detach().cpu().numpy()[0,0])
# all_T.append(int(T.detach().cpu().numpy()[0,0]))
z = dlrm.interact_features(x, ly)
# print(z.detach().cpu().numpy())
# z_out = z.detach().cpu().numpy().flatten()
z_out.append(z.detach().cpu().numpy().flatten())
# all_z[0].append(z.detach().cpu().numpy().flatten())
# obtain probability of a click (using top mlp)
# print(dlrm.top_l)
# p = dlrm.apply_mlp(z, dlrm.top_l)
for i in range(0, z_size):
z = dlrm.top_l[i](z)
# if i < z_size-1:
# curr_z = z.detach().cpu().numpy().flatten()
z_out.append(z.detach().cpu().numpy().flatten())
# all_z[i+1].append(curr_z)
# print("z append", i)
# print("z",i, z.detach().cpu().numpy().flatten().shape)
p = z
# clamp output if needed
if 0.0 < dlrm.loss_threshold and dlrm.loss_threshold < 1.0:
z = torch.clamp(p, min=dlrm.loss_threshold, max=(1.0 - dlrm.loss_threshold))
else:
z = p
class_thresh = 0.0 #-0.25
zp = z.detach().cpu().numpy()[0,0]+ class_thresh
p_out = int(zp+0.5)
if p_out > 1:
p_out = 1
if p_out < 0:
p_out = 0
# all_pred.append(int(z.detach().cpu().numpy()[0,0]+0.5))
#print(int(z.detach().cpu().numpy()[0,0]+0.5))
if int(p_out) == t_out:
c_out = 0
else:
c_out = 1
return all_feat_vec, x_vec, all_cat_vec, t_out, c_out, z_out, p_out
def create_umap_data(dlrm, data_ld, max_size=50000, offset=0, info=""):
all_features = []
all_X = []
all_cat = []
all_T = []
all_c = []
all_z = []
all_pred = []
z_size = len(dlrm.top_l)
print("z_size", z_size)
for i in range(0, z_size):
all_z.append([])
for j, (X, lS_o, lS_i, T) in enumerate(data_ld):
if j < offset:
continue
if j >= max_size+offset:
break
af, x, cat, t, c, z, p = dlrm_output_wrap(dlrm, X, lS_o, lS_i, T)
all_features.append(af)
all_X.append(x)
all_cat.append(cat)
all_T.append(t)
all_c.append(c)
all_pred.append(p)
for i in range(0, z_size):
all_z[i].append(z[i])
# calculate classifier metrics
ac = accuracy_score(all_T, all_pred)
f1 = f1_score(all_T, all_pred)
ps = precision_score(all_T, all_pred)
rc = recall_score(all_T, all_pred)
print(info, "accuracy", ac, "f1", f1, "precision", ps, "recall", rc)
return all_features, all_X, all_cat, all_T, all_z, all_c, all_pred
def plot_all_data_3(umap_Y,
umap_T,
train_Y = None,
train_T = None,
test_Y = None,
test_T = None,
total_train_size = "",
total_test_size = "",
info = "",
output_dir = "",
orig_space_dim = 0):
size = 1
colors = ["red","green"]
fig, (ax0, ax1, ax2) = plt.subplots(1, 3)
fig.suptitle("UMAP: " + info + " space dim "+str(orig_space_dim))
ax0.scatter(umap_Y[:,0], umap_Y[:,1], s=size, c=umap_T, cmap=matplotlib.colors.ListedColormap(colors), marker=".", linewidth=0)
ax0.set_title("UMAP ("+str(len(umap_T))+" of "+ total_train_size+")", fontsize=7)
if train_Y is not None and train_T is not None:
ax1.scatter(train_Y[:,0], train_Y[:,1], s=size, c=train_T, cmap=matplotlib.colors.ListedColormap(colors), marker=".", linewidth=0)
ax1.set_title("Train ("+str(len(train_T))+" of "+ total_train_size+")", fontsize=7)
if test_Y is not None and test_T is not None:
ax2.scatter(test_Y[:,0], test_Y[:,1], s=size, c=test_T, cmap=matplotlib.colors.ListedColormap(colors), marker=".", linewidth=0)
ax2.set_title("Test ("+str(len(test_T))+" of "+ total_test_size+")", fontsize=7)
plt.savefig(output_dir+"/"+info+"-umap.png")
plt.close()
def plot_one_class_3(umap_Y,
umap_T,
train_Y,
train_T,
test_Y,
test_T,
target = 0,
col = "red",
total_train_size = "",
total_test_size = "",
info = "",
output_dir = "",
orig_space_dim = 0):
size = 1
fig, (ax0, ax1, ax2) = plt.subplots(1, 3)
fig.suptitle("UMAP: "+ info + " space dim "+str(orig_space_dim))
ind_l_umap = [i for i,x in enumerate(umap_T) if x == target]
Y_umap_l = np.array([umap_Y[i,:] for i in ind_l_umap])
ax0.scatter(Y_umap_l[:,0], Y_umap_l[:,1], s=size, c=col, marker=".", linewidth=0)
ax0.set_title("UMAP, ("+str(len(umap_T))+" of "+ total_train_size+")", fontsize=7)
if train_Y is not None and train_T is not None:
ind_l_test = [i for i,x in enumerate(train_T) if x == target]
Y_test_l = np.array([train_Y[i,:] for i in ind_l_test])
ax1.scatter(Y_test_l[:,0], Y_test_l[:,1], s=size, c=col, marker=".", linewidth=0)
ax1.set_title("Train, ("+str(len(train_T))+" of "+ total_train_size+")", fontsize=7)
if test_Y is not None and test_T is not None:
ind_l_test = [i for i,x in enumerate(test_T) if x == target]
Y_test_l = np.array([test_Y[i,:] for i in ind_l_test])
ax2.scatter(Y_test_l[:,0], Y_test_l[:,1], s=size, c=col, marker=".", linewidth=0)
ax2.set_title("Test, ("+str(len(test_T))+" of "+ total_test_size+")", fontsize=7)
plt.savefig(output_dir+"/"+info+"-umap.png")
plt.close()
def visualize_umap_data(umap_Y,
umap_T,
umap_C,
umap_P,
train_Y,
train_T,
train_C,
train_P,
test_Y = None,
test_T = None,
test_C = None,
test_P = None,
total_train_size = "",
total_test_size = "",
info = "",
output_dir = "",
orig_space_dim = 0):
# all classes
plot_all_data_3(umap_Y = umap_Y,
umap_T = umap_T,
train_Y = train_Y,
train_T = train_T,
test_Y = test_Y,
test_T = test_T,
total_train_size = total_train_size,
total_test_size = total_test_size,
info = info,
output_dir = output_dir,
orig_space_dim = orig_space_dim)
# all predictions
plot_all_data_3(umap_Y = umap_Y,
umap_T = umap_P,
train_Y = train_Y,
train_T = train_P,
test_Y = test_Y,
test_T = test_P,
total_train_size = total_train_size,
total_test_size = total_test_size,
info = info+", all-predictions",
output_dir = output_dir,
orig_space_dim = orig_space_dim)
# class 0
plot_one_class_3(umap_Y = umap_Y,
umap_T = umap_T,
train_Y = train_Y,
train_T = train_T,
test_Y = test_Y,
test_T = test_T,
target = 0,
col = "red",
total_train_size = total_train_size,
total_test_size = total_test_size,
info = info+" class " + str(0),
output_dir = output_dir,
orig_space_dim = orig_space_dim)
# class 1
plot_one_class_3(umap_Y = umap_Y,
umap_T = umap_T,
train_Y = train_Y,
train_T = train_T,
test_Y = test_Y,
test_T = test_T,
target = 1,
col = "green",
total_train_size = total_train_size,
total_test_size = total_test_size,
info = info + " class " + str(1),
output_dir = output_dir,
orig_space_dim = orig_space_dim)
# correct classification
plot_one_class_3(umap_Y = umap_Y,
umap_T = umap_C,
train_Y = train_Y,
train_T = train_C,
test_Y = test_Y,
test_T = test_C,
target = 0,
col = "green",
total_train_size = total_train_size,
total_test_size = total_test_size,
info = info + " correct ",
output_dir = output_dir,
orig_space_dim = orig_space_dim)
# errors
plot_one_class_3(umap_Y = umap_Y,
umap_T = umap_C,
train_Y = train_Y,
train_T = train_C,
test_Y = test_Y,
test_T = test_C,
target = 1,
col = "red",
total_train_size = total_train_size,
total_test_size = total_test_size,
info = info + " errors ",
output_dir = output_dir,
orig_space_dim = orig_space_dim)
# prediction 0
plot_one_class_3(umap_Y = umap_Y,
umap_T = umap_P,
train_Y = train_Y,
train_T = train_P,
test_Y = test_Y,
test_T = test_P,
target = 0,
col = "red",
total_train_size = total_train_size,
total_test_size = total_test_size,
info = info + " predict-0 ",
output_dir = output_dir,
orig_space_dim = orig_space_dim)
# prediction 1
plot_one_class_3(umap_Y = umap_Y,
umap_T = umap_P,
train_Y = train_Y,
train_T = train_P,
test_Y = test_Y,
test_T = test_P,
target = 1,
col = "green",
total_train_size = total_train_size,
total_test_size = total_test_size,
info = info + " predict-1 ",
output_dir = output_dir,
orig_space_dim = orig_space_dim)
def hdbscan_clustering(umap_data, train_data, test_data, info="", output_dir=""):
clusterer = hdbscan.HDBSCAN(min_samples=10, min_cluster_size=500, prediction_data=True)
umap_labels = clusterer.fit_predict(umap_data)
train_labels, _ = hdbscan.approximate_predict(clusterer, train_data)
test_labels, _ = hdbscan.approximate_predict(clusterer, test_data)
fig, ((ax00, ax01, ax02), (ax10, ax11, ax12)) = plt.subplots(2, 3)
fig.suptitle("HDBSCAN clastering: "+ info )
# plot umap data
umap_clustered = (umap_labels >= 0)
umap_coll = collections.Counter(umap_clustered)
print("umap_clustered", umap_coll)
# print("umap_data", umap_data.shape)
# print("~umap_clustered", umap_clustered.count(False), ~umap_clustered)
ax00.scatter(umap_data[~umap_clustered, 0],
umap_data[~umap_clustered, 1],
c=(0.5, 0.5, 0.5),
s=0.1,
alpha=0.5)
ax00.set_title("UMAP Outliers " + str(umap_coll[False]), fontsize=7)
ax10.scatter(umap_data[umap_clustered, 0],
umap_data[umap_clustered, 1],
c=umap_labels[umap_clustered],
s=0.1,
cmap="Spectral")
ax10.set_title("UMAP Inliers " + str(umap_coll[True]), fontsize=7)
# plot train data
train_clustered = (train_labels >= 0)
train_coll = collections.Counter(train_clustered)
ax01.scatter(train_data[~train_clustered, 0],
train_data[~train_clustered, 1],
c=(0.5, 0.5, 0.5),
s=0.1,
alpha=0.5)
ax01.set_title("Train Outliers " + str(train_coll[False]), fontsize=7)
ax11.scatter(train_data[train_clustered, 0],
train_data[train_clustered, 1],
c=train_labels[train_clustered],
s=0.1,
cmap="Spectral")
ax11.set_title("Train Inliers " + str(train_coll[True]), fontsize=7)
# plot test data
test_clustered = (test_labels >= 0)
test_coll = collections.Counter(test_clustered)
ax02.scatter(test_data[~test_clustered, 0],
test_data[~test_clustered, 1],
c=(0.5, 0.5, 0.5),
s=0.1,
alpha=0.5)
ax02.set_title("Tets Outliers " + str(test_coll[False]), fontsize=7)
ax12.scatter(test_data[test_clustered, 0],
test_data[test_clustered, 1],
c=test_labels[test_clustered],
s=0.1,
cmap="Spectral")
ax12.set_title("Test Inliers " + str(test_coll[True]), fontsize=7)
plt.savefig(output_dir+"/"+info+"-hdbscan.png")
plt.close()
def visualize_all_data_umap(dlrm,
train_ld,
test_ld = None,
max_umap_size = 50000,
output_dir = "",
umap_metric = "euclidean"):
data_ratio = 1
print("creating umap data")
umap_train_feat, umap_train_X, umap_train_cat, umap_train_T, umap_train_z, umap_train_c, umap_train_p = create_umap_data(dlrm=dlrm, data_ld=train_ld, max_size=max_umap_size, offset=0, info="umap")
# transform train and test data
train_feat, train_X, train_cat, train_T, train_z, train_c, train_p = create_umap_data(dlrm=dlrm, data_ld=train_ld, max_size=max_umap_size*data_ratio, offset=max_umap_size, info="train")
test_feat, test_X, test_cat, test_T, test_z, test_c, test_p = create_umap_data(dlrm=dlrm, data_ld=test_ld, max_size=max_umap_size*data_ratio, offset=0, info="test")
print("umap_train_feat", np.array(umap_train_feat).shape)
reducer_all_feat = umap.UMAP(random_state=42, metric=umap_metric)
umap_feat_Y = reducer_all_feat.fit_transform(umap_train_feat)
train_feat_Y = reducer_all_feat.transform(train_feat)
test_feat_Y = reducer_all_feat.transform(test_feat)
visualize_umap_data(umap_Y = umap_feat_Y,
umap_T = umap_train_T,
umap_C = umap_train_c,
umap_P = umap_train_p,
train_Y = train_feat_Y,
train_T = train_T,
train_C = train_c,
train_P = train_p,
test_Y = test_feat_Y,
test_T = test_T,
test_C = test_c,
test_P = test_p,
total_train_size = str(len(train_ld)),
total_test_size = str(len(test_ld)),
info = "all-features",
output_dir = output_dir,
orig_space_dim = np.array(umap_train_feat).shape[1])
hdbscan_clustering(umap_data = umap_feat_Y,
train_data = train_feat_Y,
test_data = test_feat_Y,
info = "umap-all-features",
output_dir = output_dir)
# hdbscan_clustering(umap_data = np.array(umap_train_feat),
# train_data = np.array(train_feat),
# test_data = np.array(test_feat),
# info = "all-features",
# output_dir = output_dir)
print("umap_train_X", np.array(umap_train_X).shape)
reducer_X = umap.UMAP(random_state=42, metric=umap_metric)
umap_X_Y = reducer_X.fit_transform(umap_train_X)
train_X_Y = reducer_X.transform(train_X)
test_X_Y = reducer_X.transform(test_X)
visualize_umap_data(umap_Y = umap_X_Y,
umap_T = umap_train_T,
umap_C = umap_train_c,
umap_P = umap_train_p,
train_Y = train_X_Y,
train_T = train_T,
train_C = train_c,
train_P = train_p,
test_Y = test_X_Y,
test_T = test_T,
test_C = test_c,
test_P = test_p,
total_train_size = str(len(train_ld)),
total_test_size = str(len(test_ld)),
info = "cont-features",
output_dir = output_dir,
orig_space_dim = np.array(umap_train_X).shape[1])
print("umap_train_cat", np.array(umap_train_cat).shape)
reducer_cat = umap.UMAP(random_state=42, metric=umap_metric)
umap_cat_Y = reducer_cat.fit_transform(umap_train_cat)
train_cat_Y = reducer_cat.transform(train_cat)
test_cat_Y = reducer_cat.transform(test_cat)
visualize_umap_data(umap_Y = umap_cat_Y,
umap_T = umap_train_T,
umap_C = umap_train_c,
umap_P = umap_train_p,
train_Y = train_cat_Y,
train_T = train_T,
train_C = train_c,
train_P = train_p,
test_Y = test_cat_Y,
test_T = test_T,
test_C = test_c,
test_P = test_p,
total_train_size = str(len(train_ld)),
total_test_size = str(len(test_ld)),
info = "cat-features",
output_dir = output_dir,
orig_space_dim = np.array(umap_train_cat).shape[1])
# UMAP for z data
for i in range(0,len(umap_train_z)):
print("z", i, np.array(umap_train_z[i]).shape)
reducer_z = umap.UMAP(random_state=42, metric=umap_metric)
umap_z_Y = reducer_z.fit_transform(umap_train_z[i])
train_z_Y = reducer_z.transform(train_z[i])
test_z_Y = reducer_z.transform(test_z[i])
visualize_umap_data(umap_Y = umap_z_Y,
umap_T = umap_train_T,
umap_C = umap_train_c,
umap_P = umap_train_p,
train_Y = train_z_Y,
train_T = train_T,
train_C = train_c,
train_P = train_p,
test_Y = test_z_Y,
test_T = test_T,
test_C = test_c,
test_P = test_p,
total_train_size = str(len(train_ld)),
total_test_size = str(len(test_ld)),
info = "z-features-"+str(i),
output_dir = output_dir,
orig_space_dim = np.array(umap_train_z[i]).shape[1])
def analyze_model_data(output_dir,
dlrm,
train_ld,
test_ld,
train_data,
skip_embedding = False,
use_tsne = False,
max_umap_size = 50000,
max_tsne_size = 10000,
skip_categorical_analysis = False,
skip_data_plots = False,
umap_metric = "euclidean"):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if skip_embedding is False:
cat_counts = None
cat_counts = analyse_categorical_counts(X_cat=train_data.X_cat, emb_l=dlrm.emb_l, output_dir=output_dir)
visualize_embeddings_umap(emb_l = dlrm.emb_l,
output_dir = output_dir,
max_size = max_umap_size,
umap_metric = umap_metric,
cat_counts = cat_counts)
if use_tsne is True:
visualize_embeddings_tsne(emb_l = dlrm.emb_l,
output_dir = output_dir,
max_size = max_tsne_size)
# data visualization and analysis
if skip_data_plots is False:
visualize_all_data_umap(dlrm=dlrm, train_ld=train_ld, test_ld=test_ld, max_umap_size=max_umap_size, output_dir=output_dir, umap_metric=umap_metric)
# analyse categorical variables
if skip_categorical_analysis is False and args.data_randomize == "none":
analyse_categorical_data(X_cat=train_data.X_cat, n_days=10, output_dir=output_dir)
if __name__ == "__main__":
output_dir = ""
### parse arguments ###
parser = argparse.ArgumentParser(
description="Exploratory DLRM analysis"
)
parser.add_argument("--load-model", type=str, default="")
parser.add_argument("--data-set", choices=["kaggle", "terabyte"], help="dataset")
# parser.add_argument("--dataset-path", required=True, help="path to the dataset")
parser.add_argument("--max-ind-range", type=int, default=-1)
# parser.add_argument("--mlperf-bin-loader", action="store_true", default=False)
parser.add_argument("--output-dir", type=str, default="")
parser.add_argument("--skip-embedding", action="store_true", default=False)
parser.add_argument("--umap-metric", type=str, default="euclidean")
parser.add_argument("--skip-data-plots", action="store_true", default=False)
parser.add_argument("--skip-categorical-analysis", action="store_true", default=False)
# umap related
parser.add_argument("--max-umap-size", type=int, default=50000)
# tsne related
parser.add_argument("--use-tsne", action="store_true", default=False)
parser.add_argument("--max-tsne-size", type=int, default=1000)
# data file related
parser.add_argument("--raw-data-file", type=str, default="")
parser.add_argument("--processed-data-file", type=str, default="")
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
parser.add_argument("--data-randomize", type=str, default="total") # none, total or day or none
parser.add_argument("--memory-map", action="store_true", default=False)
parser.add_argument("--mini-batch-size", type=int, default=1)
parser.add_argument("--num-workers", type=int, default=0)
parser.add_argument("--test-mini-batch-size", type=int, default=1)
parser.add_argument("--test-num-workers", type=int, default=0)
parser.add_argument("--num-batches", type=int, default=0)
# mlperf logging (disables other output and stops early)
parser.add_argument("--mlperf-logging", action="store_true", default=False)
args = parser.parse_args()
print("command line args: ", json.dumps(vars(args)))
output_dir = args.output_dir
if output_dir == "":
output_dir = args.data_set+"-"+os.path.split(args.load_model)[-1]+"-vis_all"
print("output_dir:", output_dir)
if args.data_set == "kaggle":
# 1. Criteo Kaggle Display Advertisement Challenge Dataset (see ./bench/dlrm_s_criteo_kaggle.sh)
m_spa=16
ln_emb=np.array([1460,583,10131227,2202608,305,24,12517,633,3,93145,5683,8351593,3194,27,14992,5461306,10,5652,2173,4,7046547,18,15,286181,105,142572])
ln_bot=np.array([13,512,256,64,16])
ln_top=np.array([367,512,256,1])
elif args.data_set == "terabyte":
if args.max_ind_range == 10000000:
# 2. Criteo Terabyte (see ./bench/dlrm_s_criteo_terabyte.sh [--sub-sample=0.875] --max-in-range=10000000)
m_spa=64
ln_emb=np.array([9980333,36084,17217,7378,20134,3,7112,1442,61, 9758201,1333352,313829,10,2208,11156,122,4,970,14, 9994222, 7267859, 9946608,415421,12420,101, 36])
ln_bot=np.array([13,512,256,64])
ln_top=np.array([415,512,512,256,1])
elif args.max_ind_range == 40000000:
# 3. Criteo Terabyte MLPerf training (see ./bench/run_and_time.sh --max-in-range=40000000)
m_spa=128
ln_emb=np.array([39884406,39043,17289,7420,20263,3,7120,1543,63,38532951,2953546,403346,10,2208,11938,155,4,976,14,39979771,25641295,39664984,585935,12972,108,36])
ln_bot=np.array([13,512,256,128])
ln_top=np.array([479,1024,1024,512,256,1])
else:
raise ValueError("only --max-in-range 10M or 40M is supported")
else:
raise ValueError("only kaggle|terabyte dataset options are supported")
# check input parameters
if args.data_randomize != "none" and args.skip_categorical_analysis is not True:
print("Incorrect option for categoricat analysis, use: --data-randomize=none")
sys.exit(-1)
dlrm = DLRM_Net(
m_spa,
ln_emb,
ln_bot,
ln_top,
arch_interaction_op="dot",
arch_interaction_itself=False,
sigmoid_bot=-1,
sigmoid_top=ln_top.size - 2,
sync_dense_params=True,
loss_threshold=0.0,
ndevices=-1,
qr_flag=False,
qr_operation=None,
qr_collisions=None,
qr_threshold=None,
md_flag=False,
md_threshold=None,
)
# Load model if specified
if not (args.load_model == ""):
print("Loading saved model {}".format(args.load_model))
ld_model = torch.load(args.load_model, map_location=torch.device("cpu"))
dlrm.load_state_dict(ld_model["state_dict"])
print("Model loaded", args.load_model)
#print(dlrm)
z_size = len(dlrm.top_l)
for i in range(0, z_size):
print("z", i, dlrm.top_l[i])
# load data
train_data = None
test_data = None
if args.raw_data_file != "" or args.processed_data_file != "":
train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args)
analyze_model_data(output_dir = output_dir,
dlrm = dlrm,
train_ld = train_ld,
test_ld = test_ld,
train_data = train_data,
skip_embedding = args.skip_embedding,
use_tsne = args.use_tsne,
max_umap_size = args.max_umap_size,
max_tsne_size = args.max_tsne_size,
skip_categorical_analysis = args.skip_categorical_analysis,
skip_data_plots = args.skip_data_plots,
umap_metric = args.umap_metric)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: compile .so from python code
from __future__ import absolute_import, division, print_function, unicode_literals
from setuptools import setup
from Cython.Build import cythonize
from distutils.extension import Extension
ext_modules = [
Extension(
"data_utils_cython",
["data_utils_cython.pyx"],
extra_compile_args=['-O3'],
extra_link_args=['-O3'],
)
]
setup(
name='data_utils_cython',
ext_modules=cythonize(ext_modules)
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Description: run dataset pre-processing in standalone mode
# WARNING: These steps are required to work with Cython
# 1. Install Cython
# > sudo yum install Cython
# 2. Please copy data_utils.py into data_utils_cython.pyx
# 3. Compile the data_utils_cython.pyx to generate .so
# (it's important to keep the extension .pyx rather than .py
# to ensure the C/C++ .so, not the .py, is loaded at import time)
# > python cython_compile.py build_ext --inplace
# This should create data_utils_cython.so, which can be loaded below with "import"
# 4. Run standalone dataset preprocessing to generate .npz files
# a. Kaggle
# > python cython_criteo.py --data-set=kaggle --raw-data-file=./input/train.txt
# --processed-data-file=./input/kaggleAdDisplayChallenge_processed.npz
# b. Terabyte
# > python cython_criteo.py --max-ind-range=10000000 [--memory-map] --data-set=terabyte
# --raw-data-file=./input/day --processed-data-file=./input/terabyte_processed.npz
from __future__ import absolute_import, division, print_function, unicode_literals
import data_utils_cython as duc
if __name__ == "__main__":
### import packages ###
import argparse
### parse arguments ###
parser = argparse.ArgumentParser(
description="Preprocess Criteo dataset"
)
# model related parameters
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
parser.add_argument("--data-randomize", type=str, default="total") # or day or none
parser.add_argument("--memory-map", action="store_true", default=False)
parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte
parser.add_argument("--raw-data-file", type=str, default="")
parser.add_argument("--processed-data-file", type=str, default="")
args = parser.parse_args()
duc.loadDataset(
args.data_set,
args.max_ind_range,
args.data_sub_sample_rate,
args.data_randomize,
"train",
args.raw_data_file,
args.processed_data_file,
args.memory_map
)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Mixed-Dimensions Trick
#
# Description: Applies mixed dimension trick to embeddings to reduce
# embedding sizes.
#
# References:
# [1] Antonio Ginart, Maxim Naumov, Dheevatsa Mudigere, Jiyan Yang, James Zou,
# "Mixed Dimension Embeddings with Application to Memory-Efficient Recommendation
# Systems", CoRR, arXiv:1909.11810, 2019
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
def md_solver(n, alpha, d0=None, B=None, round_dim=True, k=None):
'''
An external facing function call for mixed-dimension assignment
with the alpha power temperature heuristic
Inputs:
n -- (torch.LongTensor) ; Vector of num of rows for each embedding matrix
alpha -- (torch.FloatTensor); Scalar, non-negative, controls dim. skew
d0 -- (torch.FloatTensor); Scalar, baseline embedding dimension
B -- (torch.FloatTensor); Scalar, parameter budget for embedding layer
round_dim -- (bool); flag for rounding dims to nearest pow of 2
k -- (torch.LongTensor) ; Vector of average number of queries per inference
'''
n, indices = torch.sort(n)
k = k[indices] if k is not None else torch.ones(len(n))
d = alpha_power_rule(n.type(torch.float) / k, alpha, d0=d0, B=B)
if round_dim:
d = pow_2_round(d)
undo_sort = [0] * len(indices)
for i, v in enumerate(indices):
undo_sort[v] = i
return d[undo_sort]
def alpha_power_rule(n, alpha, d0=None, B=None):
if d0 is not None:
lamb = d0 * (n[0].type(torch.float) ** alpha)
elif B is not None:
lamb = B / torch.sum(n.type(torch.float) ** (1 - alpha))
else:
raise ValueError("Must specify either d0 or B")
d = torch.ones(len(n)) * lamb * (n.type(torch.float) ** (-alpha))
for i in range(len(d)):
if i == 0 and d0 is not None:
d[i] = d0
else:
d[i] = 1 if d[i] < 1 else d[i]
return (torch.round(d).type(torch.long))
def pow_2_round(dims):
return 2 ** torch.round(torch.log2(dims.type(torch.float)))
class PrEmbeddingBag(nn.Module):
def __init__(self, num_embeddings, embedding_dim, base_dim):
super(PrEmbeddingBag, self).__init__()
self.embs = nn.EmbeddingBag(
num_embeddings, embedding_dim, mode="sum", sparse=True)
torch.nn.init.xavier_uniform_(self.embs.weight)
if embedding_dim < base_dim:
self.proj = nn.Linear(embedding_dim, base_dim, bias=False)
torch.nn.init.xavier_uniform_(self.proj.weight)
elif embedding_dim == base_dim:
self.proj = nn.Identity()
else:
raise ValueError(
"Embedding dim " + str(embedding_dim) + " > base dim " + str(base_dim)
)
def forward(self, input, offsets=None, per_sample_weights=None):
return self.proj(self.embs(
input, offsets=offsets, per_sample_weights=per_sample_weights))
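
# Illustrative usage sketch (not part of the original file): compute mixed dimensions
# for a few hypothetical tables with md_solver, then wrap each one in a PrEmbeddingBag
# that projects back to the shared base dimension. The row counts and alpha below are
# made-up demo values.
if __name__ == "__main__":
    num_rows = torch.tensor([1_000_000, 50_000, 100])  # hypothetical table sizes
    base_dim = 64
    dims = md_solver(num_rows, alpha=0.3, d0=base_dim, round_dim=True)
    bags = [
        PrEmbeddingBag(int(n), int(d), base_dim)
        for n, d in zip(num_rows.tolist(), dims.tolist())
    ]
    # Look up two bags of two indices each from the first (largest) table.
    indices = torch.tensor([0, 10, 20, 30])
    offsets = torch.tensor([0, 2])
    out = bags[0](indices, offsets)
    print(dims.tolist(), out.shape)  # every output row has base_dim features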
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Quotient-Remainder Trick
#
# Description: Applies quotient remainder-trick to embeddings to reduce
# embedding sizes.
#
# References:
# [1] Hao-Jun Michael Shi, Dheevatsa Mudigere, Maxim Naumov, Jiyan Yang,
# "Compositional Embeddings Using Complementary Partitions for Memory-Efficient
# Recommendation Systems", CoRR, arXiv:1909.02107, 2019
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy as np
class QREmbeddingBag(nn.Module):
r"""Computes sums or means over two 'bags' of embeddings, one using the quotient
of the indices and the other using the remainder of the indices, without
instantiating the intermediate embeddings, then performs an operation to combine these.
For bags of constant length and no :attr:`per_sample_weights`, this class
* with ``mode="sum"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.sum(dim=0)``,
* with ``mode="mean"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.mean(dim=0)``,
* with ``mode="max"`` is equivalent to :class:`~torch.nn.Embedding` followed by ``torch.max(dim=0)``.
However, :class:`~torch.nn.EmbeddingBag` is much more time and memory efficient than using a chain of these
operations.
QREmbeddingBag also supports per-sample weights as an argument to the forward
pass. This scales the output of the Embedding before performing a weighted
    reduction as specified by ``mode``. If :attr:`per_sample_weights` is passed, the
only supported ``mode`` is ``"sum"``, which computes a weighted sum according to
:attr:`per_sample_weights`.
Known Issues:
Autograd breaks with multiple GPUs. It breaks only with multiple embeddings.
Args:
num_categories (int): total number of unique categories. The input indices must be in
0, 1, ..., num_categories - 1.
embedding_dim (list): list of sizes for each embedding vector in each table. If ``"add"``
or ``"mult"`` operation are used, these embedding dimensions must be
the same. If a single embedding_dim is used, then it will use this
embedding_dim for both embedding tables.
num_collisions (int): number of collisions to enforce.
        operation (string, optional): ``"concat"``, ``"add"``, or ``"mult"``. Specifies the operation
to compose embeddings. ``"concat"`` concatenates the embeddings,
``"add"`` sums the embeddings, and ``"mult"`` multiplies
(component-wise) the embeddings.
Default: ``"mult"``
max_norm (float, optional): If given, each embedding vector with norm larger than :attr:`max_norm`
is renormalized to have norm :attr:`max_norm`.
norm_type (float, optional): The p of the p-norm to compute for the :attr:`max_norm` option. Default ``2``.
scale_grad_by_freq (boolean, optional): if given, this will scale gradients by the inverse of frequency of
the words in the mini-batch. Default ``False``.
Note: this option is not supported when ``mode="max"``.
mode (string, optional): ``"sum"``, ``"mean"`` or ``"max"``. Specifies the way to reduce the bag.
``"sum"`` computes the weighted sum, taking :attr:`per_sample_weights`
into consideration. ``"mean"`` computes the average of the values
in the bag, ``"max"`` computes the max value over each bag.
Default: ``"mean"``
sparse (bool, optional): if ``True``, gradient w.r.t. :attr:`weight` matrix will be a sparse tensor. See
Notes for more details regarding sparse gradients. Note: this option is not
supported when ``mode="max"``.
Attributes:
        weight_q, weight_r (Tensor): the learnable weights of the quotient and remainder embedding
                         tables, each of shape `(num_embeddings, embedding_dim)`, initialized using
                         a uniform distribution parameterized by sqrt(1 / num_categories).
Inputs: :attr:`input` (LongTensor), :attr:`offsets` (LongTensor, optional), and
        :attr:`per_sample_weights` (Tensor, optional)
- If :attr:`input` is 2D of shape `(B, N)`,
it will be treated as ``B`` bags (sequences) each of fixed length ``N``, and
this will return ``B`` values aggregated in a way depending on the :attr:`mode`.
:attr:`offsets` is ignored and required to be ``None`` in this case.
- If :attr:`input` is 1D of shape `(N)`,
it will be treated as a concatenation of multiple bags (sequences).
:attr:`offsets` is required to be a 1D tensor containing the
starting index positions of each bag in :attr:`input`. Therefore,
for :attr:`offsets` of shape `(B)`, :attr:`input` will be viewed as
having ``B`` bags. Empty bags (i.e., having 0-length) will have
returned vectors filled by zeros.
per_sample_weights (Tensor, optional): a tensor of float / double weights, or None
to indicate all weights should be taken to be ``1``. If specified, :attr:`per_sample_weights`
must have exactly the same shape as input and is treated as having the same
:attr:`offsets`, if those are not ``None``. Only supported for ``mode='sum'``.
Output shape: `(B, embedding_dim)`
"""
__constants__ = ['num_categories', 'embedding_dim', 'num_collisions',
'operation', 'max_norm', 'norm_type', 'scale_grad_by_freq',
'mode', 'sparse']
def __init__(self, num_categories, embedding_dim, num_collisions,
operation='mult', max_norm=None, norm_type=2.,
scale_grad_by_freq=False, mode='mean', sparse=False,
_weight=None):
super(QREmbeddingBag, self).__init__()
assert operation in ['concat', 'mult', 'add'], 'Not valid operation!'
self.num_categories = num_categories
if isinstance(embedding_dim, int) or len(embedding_dim) == 1:
self.embedding_dim = [embedding_dim, embedding_dim]
else:
self.embedding_dim = embedding_dim
self.num_collisions = num_collisions
self.operation = operation
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
if self.operation == 'add' or self.operation == 'mult':
assert self.embedding_dim[0] == self.embedding_dim[1], \
'Embedding dimensions do not match!'
self.num_embeddings = [int(np.ceil(num_categories / num_collisions)),
num_collisions]
if _weight is None:
self.weight_q = Parameter(torch.Tensor(self.num_embeddings[0], self.embedding_dim[0]))
self.weight_r = Parameter(torch.Tensor(self.num_embeddings[1], self.embedding_dim[1]))
self.reset_parameters()
else:
assert list(_weight[0].shape) == [self.num_embeddings[0], self.embedding_dim[0]], \
'Shape of weight for quotient table does not match num_embeddings and embedding_dim'
assert list(_weight[1].shape) == [self.num_embeddings[1], self.embedding_dim[1]], \
'Shape of weight for remainder table does not match num_embeddings and embedding_dim'
self.weight_q = Parameter(_weight[0])
self.weight_r = Parameter(_weight[1])
self.mode = mode
self.sparse = sparse
def reset_parameters(self):
nn.init.uniform_(self.weight_q, np.sqrt(1 / self.num_categories))
nn.init.uniform_(self.weight_r, np.sqrt(1 / self.num_categories))
def forward(self, input, offsets=None, per_sample_weights=None):
input_q = (input / self.num_collisions).long()
input_r = torch.remainder(input, self.num_collisions).long()
embed_q = F.embedding_bag(input_q, self.weight_q, offsets, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.mode,
self.sparse, per_sample_weights)
embed_r = F.embedding_bag(input_r, self.weight_r, offsets, self.max_norm,
self.norm_type, self.scale_grad_by_freq, self.mode,
self.sparse, per_sample_weights)
if self.operation == 'concat':
embed = torch.cat((embed_q, embed_r), dim=1)
elif self.operation == 'add':
embed = embed_q + embed_r
elif self.operation == 'mult':
embed = embed_q * embed_r
return embed
def extra_repr(self):
s = '{num_embeddings}, {embedding_dim}'
if self.max_norm is not None:
s += ', max_norm={max_norm}'
if self.norm_type != 2:
s += ', norm_type={norm_type}'
if self.scale_grad_by_freq is not False:
s += ', scale_grad_by_freq={scale_grad_by_freq}'
s += ', mode={mode}'
return s.format(**self.__dict__)
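
# Illustrative usage sketch (not part of the original file): a single categorical
# feature with 1000 ids is factored into a quotient table of ceil(1000 / 4) = 250 rows
# and a remainder table of 4 rows; the two lookups are combined element-wise with the
# 'mult' operation. The sizes below are arbitrary demo values.
if __name__ == "__main__":
    qr_bag = QREmbeddingBag(num_categories=1000, embedding_dim=16,
                            num_collisions=4, operation='mult', mode='sum')
    # Two bags: ids [3, 517, 998] and [42, 7].
    ids = torch.tensor([3, 517, 998, 42, 7])
    offsets = torch.tensor([0, 3])
    print(qr_bag(ids, offsets).shape)  # torch.Size([2, 16])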
|
import torch
# OSS import
try:
# pyre-ignore[21]
# @manual=//ai_codesign/benchmarks/dlrm/torchrec_dlrm/data:dlrm_dataloader
from .data.dlrm_dataloader import get_dataloader
except ImportError:
pass
import itertools
import os
from pyre_extensions import none_throws
from torch import distributed as dist
from torchbenchmark.tasks import RECOMMENDATION
from torchrec import EmbeddingBagCollection
from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES
from torchrec.distributed import TrainPipelineSparseDist
from torchrec.distributed.shard import shard_modules
from torchrec.models.dlrm import DLRM, DLRM_DCN, DLRM_Projection, DLRMTrain
from torchrec.modules.embedding_configs import EmbeddingBagConfig
from torchrec.optim.apply_optimizer_in_backward import apply_optimizer_in_backward
from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper
from torchrec.optim.optimizers import in_backward_optimizer_filter
from ...util.model import BenchmarkModel
from .args import InteractionType, parse_args
class Model(BenchmarkModel):
task = RECOMMENDATION.RECOMMENDATION
DEFAULT_TRAIN_BSIZE = 1024
DEFAULT_EVAL_BSIZE = 1024
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
args = parse_args(self.extra_args)
backend = "nccl" if self.device == "cuda" else "gloo"
device = torch.device(self.device)
os.environ["RANK"] = "0"
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29500"
if not dist.is_initialized():
dist.init_process_group(backend=backend)
# initialize example data
if self.test == "train":
args.batch_size = self.batch_size
loader = get_dataloader(args, backend, "train")
if self.test == "eval":
args.test_batch_size = self.batch_size
loader = get_dataloader(args, backend, "test")
self.iterator = itertools.cycle(iter(loader))
self.example_inputs = next(self.iterator).to(device)
# parse the args
args.dense_arch_layer_sizes = [int(x) for x in args.dense_arch_layer_sizes.split(',') if x.strip().isdigit()]
args.over_arch_layer_sizes = [int(x) for x in args.over_arch_layer_sizes.split(',') if x.strip().isdigit()]
args.interaction_branch1_layer_sizes = [int(x) for x in args.interaction_branch1_layer_sizes.split(',') if x.strip().isdigit()]
args.interaction_branch2_layer_sizes = [int(x) for x in args.interaction_branch2_layer_sizes.split(',') if x.strip().isdigit()]
        assert args.in_memory_binary_criteo_path is None and args.synthetic_multi_hot_criteo_path is None, \
            "Torchbench only supports random data inputs."
eb_configs = [
EmbeddingBagConfig(
name=f"t_{feature_name}",
embedding_dim=args.embedding_dim,
num_embeddings=none_throws(args.num_embeddings_per_feature)[feature_idx]
if args.num_embeddings is None
else args.num_embeddings,
feature_names=[feature_name],
)
for feature_idx, feature_name in enumerate(DEFAULT_CAT_NAMES)
]
dlrm_model = DLRM_DCN(
embedding_bag_collection=EmbeddingBagCollection(
tables=eb_configs, device=device
),
dense_in_features=len(DEFAULT_INT_NAMES),
dense_arch_layer_sizes=args.dense_arch_layer_sizes,
over_arch_layer_sizes=args.over_arch_layer_sizes,
dcn_num_layers=args.dcn_num_layers,
dcn_low_rank_dim=args.dcn_low_rank_dim,
dense_device=device,
)
train_model = DLRMTrain(dlrm_model)
# This will apply the Adagrad optimizer in the backward pass for the embeddings (sparse_arch). This means that
# the optimizer update will be applied in the backward pass, in this case through a fused op.
# TorchRec will use the FBGEMM implementation of EXACT_ADAGRAD. For GPU devices, a fused CUDA kernel is invoked. For CPU, FBGEMM_GPU invokes CPU kernels
# https://github.com/pytorch/FBGEMM/blob/2cb8b0dff3e67f9a009c4299defbd6b99cc12b8f/fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py#L676-L678
apply_optimizer_in_backward(
torch.optim.Adagrad,
train_model.model.sparse_arch.parameters(),
{"lr": args.learning_rate},
)
self.model = shard_modules(
module=train_model,
device=device
).to(device)
dense_optimizer = KeyedOptimizerWrapper(
dict(in_backward_optimizer_filter(self.model.named_parameters())),
lambda params: torch.optim.Adagrad(params, lr=args.learning_rate),
)
# fused optimizer will already be called
opt = CombinedOptimizer([dense_optimizer])
if args.multi_hot_sizes is not None:
raise RuntimeError("Multi-hot is not supported in TorchBench.")
if self.test == "train":
self.opt = opt
self.train_pipeline = TrainPipelineSparseDist(
self.model,
opt,
device,
)
self.model.train()
elif self.test == "eval":
self.model.eval()
def get_module(self):
return self.model, (self.example_inputs, )
def train(self):
self.train_pipeline.progress(self.iterator)
def eval(self):
with torch.no_grad():
_loss, logits = self.model(self.example_inputs)
return logits
|
import argparse
from enum import Enum
from typing import List
class InteractionType(Enum):
ORIGINAL = "original"
DCN = "dcn"
PROJECTION = "projection"
def __str__(self):
return self.value
def parse_args(argv: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser(description="torchrec dlrm example trainer")
parser.add_argument(
"--epochs",
type=int,
default=1,
help="number of epochs to train",
)
parser.add_argument(
"--batch_size",
type=int,
default=1024,
help="batch size to use for training",
)
parser.add_argument(
"--drop_last_training_batch",
dest="drop_last_training_batch",
action="store_true",
help="Drop the last non-full training batch",
)
parser.add_argument(
"--test_batch_size",
type=int,
default=None,
help="batch size to use for validation and testing",
)
parser.add_argument(
"--limit_train_batches",
type=int,
default=None,
help="number of train batches",
)
parser.add_argument(
"--limit_val_batches",
type=int,
default=None,
help="number of validation batches",
)
parser.add_argument(
"--limit_test_batches",
type=int,
default=None,
help="number of test batches",
)
parser.add_argument(
"--dataset_name",
type=str,
default="criteo_1t",
help="dataset for experiment, current support criteo_1tb, criteo_kaggle",
)
parser.add_argument(
"--num_embeddings",
type=int,
default=100_000,
help="max_ind_size. The number of embeddings in each embedding table. Defaults"
" to 100_000 if num_embeddings_per_feature is not supplied.",
)
parser.add_argument(
"--num_embeddings_per_feature",
type=str,
default=None,
help="Comma separated max_ind_size per sparse feature. The number of embeddings"
" in each embedding table. 26 values are expected for the Criteo dataset.",
)
parser.add_argument(
"--dense_arch_layer_sizes",
type=str,
default="512,256,64",
help="Comma separated layer sizes for dense arch.",
)
parser.add_argument(
"--over_arch_layer_sizes",
type=str,
default="512,512,256,1",
help="Comma separated layer sizes for over arch.",
)
parser.add_argument(
"--embedding_dim",
type=int,
default=64,
help="Size of each embedding.",
)
parser.add_argument(
"--interaction_branch1_layer_sizes",
type=str,
default="2048,2048",
help="Comma separated layer sizes for interaction branch1 (only on dlrm with projection).",
)
parser.add_argument(
"--interaction_branch2_layer_sizes",
type=str,
default="2048,2048",
help="Comma separated layer sizes for interaction branch2 (only on dlrm with projection).",
)
parser.add_argument(
"--dcn_num_layers",
type=int,
default=3,
help="Number of DCN layers in interaction layer (only on dlrm with DCN).",
)
parser.add_argument(
"--dcn_low_rank_dim",
type=int,
default=512,
help="Low rank dimension for DCN in interaction layer (only on dlrm with DCN).",
)
parser.add_argument(
"--undersampling_rate",
type=float,
help="Desired proportion of zero-labeled samples to retain (i.e. undersampling zero-labeled rows)."
" Ex. 0.3 indicates only 30pct of the rows with label 0 will be kept."
" All rows with label 1 will be kept. Value should be between 0 and 1."
" When not supplied, no undersampling occurs.",
)
parser.add_argument(
"--seed",
type=int,
help="Random seed for reproducibility.",
)
parser.add_argument(
"--pin_memory",
dest="pin_memory",
action="store_true",
help="Use pinned memory when loading data.",
)
parser.add_argument(
"--mmap_mode",
dest="mmap_mode",
action="store_true",
help="--mmap_mode mmaps the dataset."
" That is, the dataset is kept on disk but is accessed as if it were in memory."
" --mmap_mode is intended mostly for faster debugging. Use --mmap_mode to bypass"
" preloading the dataset when preloading takes too long or when there is "
" insufficient memory available to load the full dataset.",
)
parser.add_argument(
"--in_memory_binary_criteo_path",
type=str,
default=None,
help="Directory path containing the Criteo dataset npy files.",
)
parser.add_argument(
"--synthetic_multi_hot_criteo_path",
type=str,
default=None,
help="Directory path containing the MLPerf v2 synthetic multi-hot dataset npz files.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=15.0,
help="Learning rate.",
)
parser.add_argument(
"--shuffle_batches",
dest="shuffle_batches",
action="store_true",
help="Shuffle each batch during training.",
)
parser.add_argument(
"--shuffle_training_set",
dest="shuffle_training_set",
action="store_true",
help="Shuffle the training set in memory. This will override mmap_mode",
)
parser.add_argument(
"--validation_freq_within_epoch",
type=int,
default=None,
help="Frequency at which validation will be run within an epoch.",
)
parser.set_defaults(
pin_memory=None,
mmap_mode=None,
drop_last=None,
shuffle_batches=None,
shuffle_training_set=None,
)
parser.add_argument(
"--collect_multi_hot_freqs_stats",
dest="collect_multi_hot_freqs_stats",
action="store_true",
help="Flag to determine whether to collect stats on freq of embedding access.",
)
parser.add_argument(
"--multi_hot_sizes",
type=str,
default=None,
help="Comma separated multihot size per sparse feature. 26 values are expected for the Criteo dataset.",
)
parser.add_argument(
"--multi_hot_distribution_type",
type=str,
choices=["uniform", "pareto"],
default=None,
help="Multi-hot distribution options.",
)
parser.add_argument("--lr_warmup_steps", type=int, default=0)
parser.add_argument("--lr_decay_start", type=int, default=0)
parser.add_argument("--lr_decay_steps", type=int, default=0)
parser.add_argument(
"--print_lr",
action="store_true",
help="Print learning rate every iteration.",
)
parser.add_argument(
"--allow_tf32",
action="store_true",
help="Enable TensorFloat-32 mode for matrix multiplications on A100 (or newer) GPUs.",
)
parser.add_argument(
"--print_sharding_plan",
action="store_true",
help="Print the sharding plan used for each embedding table.",
)
return parser.parse_args(argv)
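
# Minimal usage sketch (not part of the original file): parse an explicit argv list
# instead of sys.argv; the flag values shown are arbitrary examples.
if __name__ == "__main__":
    demo_args = parse_args(["--batch_size", "2048", "--embedding_dim", "128"])
    print(demo_args.batch_size, demo_args.embedding_dim, demo_args.dataset_name)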
|
import subprocess
import sys
import os
from pathlib import Path
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from typing import List
from torch import distributed as dist
from torch.utils.data import DataLoader
from torchrec.datasets.criteo import (
CAT_FEATURE_COUNT,
DAYS,
DEFAULT_CAT_NAMES,
DEFAULT_INT_NAMES,
InMemoryBinaryCriteoIterDataPipe,
)
from torchrec.datasets.random import RandomRecDataset
# OSS import
try:
# pyre-ignore[21]
# @manual=//ai_codesign/benchmarks/dlrm/torchrec_dlrm/data:multi_hot_criteo
from data.multi_hot_criteo import MultiHotCriteoIterDataPipe
except ImportError:
pass
# internal import
try:
from .multi_hot_criteo import MultiHotCriteoIterDataPipe # noqa F811
except ImportError:
pass
STAGES = ["train", "val", "test"]
def _get_random_dataloader(
args: argparse.Namespace,
stage: str,
) -> DataLoader:
attr = f"limit_{stage}_batches"
num_batches = getattr(args, attr)
if stage in ["val", "test"] and args.test_batch_size is not None:
batch_size = args.test_batch_size
else:
batch_size = args.batch_size
return DataLoader(
RandomRecDataset(
keys=DEFAULT_CAT_NAMES,
batch_size=batch_size,
hash_size=args.num_embeddings,
hash_sizes=args.num_embeddings_per_feature
if hasattr(args, "num_embeddings_per_feature")
else None,
manual_seed=getattr(args, "seed", None),
ids_per_feature=1,
num_dense=len(DEFAULT_INT_NAMES),
num_batches=num_batches,
),
batch_size=None,
batch_sampler=None,
pin_memory=args.pin_memory,
num_workers=0,
)
def _get_in_memory_dataloader(
args: argparse.Namespace,
stage: str,
) -> DataLoader:
if args.in_memory_binary_criteo_path is not None:
dir_path = args.in_memory_binary_criteo_path
sparse_part = "sparse.npy"
datapipe = InMemoryBinaryCriteoIterDataPipe
else:
dir_path = args.synthetic_multi_hot_criteo_path
sparse_part = "sparse_multi_hot.npz"
datapipe = MultiHotCriteoIterDataPipe
if stage == "train":
stage_files: List[List[str]] = [
[os.path.join(dir_path, f"day_{i}_dense.npy") for i in range(DAYS - 1)],
[os.path.join(dir_path, f"day_{i}_{sparse_part}") for i in range(DAYS - 1)],
[os.path.join(dir_path, f"day_{i}_labels.npy") for i in range(DAYS - 1)],
]
elif stage in ["val", "test"]:
stage_files: List[List[str]] = [
[os.path.join(dir_path, f"day_{DAYS-1}_dense.npy")],
[os.path.join(dir_path, f"day_{DAYS-1}_{sparse_part}")],
[os.path.join(dir_path, f"day_{DAYS-1}_labels.npy")],
]
if stage in ["val", "test"] and args.test_batch_size is not None:
batch_size = args.test_batch_size
else:
batch_size = args.batch_size
dataloader = DataLoader(
datapipe(
stage,
*stage_files, # pyre-ignore[6]
batch_size=batch_size,
rank=dist.get_rank(),
world_size=dist.get_world_size(),
drop_last=args.drop_last_training_batch if stage == "train" else False,
shuffle_batches=args.shuffle_batches,
shuffle_training_set=args.shuffle_training_set,
shuffle_training_set_random_seed=args.seed,
mmap_mode=args.mmap_mode,
hashes=args.num_embeddings_per_feature
if args.num_embeddings is None
else ([args.num_embeddings] * CAT_FEATURE_COUNT),
),
batch_size=None,
pin_memory=args.pin_memory,
collate_fn=lambda x: x,
)
return dataloader
def get_dataloader(args: argparse.Namespace, backend: str, stage: str) -> DataLoader:
"""
Gets desired dataloader from dlrm_main command line options. Currently, this
function is able to return either a DataLoader wrapped around a RandomRecDataset or
a Dataloader wrapped around an InMemoryBinaryCriteoIterDataPipe.
Args:
args (argparse.Namespace): Command line options supplied to dlrm_main.py's main
function.
backend (str): "nccl" or "gloo".
stage (str): "train", "val", or "test".
Returns:
dataloader (DataLoader): PyTorch dataloader for the specified options.
"""
stage = stage.lower()
if stage not in STAGES:
raise ValueError(f"Supplied stage was {stage}. Must be one of {STAGES}.")
args.pin_memory = (
(backend == "nccl") if not hasattr(args, "pin_memory") else args.pin_memory
)
if (
args.in_memory_binary_criteo_path is None
and args.synthetic_multi_hot_criteo_path is None
):
return _get_random_dataloader(args, stage)
else:
return _get_in_memory_dataloader(args, stage)
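
# Illustrative smoke test (not part of the original file): build a Namespace carrying
# only the attributes the random-data path reads, then pull one synthetic batch.
# All values below are arbitrary demo settings.
if __name__ == "__main__":
    demo_args = argparse.Namespace(
        batch_size=32,
        test_batch_size=None,
        limit_train_batches=2,
        limit_val_batches=None,
        limit_test_batches=None,
        num_embeddings=1000,
        num_embeddings_per_feature=None,
        in_memory_binary_criteo_path=None,
        synthetic_multi_hot_criteo_path=None,
        seed=0,
        pin_memory=False,
    )
    demo_loader = get_dataloader(demo_args, backend="gloo", stage="train")
    print(type(next(iter(demo_loader))))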
|
from .dataloader import SuperSloMo
from .model_wrapper import Model as ModelWrapper
import torch
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import random
from typing import Tuple
import os
import numpy as np
from argparse import Namespace
from pathlib import Path
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark import DATA_PATH
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def _prefetch(data, device):
result = []
for item in data:
result.append(item.to(device))
return tuple(result)
class Model(BenchmarkModel):
task = COMPUTER_VISION.VIDEO_INTERPOLATION
# Original code config:
# train batch size: 6
# eval batch size: 10
# hardware platform: Nvidia GTX 1080 Ti
# Source: https://github.com/avinashpaliwal/Super-SloMo/blob/master/train.ipynb
DEFAULT_TRAIN_BSIZE = 6
# use smaller batch size to fit on Nvidia T4
DEFAULT_EVAL_BSIZE = 6
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
self.model = ModelWrapper(device)
root = os.path.join(DATA_PATH, "Super_SloMo_inputs")
self.args = args = Namespace(**{
'dataset_root': f'{root}/dataset',
'batch_size': self.batch_size,
'init_learning_rate': 0.0001,
})
self.optimizer = optim.Adam(self.model.parameters(),
lr=args.init_learning_rate)
mean = [0.429, 0.431, 0.397]
std = [1, 1, 1]
normalize = transforms.Normalize(mean=mean,
std=std)
transform = transforms.Compose([transforms.ToTensor(), normalize])
trainset = SuperSloMo(root=args.dataset_root + '/train',
transform=transform, train=True)
loader = torch.utils.data.DataLoader(
trainset,
batch_size=self.args.batch_size,
shuffle=False)
data, frameIndex = next(iter(loader))
data = _prefetch(data, self.device)
self.example_inputs = frameIndex.to(self.device), *data
def get_module(self):
return self.model, self.example_inputs
def eval(self) -> Tuple[torch.Tensor]:
out = self.model(*self.example_inputs)
return out
def train(self):
self.optimizer.zero_grad()
Ft_p, loss = self.model(*self.example_inputs)
loss.backward()
self.optimizer.step()
|
from . import slomo_model as model
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
L1_lossFn = nn.L1Loss()
MSE_LossFn = nn.MSELoss()
class Model(torch.nn.Module):
def __init__(self, device='cpu'):
super().__init__()
self.flowComp = model.UNet(6, 4).to(device)
self.ArbTimeFlowIntrp = model.UNet(20, 5).to(device)
self.trainFlowBackWarp = model.backWarp(352, 352, device)
vgg16 = torchvision.models.vgg16(weights=torchvision.models.VGG16_Weights.IMAGENET1K_V1)
vgg16_conv_4_3 = nn.Sequential(*list(vgg16.children())[0][:22])
vgg16_conv_4_3.to(device)
for param in vgg16_conv_4_3.parameters():
param.requires_grad = False
self.vgg16_conv_4_3 = vgg16_conv_4_3
def forward(self, trainFrameIndex, I0, I1, IFrame):
# Calculate flow between reference frames I0 and I1
flowOut = self.flowComp(torch.cat((I0, I1), dim=1))
# Extracting flows between I0 and I1 - F_0_1 and F_1_0
F_0_1 = flowOut[:,:2,:,:]
F_1_0 = flowOut[:,2:,:,:]
fCoeff = model.getFlowCoeff(trainFrameIndex, I0.device, I0.dtype)
# Calculate intermediate flows
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
# Get intermediate frames from the intermediate flows
g_I0_F_t_0 = self.trainFlowBackWarp(I0, F_t_0)
g_I1_F_t_1 = self.trainFlowBackWarp(I1, F_t_1)
# Calculate optical flow residuals and visibility maps
intrpOut = self.ArbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))
# Extract optical flow residuals and visibility maps
F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
V_t_0 = F.sigmoid(intrpOut[:, 4:5, :, :])
V_t_1 = 1 - V_t_0
        # Get intermediate frames from the refined intermediate flows
g_I0_F_t_0_f = self.trainFlowBackWarp(I0, F_t_0_f)
g_I1_F_t_1_f = self.trainFlowBackWarp(I1, F_t_1_f)
wCoeff = model.getWarpCoeff(trainFrameIndex, I0.device, I0.dtype)
# Calculate final intermediate frame
Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)
# Loss
recnLoss = L1_lossFn(Ft_p, IFrame)
prcpLoss = MSE_LossFn(self.vgg16_conv_4_3(Ft_p), self.vgg16_conv_4_3(IFrame))
warpLoss = L1_lossFn(g_I0_F_t_0, IFrame) + L1_lossFn(g_I1_F_t_1, IFrame) + L1_lossFn(self.trainFlowBackWarp(I0, F_1_0), I1) + L1_lossFn(self.trainFlowBackWarp(I1, F_0_1), I0)
loss_smooth_1_0 = torch.mean(torch.abs(F_1_0[:, :, :, :-1] - F_1_0[:, :, :, 1:])) + torch.mean(torch.abs(F_1_0[:, :, :-1, :] - F_1_0[:, :, 1:, :]))
loss_smooth_0_1 = torch.mean(torch.abs(F_0_1[:, :, :, :-1] - F_0_1[:, :, :, 1:])) + torch.mean(torch.abs(F_0_1[:, :, :-1, :] - F_0_1[:, :, 1:, :]))
loss_smooth = loss_smooth_1_0 + loss_smooth_0_1
        # Total Loss - Coefficients 204 and 102 are used instead of 0.8 and 0.4
        # (0.8 * 255 = 204, 0.4 * 255 = 102) since the loss in the paper is calculated
        # for input pixels in the range 0-255 while the input to our network is in the range 0-1.
loss = 204 * recnLoss + 102 * warpLoss + 0.005 * prcpLoss + loss_smooth
return Ft_p, loss
|
#!/usr/bin/env python3
import argparse
import os
import os.path
import ctypes
from shutil import rmtree, move
from PIL import Image
import torch
import torchvision.transforms as transforms
import slomo_model as model
import dataloader
import platform
from tqdm import tqdm
# For parsing commandline arguments
parser = argparse.ArgumentParser()
parser.add_argument("--ffmpeg_dir", type=str, default="", help='path to ffmpeg.exe')
parser.add_argument("--video", type=str, required=True, help='path of video to be converted')
parser.add_argument("--checkpoint", type=str, required=True, help='path of checkpoint for pretrained model')
parser.add_argument("--fps", type=float, default=30, help='specify fps of output video. Default: 30.')
parser.add_argument("--sf", type=int, required=True, help='specify the slomo factor N. This will increase the frames by Nx. Example sf=2 ==> 2x frames')
parser.add_argument("--batch_size", type=int, default=1, help='Specify batch size for faster conversion. This will depend on your cpu/gpu memory. Default: 1')
parser.add_argument("--output", type=str, default="output.mkv", help='Specify output file name. Default: output.mp4')
args = parser.parse_args()
def check():
"""
Checks the validity of commandline arguments.
Parameters
----------
None
Returns
-------
error : string
Error message if error occurs otherwise blank string.
"""
error = ""
if (args.sf < 2):
error = "Error: --sf/slomo factor has to be atleast 2"
if (args.batch_size < 1):
error = "Error: --batch_size has to be atleast 1"
if (args.fps < 1):
error = "Error: --fps has to be atleast 1"
if ".mkv" not in args.output:
error = "output needs to have mkv container"
return error
def extract_frames(video, outDir):
"""
Converts the `video` to images.
Parameters
----------
video : string
full path to the video file.
outDir : string
path to directory to output the extracted images.
Returns
-------
error : string
Error message if error occurs otherwise blank string.
"""
error = ""
print('{} -i {} -vsync 0 {}/%06d.png'.format(os.path.join(args.ffmpeg_dir, "ffmpeg"), video, outDir))
retn = os.system('{} -i "{}" -vsync 0 {}/%06d.png'.format(os.path.join(args.ffmpeg_dir, "ffmpeg"), video, outDir))
if retn:
error = "Error converting file:{}. Exiting.".format(video)
return error
def create_video(dir):
error = ""
print('{} -r {} -i {}/%d.png -vcodec ffvhuff {}'.format(os.path.join(args.ffmpeg_dir, "ffmpeg"), args.fps, dir, args.output))
retn = os.system('{} -r {} -i {}/%d.png -vcodec ffvhuff "{}"'.format(os.path.join(args.ffmpeg_dir, "ffmpeg"), args.fps, dir, args.output))
if retn:
error = "Error creating output video. Exiting."
return error
def main():
# Check if arguments are okay
error = check()
if error:
print(error)
exit(1)
# Create extraction folder and extract frames
IS_WINDOWS = 'Windows' == platform.system()
extractionDir = "tmpSuperSloMo"
if not IS_WINDOWS:
# Assuming UNIX-like system where "." indicates hidden directories
extractionDir = "." + extractionDir
if os.path.isdir(extractionDir):
rmtree(extractionDir)
os.mkdir(extractionDir)
if IS_WINDOWS:
FILE_ATTRIBUTE_HIDDEN = 0x02
# ctypes.windll only exists on Windows
ctypes.windll.kernel32.SetFileAttributesW(extractionDir, FILE_ATTRIBUTE_HIDDEN)
extractionPath = os.path.join(extractionDir, "input")
outputPath = os.path.join(extractionDir, "output")
os.mkdir(extractionPath)
os.mkdir(outputPath)
error = extract_frames(args.video, extractionPath)
if error:
print(error)
exit(1)
# Initialize transforms
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
mean = [0.429, 0.431, 0.397]
std = [1, 1, 1]
normalize = transforms.Normalize(mean=mean,
std=std)
negmean = [x * -1 for x in mean]
revNormalize = transforms.Normalize(mean=negmean, std=std)
# Temporary fix for issue #7 https://github.com/avinashpaliwal/Super-SloMo/issues/7 -
# - Removed per channel mean subtraction for CPU.
if (device == "cpu"):
transform = transforms.Compose([transforms.ToTensor()])
TP = transforms.Compose([transforms.ToPILImage()])
else:
transform = transforms.Compose([transforms.ToTensor(), normalize])
TP = transforms.Compose([revNormalize, transforms.ToPILImage()])
# Load data
videoFrames = dataloader.Video(root=extractionPath, transform=transform)
videoFramesloader = torch.utils.data.DataLoader(videoFrames, batch_size=args.batch_size, shuffle=False)
# Initialize model
flowComp = model.UNet(6, 4)
flowComp.to(device)
for param in flowComp.parameters():
param.requires_grad = False
ArbTimeFlowIntrp = model.UNet(20, 5)
ArbTimeFlowIntrp.to(device)
for param in ArbTimeFlowIntrp.parameters():
param.requires_grad = False
flowBackWarp = model.backWarp(videoFrames.dim[0], videoFrames.dim[1], device)
flowBackWarp = flowBackWarp.to(device)
dict1 = torch.load(args.checkpoint, map_location='cpu')
ArbTimeFlowIntrp.load_state_dict(dict1['state_dictAT'])
flowComp.load_state_dict(dict1['state_dictFC'])
# Interpolate frames
frameCounter = 1
with torch.no_grad():
for _, (frame0, frame1) in enumerate(tqdm(videoFramesloader), 0):
I0 = frame0.to(device)
I1 = frame1.to(device)
flowOut = flowComp(torch.cat((I0, I1), dim=1))
F_0_1 = flowOut[:,:2,:,:]
F_1_0 = flowOut[:,2:,:,:]
# Save reference frames in output folder
for batchIndex in range(args.batch_size):
(TP(frame0[batchIndex].detach())).resize(videoFrames.origDim, Image.BILINEAR).save(os.path.join(outputPath, str(frameCounter + args.sf * batchIndex) + ".png"))
frameCounter += 1
# Generate intermediate frames
for intermediateIndex in range(1, args.sf):
t = float(intermediateIndex) / args.sf
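                # These coefficients implement the intermediate-flow approximation from
                # the Super SloMo paper:
                #   F_t_0 = -(1 - t) * t * F_0_1 + t^2 * F_1_0
                #   F_t_1 = (1 - t)^2 * F_0_1 - t * (1 - t) * F_1_0
                # so fCoeff holds [-t * (1 - t), t^2, (1 - t)^2, -t * (1 - t)] in that order.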
temp = -t * (1 - t)
fCoeff = [temp, t * t, (1 - t) * (1 - t), temp]
F_t_0 = fCoeff[0] * F_0_1 + fCoeff[1] * F_1_0
F_t_1 = fCoeff[2] * F_0_1 + fCoeff[3] * F_1_0
g_I0_F_t_0 = flowBackWarp(I0, F_t_0)
g_I1_F_t_1 = flowBackWarp(I1, F_t_1)
intrpOut = ArbTimeFlowIntrp(torch.cat((I0, I1, F_0_1, F_1_0, F_t_1, F_t_0, g_I1_F_t_1, g_I0_F_t_0), dim=1))
F_t_0_f = intrpOut[:, :2, :, :] + F_t_0
F_t_1_f = intrpOut[:, 2:4, :, :] + F_t_1
V_t_0 = torch.sigmoid(intrpOut[:, 4:5, :, :])
V_t_1 = 1 - V_t_0
g_I0_F_t_0_f = flowBackWarp(I0, F_t_0_f)
g_I1_F_t_1_f = flowBackWarp(I1, F_t_1_f)
wCoeff = [1 - t, t]
Ft_p = (wCoeff[0] * V_t_0 * g_I0_F_t_0_f + wCoeff[1] * V_t_1 * g_I1_F_t_1_f) / (wCoeff[0] * V_t_0 + wCoeff[1] * V_t_1)
# Save intermediate frame
for batchIndex in range(args.batch_size):
(TP(Ft_p[batchIndex].cpu().detach())).resize(videoFrames.origDim, Image.BILINEAR).save(os.path.join(outputPath, str(frameCounter + args.sf * batchIndex) + ".png"))
frameCounter += 1
# Set counter accounting for batching of frames
frameCounter += args.sf * (args.batch_size - 1)
# Generate video from interpolated frames
create_video(outputPath)
# Remove temporary files
rmtree(extractionDir)
exit(0)
main()
|
#[Super SloMo]
##High Quality Estimation of Multiple Intermediate Frames for Video Interpolation
import argparse
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import slomo_model as model
from model_wrapper import Model
import dataloader
from math import log10
import datetime
from tensorboardX import SummaryWriter
import random
random.seed(1337)
torch.manual_seed(1337)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# For parsing commandline arguments
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_root", type=str, required=True, help='path to dataset folder containing train-test-validation folders')
parser.add_argument("--checkpoint_dir", type=str, required=True, help='path to folder for saving checkpoints')
parser.add_argument("--checkpoint", type=str, help='path of checkpoint for pretrained model')
parser.add_argument("--epochs", type=int, default=200, help='number of epochs to train. Default: 200.')
parser.add_argument("--train_batch_size", type=int, default=6, help='batch size for training. Default: 6.')
parser.add_argument("--init_learning_rate", type=float, default=0.0001, help='set initial learning rate. Default: 0.0001.')
parser.add_argument("--milestones", type=list, default=[100, 150], help='Set to epoch values where you want to decrease learning rate by a factor of 0.1. Default: [100, 150]')
parser.add_argument("--checkpoint_epoch", type=int, default=5, help='checkpoint saving frequency. N: after every N epochs. Each checkpoint is roughly of size 151 MB.Default: 5.')
parser.add_argument("--debug", type=str, default=None, help='dump model output')
parser.add_argument("--trace", action='store_true', default=False, help='trace model')
parser.add_argument("--script", action='store_true', default=False, help='script model')
args = parser.parse_args()
##[TensorboardX](https://github.com/lanpa/tensorboardX)
### For visualizing loss and interpolated frames
writer = SummaryWriter('log')
###Initialize flow computation and arbitrary-time flow interpolation CNNs.
assert torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
###Load Datasets
# Channel wise mean calculated on adobe240-fps training dataset
mean = [0.429, 0.431, 0.397]
std = [1, 1, 1]
normalize = transforms.Normalize(mean=mean,
std=std)
transform = transforms.Compose([transforms.ToTensor(), normalize])
trainset = dataloader.SuperSloMo(root=args.dataset_root + '/train', transform=transform, train=True)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch_size, shuffle=False)
print(trainset)
###Create transform to display image from tensor
negmean = [x * -1 for x in mean]
revNormalize = transforms.Normalize(mean=negmean, std=std)
TP = transforms.Compose([revNormalize, transforms.ToPILImage()])
###Utils
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
###Model, Loss and Optimizer
the_model = Model(device)
optimizer = optim.Adam(the_model.parameters(), lr=args.init_learning_rate)
# scheduler to decrease learning rate by a factor of 10 at milestones.
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.milestones, gamma=0.1)
### Initialization
dict1 = {'loss': [], 'valLoss': [], 'valPSNR': [], 'epoch': -1}
### Training
import time
start = time.time()
cLoss = dict1['loss']
valLoss = dict1['valLoss']
valPSNR = dict1['valPSNR']
checkpoint_counter = 0
if args.trace:
for trainData, trainFrameIndex in trainloader:
frame0, frameT, frame1 = trainData
I0 = frame0.to(device)
I1 = frame1.to(device)
IFrame = frameT.to(device)
the_model = torch.jit.trace(the_model, example_inputs=(trainFrameIndex, I0, I1, IFrame))
break
if args.script:
the_model = torch.jit.script(the_model)
### Main training loop
for epoch in range(dict1['epoch'] + 1, args.epochs):
print("Epoch: ", epoch)
# Append and reset
cLoss.append([])
valLoss.append([])
valPSNR.append([])
iLoss = 0
# Increment scheduler count
scheduler.step()
for trainIndex, (trainData, trainFrameIndex) in enumerate(trainloader, 0):
## Getting the input and the target from the training set
frame0, frameT, frame1 = trainData
I0 = frame0.to(device)
I1 = frame1.to(device)
IFrame = frameT.to(device)
optimizer.zero_grad()
Ft_p, loss = the_model(trainFrameIndex, I0, I1, IFrame)
if args.debug:
torch.save(Ft_p, args.debug)
# Backpropagate
loss.backward()
optimizer.step()
iLoss += loss.item()
|
import torch.utils.data as data
from PIL import Image
import os
import os.path
import random
def _make_dataset(dir):
"""
Creates a 2D list of all the frames in N clips containing
M frames each.
2D List Structure:
[[frame00, frame01,...frameM] <-- clip0
    [frame00, frame01,...frameM] <-- clip1
:
[frame00, frame01,...frameM]] <-- clipN
Parameters
----------
dir : string
root directory containing clips.
Returns
-------
list
2D list described above.
"""
framesPath = []
# Find and loop over all the clips in root `dir`.
for index, folder in enumerate(os.listdir(dir)):
clipsFolderPath = os.path.join(dir, folder)
# Skip items which are not folders.
if not (os.path.isdir(clipsFolderPath)):
continue
framesPath.append([])
# Find and loop over all the frames inside the clip.
for image in sorted(os.listdir(clipsFolderPath)):
# Add path to list.
framesPath[index].append(os.path.join(clipsFolderPath, image))
return framesPath
def _make_video_dataset(dir):
"""
Creates a 1D list of all the frames.
1D List Structure:
[frame0, frame1,...frameN]
Parameters
----------
dir : string
root directory containing frames.
Returns
-------
list
1D list described above.
"""
framesPath = []
# Find and loop over all the frames in root `dir`.
for image in sorted(os.listdir(dir)):
# Add path to list.
framesPath.append(os.path.join(dir, image))
return framesPath
def _pil_loader(path, cropArea=None, resizeDim=None, frameFlip=0):
"""
Opens image at `path` using pil and applies data augmentation.
Parameters
----------
path : string
path of the image.
cropArea : tuple, optional
coordinates for cropping image. Default: None
resizeDim : tuple, optional
dimensions for resizing image. Default: None
frameFlip : int, optional
Non zero to flip image horizontally. Default: 0
Returns
-------
    PIL.Image.Image
        the loaded (and augmented) image, converted to RGB.
"""
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
# Resize image if specified.
resized_img = img.resize(resizeDim, Image.ANTIALIAS) if (resizeDim != None) else img
# Crop image if crop area specified.
        cropped_img = resized_img.crop(cropArea) if (cropArea != None) else resized_img
# Flip image horizontally if specified.
flipped_img = cropped_img.transpose(Image.FLIP_LEFT_RIGHT) if frameFlip else cropped_img
return flipped_img.convert('RGB')
class SuperSloMo(data.Dataset):
"""
A dataloader for loading N samples arranged in this way:
|-- clip0
|-- frame00
|-- frame01
:
|-- frame11
|-- frame12
|-- clip1
|-- frame00
|-- frame01
:
|-- frame11
|-- frame12
:
:
|-- clipN
|-- frame00
|-- frame01
:
|-- frame11
|-- frame12
...
Attributes
----------
framesPath : list
List of frames' path in the dataset.
Methods
-------
__getitem__(index)
Returns the sample corresponding to `index` from dataset.
__len__()
Returns the size of dataset. Invoked as len(datasetObj).
__repr__()
Returns printable representation of the dataset object.
"""
def __init__(self, root, transform=None, dim=(640, 360), randomCropSize=(352, 352), train=True):
"""
Parameters
----------
root : string
Root directory path.
transform : callable, optional
A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
dim : tuple, optional
Dimensions of images in dataset. Default: (640, 360)
randomCropSize : tuple, optional
Dimensions of random crop to be applied. Default: (352, 352)
train : boolean, optional
Specifies if the dataset is for training or testing/validation.
`True` returns samples with data augmentation like random
flipping, random cropping, etc. while `False` returns the
samples without randomization. Default: True
"""
# Populate the list with image paths for all the
# frame in `root`.
framesPath = _make_dataset(root)
# Raise error if no images found in root.
if len(framesPath) == 0:
raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"))
self.randomCropSize = randomCropSize
self.cropX0 = dim[0] - randomCropSize[0]
self.cropY0 = dim[1] - randomCropSize[1]
self.root = root
self.transform = transform
self.train = train
self.framesPath = framesPath
def __getitem__(self, index):
"""
Returns the sample corresponding to `index` from dataset.
The sample consists of two reference frames - I0 and I1 -
and a random frame chosen from the 7 intermediate frames
        available between I0 and I1, along with its relative index.
Parameters
----------
index : int
Index
Returns
-------
tuple
(sample, returnIndex) where sample is
[I0, intermediate_frame, I1] and returnIndex is
the position of `random_intermediate_frame`.
e.g.- `returnIndex` of frame next to I0 would be 0 and
frame before I1 would be 6.
"""
sample = []
if (self.train):
### Data Augmentation ###
# To select random 9 frames from 12 frames in a clip
firstFrame = random.randint(0, 3)
# Apply random crop on the 9 input frames
cropX = random.randint(0, self.cropX0)
cropY = random.randint(0, self.cropY0)
cropArea = (cropX, cropY, cropX + self.randomCropSize[0], cropY + self.randomCropSize[1])
# Random reverse frame
#frameRange = range(firstFrame, firstFrame + 9) if (random.randint(0, 1)) else range(firstFrame + 8, firstFrame - 1, -1)
IFrameIndex = random.randint(firstFrame + 1, firstFrame + 7)
if (random.randint(0, 1)):
frameRange = [firstFrame, IFrameIndex, firstFrame + 8]
returnIndex = IFrameIndex - firstFrame - 1
else:
frameRange = [firstFrame + 8, IFrameIndex, firstFrame]
returnIndex = firstFrame - IFrameIndex + 7
# Random flip frame
randomFrameFlip = random.randint(0, 1)
else:
# Fixed settings to return same samples every epoch.
# For validation/test sets.
firstFrame = 0
cropArea = (0, 0, self.randomCropSize[0], self.randomCropSize[1])
IFrameIndex = ((index) % 7 + 1)
returnIndex = IFrameIndex - 1
frameRange = [0, IFrameIndex, 8]
randomFrameFlip = 0
# Loop over for all frames corresponding to the `index`.
for frameIndex in frameRange:
# Open image using pil and augment the image.
image = _pil_loader(self.framesPath[index][frameIndex], cropArea=cropArea, frameFlip=randomFrameFlip)
# Apply transformation if specified.
if self.transform is not None:
image = self.transform(image)
sample.append(image)
return sample, returnIndex
def __len__(self):
"""
Returns the size of dataset. Invoked as len(datasetObj).
Returns
-------
int
number of samples.
"""
return len(self.framesPath)
def __repr__(self):
"""
Returns printable representation of the dataset object.
Returns
-------
string
info.
"""
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class UCI101Test(data.Dataset):
"""
A dataloader for loading N samples arranged in this way:
|-- clip0
|-- frame00
|-- frame01
|-- frame02
|-- clip1
|-- frame00
|-- frame01
|-- frame02
:
:
|-- clipN
|-- frame00
|-- frame01
|-- frame02
...
Attributes
----------
framesPath : list
List of frames' path in the dataset.
Methods
-------
__getitem__(index)
Returns the sample corresponding to `index` from dataset.
__len__()
Returns the size of dataset. Invoked as len(datasetObj).
__repr__()
Returns printable representation of the dataset object.
"""
def __init__(self, root, transform=None):
"""
Parameters
----------
root : string
Root directory path.
transform : callable, optional
A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
"""
# Populate the list with image paths for all the
# frame in `root`.
framesPath = _make_dataset(root)
# Raise error if no images found in root.
if len(framesPath) == 0:
raise(RuntimeError("Found 0 files in subfolders of: " + root + "\n"))
self.root = root
self.framesPath = framesPath
self.transform = transform
def __getitem__(self, index):
"""
Returns the sample corresponding to `index` from dataset.
The sample consists of two reference frames - I0 and I1 -
        and an intermediate frame between I0 and I1.
Parameters
----------
index : int
Index
Returns
-------
tuple
(sample, returnIndex) where sample is
[I0, intermediate_frame, I1] and returnIndex is
the position of `intermediate_frame`.
The returnIndex is always 3 and is being returned
to maintain compatibility with the `SuperSloMo`
dataloader where 3 corresponds to the middle frame.
"""
sample = []
# Loop over for all frames corresponding to the `index`.
for framePath in self.framesPath[index]:
# Open image using pil.
image = _pil_loader(framePath)
# Apply transformation if specified.
if self.transform is not None:
image = self.transform(image)
sample.append(image)
return sample, 3
def __len__(self):
"""
Returns the size of dataset. Invoked as len(datasetObj).
Returns
-------
int
number of samples.
"""
return len(self.framesPath)
def __repr__(self):
"""
Returns printable representation of the dataset object.
Returns
-------
string
info.
"""
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class Video(data.Dataset):
"""
A dataloader for loading all video frames in a folder:
|-- frame0
|-- frame1
:
:
|-- frameN
...
Attributes
----------
framesPath : list
List of frames' path in the dataset.
origDim : tuple
original dimensions of the video.
dim : tuple
resized dimensions of the video (for CNN).
Methods
-------
__getitem__(index)
Returns the sample corresponding to `index` from dataset.
__len__()
Returns the size of dataset. Invoked as len(datasetObj).
__repr__()
Returns printable representation of the dataset object.
"""
def __init__(self, root, transform=None):
"""
Parameters
----------
root : string
Root directory path.
transform : callable, optional
A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
"""
# Populate the list with image paths for all the
# frame in `root`.
framesPath = _make_video_dataset(root)
# Get dimensions of frames
frame = _pil_loader(framesPath[0])
self.origDim = frame.size
self.dim = int(self.origDim[0] / 32) * 32, int(self.origDim[1] / 32) * 32
# Raise error if no images found in root.
if len(framesPath) == 0:
raise(RuntimeError("Found 0 files in: " + root + "\n"))
self.root = root
self.framesPath = framesPath
self.transform = transform
def __getitem__(self, index):
"""
Returns the sample corresponding to `index` from dataset.
The sample consists of two reference frames - I0 and I1.
Parameters
----------
index : int
Index
Returns
-------
list
sample is [I0, I1] where I0 is the frame with index
`index` and I1 is the next frame.
"""
sample = []
# Loop over for all frames corresponding to the `index`.
for framePath in [self.framesPath[index], self.framesPath[index + 1]]:
# Open image using pil.
image = _pil_loader(framePath, resizeDim=self.dim)
# Apply transformation if specified.
if self.transform is not None:
image = self.transform(image)
sample.append(image)
return sample
def __len__(self):
"""
Returns the size of dataset. Invoked as len(datasetObj).
Returns
-------
int
number of samples.
"""
        # Using `-1` so that the dataloader only accesses frame pairs up to
        # [N-1, N] and not [N, N+1], because frame N+1 doesn't exist.
return len(self.framesPath) - 1
def __repr__(self):
"""
Returns printable representation of the dataset object.
Returns
-------
string
info.
"""
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str |
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
"""
Converts a Video to SuperSloMo version
"""
from time import time
import click
import cv2
import torch
from PIL import Image
import numpy as np
import slomo_model as model
from torchvision import transforms
import torch.nn.functional as F
torch.set_grad_enabled(False)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
trans_forward = transforms.ToTensor()
trans_backward = transforms.ToPILImage()
if device != "cpu":
mean = [0.429, 0.431, 0.397]
mea0 = [-m for m in mean]
std = [1] * 3
trans_forward = transforms.Compose([trans_forward, transforms.Normalize(mean=mean, std=std)])
trans_backward = transforms.Compose([transforms.Normalize(mean=mea0, std=std), trans_backward])
flow = model.UNet(6, 4).to(device)
interp = model.UNet(20, 5).to(device)
back_warp = None
def setup_back_warp(w, h):
global back_warp
with torch.set_grad_enabled(False):
back_warp = model.backWarp(w, h, device).to(device)
def load_models(checkpoint):
states = torch.load(checkpoint, map_location='cpu')
interp.load_state_dict(states['state_dictAT'])
flow.load_state_dict(states['state_dictFC'])
def interpolate_batch(frames, factor):
frame0 = torch.stack(frames[:-1])
frame1 = torch.stack(frames[1:])
i0 = frame0.to(device)
i1 = frame1.to(device)
ix = torch.cat([i0, i1], dim=1)
flow_out = flow(ix)
f01 = flow_out[:, :2, :, :]
f10 = flow_out[:, 2:, :, :]
frame_buffer = []
for i in range(1, factor):
t = i / factor
temp = -t * (1 - t)
co_eff = [temp, t * t, (1 - t) * (1 - t), temp]
ft0 = co_eff[0] * f01 + co_eff[1] * f10
ft1 = co_eff[2] * f01 + co_eff[3] * f10
gi0ft0 = back_warp(i0, ft0)
gi1ft1 = back_warp(i1, ft1)
iy = torch.cat((i0, i1, f01, f10, ft1, ft0, gi1ft1, gi0ft0), dim=1)
io = interp(iy)
ft0f = io[:, :2, :, :] + ft0
ft1f = io[:, 2:4, :, :] + ft1
vt0 = F.sigmoid(io[:, 4:5, :, :])
vt1 = 1 - vt0
gi0ft0f = back_warp(i0, ft0f)
gi1ft1f = back_warp(i1, ft1f)
co_eff = [1 - t, t]
ft_p = (co_eff[0] * vt0 * gi0ft0f + co_eff[1] * vt1 * gi1ft1f) / \
(co_eff[0] * vt0 + co_eff[1] * vt1)
frame_buffer.append(ft_p)
return frame_buffer
def load_batch(video_in, batch_size, batch, w, h):
if len(batch) > 0:
batch = [batch[-1]]
for i in range(batch_size):
ok, frame = video_in.read()
if not ok:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = Image.fromarray(frame)
frame = frame.resize((w, h), Image.ANTIALIAS)
frame = frame.convert('RGB')
frame = trans_forward(frame)
batch.append(frame)
return batch
def denorm_frame(frame, w0, h0):
frame = frame.cpu()
frame = trans_backward(frame)
frame = frame.resize((w0, h0), Image.BILINEAR)
frame = frame.convert('RGB')
return np.array(frame)[:, :, ::-1].copy()
def convert_video(source, dest, factor, batch_size=10, output_format='mp4v', output_fps=30):
vin = cv2.VideoCapture(source)
count = vin.get(cv2.CAP_PROP_FRAME_COUNT)
w0, h0 = int(vin.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vin.get(cv2.CAP_PROP_FRAME_HEIGHT))
codec = cv2.VideoWriter_fourcc(*output_format)
vout = cv2.VideoWriter(dest, codec, float(output_fps), (w0, h0))
w, h = (w0 // 32) * 32, (h0 // 32) * 32
setup_back_warp(w, h)
done = 0
batch = []
while True:
batch = load_batch(vin, batch_size, batch, w, h)
if len(batch) == 1:
break
done += len(batch) - 1
intermediate_frames = interpolate_batch(batch, factor)
intermediate_frames = list(zip(*intermediate_frames))
for fid, iframe in enumerate(intermediate_frames):
vout.write(denorm_frame(batch[fid], w0, h0))
for frm in iframe:
vout.write(denorm_frame(frm, w0, h0))
try:
yield len(batch), done, count
except StopIteration:
break
vout.write(denorm_frame(batch[0], w0, h0))
vin.release()
vout.release()
@click.command('Evaluate Model by converting a low-FPS video to high-fps')
@click.argument('input')
@click.option('--checkpoint', help='Path to model checkpoint')
@click.option('--output', help='Path to output file to save')
@click.option('--batch', default=2, help='Number of frames to process in single forward pass')
@click.option('--scale', default=4, help='Scale Factor of FPS')
@click.option('--fps', default=30, help='FPS of output video')
def main(input, checkpoint, output, batch, scale, fps):
avg = lambda x, n, x0: (x * n/(n+1) + x0 / (n+1), n+1)
load_models(checkpoint)
t0 = time()
n0 = 0
fpx = 0
for dl, fd, fc in convert_video(input, output, int(scale), int(batch), output_fps=int(fps)):
fpx, n0 = avg(fpx, n0, dl / (time() - t0))
prg = int(100*fd/fc)
eta = (fc - fd) / fpx
print('\rDone: {:03d}% FPS: {:05.2f} ETA: {:.2f}s'.format(prg, fpx, eta) + ' '*5, end='')
t0 = time()
if __name__ == '__main__':
main()
|
import torch
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class down(nn.Module):
"""
A class for creating neural network blocks containing layers:
    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used as input and output channels for the
second convolutional layer.
filterSize : int
filter size for the convolution filter. input N would create
a N x N filter.
"""
super(down, self).__init__()
# Initialize convolutional layers.
self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2))
self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize, stride=1, padding=int((filterSize - 1) / 2))
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
# Average pooling with kernel size 2 (2 x 2).
x = F.avg_pool2d(x, 2)
# Convolution + Leaky ReLU
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
# Convolution + Leaky ReLU
x = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
return x
class up(nn.Module):
"""
A class for creating neural network blocks containing layers:
    Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x, skpCn)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used for setting input and output channels for
the second convolutional layer.
"""
super(up, self).__init__()
# Initialize convolutional layers.
self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
# (2 * outChannels) is used for accommodating skip connection.
self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1, padding=1)
def forward(self, x, skpCn):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
skpCn : tensor
skip connection input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
# Bilinear interpolation with scaling 2.
x = F.interpolate(x, scale_factor=2., mode='bilinear')
# Convolution + Leaky ReLU
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
# Convolution + Leaky ReLU on (`x`, `skpCn`)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)), negative_slope = 0.1)
return x
class UNet(nn.Module):
"""
A class for creating UNet like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(UNet, self).__init__()
# Initialize neural network blocks.
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network.
Parameters
----------
x : tensor
input to the UNet.
Returns
-------
tensor
output of the UNet.
"""
x = F.leaky_relu(self.conv1(x), negative_slope = 0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope = 0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope = 0.1)
return x
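# Usage note (added): in the conversion script earlier in this document, the flow
# network receives two stacked RGB frames (6 channels) and its output is split into
# F_0_1 and F_1_0 (2 channels each), while the refinement network receives the
# 20-channel tensor cat(i0, i1, f01, f10, ft1, ft0, gi1ft1, gi0ft0) and at least
# 5 of its output channels are consumed (two flow residuals plus a visibility logit).
# A plausible instantiation consistent with that usage (inferred from the slicing,
# not stated explicitly in this file) would be:
#
#     flow = UNet(6, 4).to(device)
#     interp = UNet(20, 5).to(device)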
class backWarp(nn.Module):
"""
A class for creating a backwarping object.
This is used for backwarping to an image:
Given optical flow from frame I0 to I1 --> F_0_1 and frame I1,
it generates I0 <-- backwarp(F_0_1, I1).
...
Methods
-------
    forward(img, flow)
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
"""
def __init__(self, W, H, device):
"""
Parameters
----------
W : int
width of the image.
H : int
height of the image.
device : device
computation device (cpu/cuda).
"""
super(backWarp, self).__init__()
# create a grid
self.W = W
self.H = H
        # Use torch.meshgrid instead of np.meshgrid to improve performance
# https://github.com/avinashpaliwal/Super-SloMo/pull/111
self.gridX, self.gridY = torch.meshgrid(torch.arange(W, requires_grad=False, device=device),
torch.arange(H, requires_grad=False, device=device), indexing='xy')
def forward(self, img, flow):
"""
Returns output tensor after passing input `img` and `flow` to the backwarping
block.
I0 = backwarp(I1, F_0_1)
Parameters
----------
img : tensor
frame I1.
flow : tensor
            optical flow from I0 to I1: F_0_1.
Returns
-------
tensor
frame I0.
"""
# Extract horizontal and vertical flows.
u = flow[:, 0, :, :]
v = flow[:, 1, :, :]
x = self.gridX.unsqueeze(0).expand_as(u).to(dtype=u.dtype) + u
y = self.gridY.unsqueeze(0).expand_as(v).to(dtype=u.dtype) + v
# range -1 to 1
x = 2*(x/self.W - 0.5)
y = 2*(y/self.H - 0.5)
# stacking X and Y
grid = torch.stack((x,y), dim=3)
# Sample pixels using bilinear interpolation.
imgOut = torch.nn.functional.grid_sample(img, grid)
return imgOut
# Creating an array of `t` values for the 7 intermediate frames between
# reference frames I0 and I1.
def getFlowCoeff (indices, device: torch.device, dtype: torch.dtype):
"""
Gets flow coefficients used for calculating intermediate optical
flows from optical flows between I0 and I1: F_0_1 and F_1_0.
F_t_0 = C00 x F_0_1 + C01 x F_1_0
F_t_1 = C10 x F_0_1 + C11 x F_1_0
where,
C00 = -(1 - t) x t
C01 = t x t
C10 = (1 - t) x (1 - t)
C11 = -t x (1 - t)
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
device : device
computation device (cpu/cuda).
Returns
-------
tensor
coefficients C00, C01, C10, C11.
"""
t = torch.linspace(0.125, 0.875, 7, device=device, dtype=dtype)
# Convert indices tensor to numpy array
# ind = indices.detach().numpy()
ind = indices
C11 = C00 = - (1 - (t[ind])) * (t[ind])
C01 = (t[ind]) * (t[ind])
C10 = (1 - (t[ind])) * (1 - (t[ind]))
    return (C00[None, None, None, :].permute(3, 0, 1, 2).to(device),
            C01[None, None, None, :].permute(3, 0, 1, 2).to(device),
            C10[None, None, None, :].permute(3, 0, 1, 2).to(device),
            C11[None, None, None, :].permute(3, 0, 1, 2).to(device))
def getWarpCoeff (indices, device: torch.device, dtype: torch.dtype):
"""
Gets coefficients used for calculating final intermediate
frame `It_gen` from backwarped images using flows F_t_0 and F_t_1.
It_gen = (C0 x V_t_0 x g_I_0_F_t_0 + C1 x V_t_1 x g_I_1_F_t_1) / (C0 x V_t_0 + C1 x V_t_1)
where,
C0 = 1 - t
C1 = t
V_t_0, V_t_1 --> visibility maps
g_I_0_F_t_0, g_I_1_F_t_1 --> backwarped intermediate frames
Parameters
----------
indices : tensor
indices corresponding to the intermediate frame positions
of all samples in the batch.
device : device
computation device (cpu/cuda).
Returns
-------
tensor
coefficients C0 and C1.
"""
t = torch.linspace(0.125, 0.875, 7, device=device, dtype=dtype)
# Convert indices tensor to numpy array
ind = indices
C0 = 1 - t[ind]
C1 = t[ind]
return C0[None, None, None, :].permute(3, 0, 1, 2).to(device), C1[None, None, None, :].permute(3, 0, 1, 2).to(device)
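# Sketch (added, not part of the original file): how the helpers above are typically
# combined to synthesize an intermediate frame. The names `flowComp`, `arbTimeFlowIntrp`,
# `I0`, `I1`, `frameIndex`, `W`, `H` and `device` are illustrative assumptions.
#
#     flowOut = flowComp(torch.cat((I0, I1), dim=1))
#     F_0_1, F_1_0 = flowOut[:, :2, :, :], flowOut[:, 2:, :, :]
#     C00, C01, C10, C11 = getFlowCoeff(frameIndex, device, F_0_1.dtype)
#     F_t_0 = C00 * F_0_1 + C01 * F_1_0
#     F_t_1 = C10 * F_0_1 + C11 * F_1_0
#     warper = backWarp(W, H, device)
#     g_I0_F_t_0 = warper(I0, F_t_0)
#     g_I1_F_t_1 = warper(I1, F_t_1)
#     C0, C1 = getWarpCoeff(frameIndex, device, F_0_1.dtype)
#     # with visibility maps V_t_0 and V_t_1 from the refinement network:
#     # It_gen = (C0*V_t_0*g_I0_F_t_0 + C1*V_t_1*g_I1_F_t_1) / (C0*V_t_0 + C1*V_t_1)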
|
''' Translate input text with trained model. '''
import torch
import argparse
import dill as pickle
from tqdm import tqdm
import transformer.Constants as Constants
from torchbenchmark.util.torchtext_legacy.data import Dataset
from transformer.Models import Transformer
from transformer.Translator import Translator
def load_model(opt, device):
checkpoint = torch.load(opt.model, map_location=device)
model_opt = checkpoint['settings']
model = Transformer(
model_opt.src_vocab_size,
model_opt.trg_vocab_size,
model_opt.src_pad_idx,
model_opt.trg_pad_idx,
trg_emb_prj_weight_sharing=model_opt.proj_share_weight,
emb_src_trg_weight_sharing=model_opt.embs_share_weight,
d_k=model_opt.d_k,
d_v=model_opt.d_v,
d_model=model_opt.d_model,
d_word_vec=model_opt.d_word_vec,
d_inner=model_opt.d_inner_hid,
n_layers=model_opt.n_layers,
n_head=model_opt.n_head,
dropout=model_opt.dropout).to(device)
model.load_state_dict(checkpoint['model'])
print('[Info] Trained model state loaded.')
return model
def main():
'''Main Function'''
parser = argparse.ArgumentParser(description='translate.py')
parser.add_argument('-model', required=True,
help='Path to model weight file')
parser.add_argument('-data_pkl', required=True,
help='Pickle file with both instances and vocabulary.')
parser.add_argument('-output', default='pred.txt',
help="""Path to output the predictions (each line will
                        be the decoded sequence)""")
parser.add_argument('-beam_size', type=int, default=5)
parser.add_argument('-max_seq_len', type=int, default=100)
parser.add_argument('-no_cuda', action='store_true')
# TODO: Translate bpe encoded files
#parser.add_argument('-src', required=True,
# help='Source sequence to decode (one line per sequence)')
#parser.add_argument('-vocab', required=True,
# help='Source sequence to decode (one line per sequence)')
# TODO: Batch translation
#parser.add_argument('-batch_size', type=int, default=30,
# help='Batch size')
#parser.add_argument('-n_best', type=int, default=1,
# help="""If verbose is set, will output the n_best
# decoded sentences""")
opt = parser.parse_args()
opt.cuda = not opt.no_cuda
data = pickle.load(open(opt.data_pkl, 'rb'))
SRC, TRG = data['vocab']['src'], data['vocab']['trg']
opt.src_pad_idx = SRC.vocab.stoi[Constants.PAD_WORD]
opt.trg_pad_idx = TRG.vocab.stoi[Constants.PAD_WORD]
opt.trg_bos_idx = TRG.vocab.stoi[Constants.BOS_WORD]
opt.trg_eos_idx = TRG.vocab.stoi[Constants.EOS_WORD]
test_loader = Dataset(examples=data['test'], fields={'src': SRC, 'trg': TRG})
device = torch.device('cuda' if opt.cuda else 'cpu')
translator = Translator(
model=load_model(opt, device),
beam_size=opt.beam_size,
max_seq_len=opt.max_seq_len,
src_pad_idx=opt.src_pad_idx,
trg_pad_idx=opt.trg_pad_idx,
trg_bos_idx=opt.trg_bos_idx,
trg_eos_idx=opt.trg_eos_idx).to(device)
unk_idx = SRC.vocab.stoi[SRC.unk_token]
with open(opt.output, 'w') as f:
for example in tqdm(test_loader, mininterval=2, desc=' - (Test)', leave=False):
#print(' '.join(example.src))
src_seq = [SRC.vocab.stoi.get(word, unk_idx) for word in example.src]
pred_seq = translator.translate_sentence(torch.LongTensor([src_seq]).to(device))
pred_line = ' '.join(TRG.vocab.itos[idx] for idx in pred_seq)
pred_line = pred_line.replace(Constants.BOS_WORD, '').replace(Constants.EOS_WORD, '')
#print(pred_line)
f.write(pred_line.strip() + '\n')
print('[Info] Finished.')
if __name__ == "__main__":
'''
Usage: python translate.py -model trained.chkpt -data multi30k.pt -no_cuda
'''
main()
|
''' Handling the data io '''
import contextlib
import os
import pathlib
import argparse
import logging
import dill as pickle
import urllib
from tqdm import tqdm
import json
import sys
import codecs
import spacy
import torch
import tarfile
import torchtext.data
import torchtext.datasets
# Handle torchtext_legacy import
@contextlib.contextmanager
def _with_sys_path(path):
"""Temporarily add the given path to `sys.path`"""
path = os.fspath(path)
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
package_root = pathlib.Path(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
with _with_sys_path(package_root):
from torchbenchmark.util.torchtext_legacy.field import Field
from torchbenchmark.util.torchtext_legacy.translation import TranslationDataset, Multi30k
import transformer.Constants as Constants
from learn_bpe import learn_bpe
from apply_bpe import BPE
__author__ = "Yu-Hsiang Huang"
_TRAIN_DATA_SOURCES = [
{"url": "http://data.statmt.org/wmt17/translation-task/" \
"training-parallel-nc-v12.tgz",
"trg": "news-commentary-v12.de-en.en",
"src": "news-commentary-v12.de-en.de"},
#{"url": "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz",
# "trg": "commoncrawl.de-en.en",
# "src": "commoncrawl.de-en.de"},
#{"url": "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz",
# "trg": "europarl-v7.de-en.en",
# "src": "europarl-v7.de-en.de"}
]
_VAL_DATA_SOURCES = [
{"url": "http://data.statmt.org/wmt17/translation-task/dev.tgz",
"trg": "newstest2013.en",
"src": "newstest2013.de"}]
_TEST_DATA_SOURCES = [
{"url": "https://storage.googleapis.com/tf-perf-public/" \
"official_transformer/test_data/newstest2014.tgz",
"trg": "newstest2014.en",
"src": "newstest2014.de"}]
class TqdmUpTo(tqdm):
def update_to(self, b=1, bsize=1, tsize=None):
if tsize is not None:
self.total = tsize
self.update(b * bsize - self.n)
def file_exist(dir_name, file_name):
for sub_dir, _, files in os.walk(dir_name):
if file_name in files:
return os.path.join(sub_dir, file_name)
return None
def download_and_extract(download_dir, url, src_filename, trg_filename):
src_path = file_exist(download_dir, src_filename)
trg_path = file_exist(download_dir, trg_filename)
if src_path and trg_path:
sys.stderr.write(f"Already downloaded and extracted {url}.\n")
return src_path, trg_path
compressed_file = _download_file(download_dir, url)
sys.stderr.write(f"Extracting {compressed_file}.\n")
with tarfile.open(compressed_file, "r:gz") as corpus_tar:
corpus_tar.extractall(download_dir)
src_path = file_exist(download_dir, src_filename)
trg_path = file_exist(download_dir, trg_filename)
if src_path and trg_path:
return src_path, trg_path
raise OSError(f"Download/extraction failed for url {url} to path {download_dir}")
def _download_file(download_dir, url):
filename = url.split("/")[-1]
if file_exist(download_dir, filename):
sys.stderr.write(f"Already downloaded: {url} (at {filename}).\n")
else:
sys.stderr.write(f"Downloading from {url} to {filename}.\n")
with TqdmUpTo(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
urllib.request.urlretrieve(url, filename=filename, reporthook=t.update_to)
return filename
def get_raw_files(raw_dir, sources):
raw_files = { "src": [], "trg": [], }
for d in sources:
src_file, trg_file = download_and_extract(raw_dir, d["url"], d["src"], d["trg"])
raw_files["src"].append(src_file)
raw_files["trg"].append(trg_file)
return raw_files
def mkdir_if_needed(dir_name):
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
def compile_files(raw_dir, raw_files, prefix):
src_fpath = os.path.join(raw_dir, f"raw-{prefix}.src")
trg_fpath = os.path.join(raw_dir, f"raw-{prefix}.trg")
if os.path.isfile(src_fpath) and os.path.isfile(trg_fpath):
sys.stderr.write(f"Merged files found, skip the merging process.\n")
return src_fpath, trg_fpath
sys.stderr.write(f"Merge files into two files: {src_fpath} and {trg_fpath}.\n")
with open(src_fpath, 'w') as src_outf, open(trg_fpath, 'w') as trg_outf:
for src_inf, trg_inf in zip(raw_files['src'], raw_files['trg']):
sys.stderr.write(f' Input files: \n'\
f' - SRC: {src_inf}, and\n' \
f' - TRG: {trg_inf}.\n')
with open(src_inf, newline='\n') as src_inf, open(trg_inf, newline='\n') as trg_inf:
cntr = 0
for i, line in enumerate(src_inf):
cntr += 1
src_outf.write(line.replace('\r', ' ').strip() + '\n')
for j, line in enumerate(trg_inf):
cntr -= 1
trg_outf.write(line.replace('\r', ' ').strip() + '\n')
                assert cntr == 0, 'Numbers of lines in the two files are inconsistent.'
return src_fpath, trg_fpath
def encode_file(bpe, in_file, out_file):
sys.stderr.write(f"Read raw content from {in_file} and \n"\
f"Write encoded content to {out_file}\n")
with codecs.open(in_file, encoding='utf-8') as in_f:
with codecs.open(out_file, 'w', encoding='utf-8') as out_f:
for line in in_f:
out_f.write(bpe.process_line(line))
def encode_files(bpe, src_in_file, trg_in_file, data_dir, prefix):
src_out_file = os.path.join(data_dir, f"{prefix}.src")
trg_out_file = os.path.join(data_dir, f"{prefix}.trg")
if os.path.isfile(src_out_file) and os.path.isfile(trg_out_file):
sys.stderr.write(f"Encoded files found, skip the encoding process ...\n")
encode_file(bpe, src_in_file, src_out_file)
encode_file(bpe, trg_in_file, trg_out_file)
return src_out_file, trg_out_file
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-raw_dir', required=True)
parser.add_argument('-data_dir', required=True)
parser.add_argument('-codes', required=True)
parser.add_argument('-save_data', required=True)
parser.add_argument('-prefix', required=True)
parser.add_argument('-max_len', type=int, default=100)
parser.add_argument('--symbols', '-s', type=int, default=32000, help="Vocabulary size")
parser.add_argument(
'--min-frequency', type=int, default=6, metavar='FREQ',
        help='Stop if no symbol pair has frequency >= FREQ (default: %(default)s)')
parser.add_argument('--dict-input', action="store_true",
help="If set, input file is interpreted as a dictionary where each line contains a word-count pair")
parser.add_argument(
'--separator', type=str, default='@@', metavar='STR',
help="Separator between non-final subword units (default: '%(default)s'))")
parser.add_argument('--total-symbols', '-t', action="store_true")
opt = parser.parse_args()
# Create folder if needed.
mkdir_if_needed(opt.raw_dir)
mkdir_if_needed(opt.data_dir)
# Download and extract raw data.
raw_train = get_raw_files(opt.raw_dir, _TRAIN_DATA_SOURCES)
raw_val = get_raw_files(opt.raw_dir, _VAL_DATA_SOURCES)
raw_test = get_raw_files(opt.raw_dir, _TEST_DATA_SOURCES)
# Merge files into one.
train_src, train_trg = compile_files(opt.raw_dir, raw_train, opt.prefix + '-train')
val_src, val_trg = compile_files(opt.raw_dir, raw_val, opt.prefix + '-val')
test_src, test_trg = compile_files(opt.raw_dir, raw_test, opt.prefix + '-test')
# Build up the code from training files if not exist
opt.codes = os.path.join(opt.data_dir, opt.codes)
if not os.path.isfile(opt.codes):
sys.stderr.write(f"Collect codes from training data and save to {opt.codes}.\n")
learn_bpe(raw_train['src'] + raw_train['trg'], opt.codes, opt.symbols, opt.min_frequency, True)
sys.stderr.write(f"BPE codes prepared.\n")
sys.stderr.write(f"Build up the tokenizer.\n")
with codecs.open(opt.codes, encoding='utf-8') as codes:
bpe = BPE(codes, separator=opt.separator)
sys.stderr.write(f"Encoding ...\n")
encode_files(bpe, train_src, train_trg, opt.data_dir, opt.prefix + '-train')
encode_files(bpe, val_src, val_trg, opt.data_dir, opt.prefix + '-val')
encode_files(bpe, test_src, test_trg, opt.data_dir, opt.prefix + '-test')
sys.stderr.write(f"Done.\n")
field = Field(
tokenize=str.split,
lower=True,
pad_token=Constants.PAD_WORD,
init_token=Constants.BOS_WORD,
eos_token=Constants.EOS_WORD)
fields = (field, field)
MAX_LEN = opt.max_len
def filter_examples_with_length(x):
return len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN
enc_train_files_prefix = opt.prefix + '-train'
train = TranslationDataset(
fields=fields,
path=os.path.join(opt.data_dir, enc_train_files_prefix),
exts=('.src', '.trg'),
filter_pred=filter_examples_with_length)
from itertools import chain
field.build_vocab(chain(train.src, train.trg), min_freq=2)
data = { 'settings': opt, 'vocab': field, }
opt.save_data = os.path.join(opt.data_dir, opt.save_data)
print('[Info] Dumping the processed data to pickle file', opt.save_data)
pickle.dump(data, open(opt.save_data, 'wb'))
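# Example invocation of the BPE pipeline above (added; paths and prefix are
# illustrative). Note that the __main__ guard at the bottom of this file calls
# main_wo_bpe() by default, so main() only runs if that guard is switched:
#
#   python preprocess.py -raw_dir ./raw -data_dir ./data -codes codes.txt \
#       -save_data wmt17_de_en.pkl -prefix wmt17 -max_len 100 -s 32000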
def main_wo_bpe():
'''
Usage: python preprocess.py -lang_src de -lang_trg en -save_data multi30k_de_en.pkl -share_vocab
'''
spacy_support_langs = ['de_core_news_sm', 'el_core_news_sm', 'en_core_web_sm', 'es_core_news_sm', 'fr_core_news_sm', 'it_core_news_sm', 'lt_core_news_sm', 'nb_core_news_sm', 'nl_core_news_sm', 'pt_core_news_sm']
parser = argparse.ArgumentParser()
parser.add_argument('-lang_src', required=True, choices=spacy_support_langs)
parser.add_argument('-lang_trg', required=True, choices=spacy_support_langs)
parser.add_argument('-save_data', required=True)
parser.add_argument('-data_src', type=str, default=None)
parser.add_argument('-data_trg', type=str, default=None)
parser.add_argument('-max_len', type=int, default=100)
parser.add_argument('-min_word_count', type=int, default=3)
parser.add_argument('-keep_case', action='store_true')
parser.add_argument('-share_vocab', action='store_true')
parser.add_argument('-data_path', type=str, required=True)
#parser.add_argument('-ratio', '--train_valid_test_ratio', type=int, nargs=3, metavar=(8,1,1))
#parser.add_argument('-vocab', default=None)
opt = parser.parse_args()
    assert not any([opt.data_src, opt.data_trg]), 'Custom data input is not supported yet.'
assert not any([opt.data_src, opt.data_trg]) or all([opt.data_src, opt.data_trg])
print(opt)
src_lang_model = spacy.load(opt.lang_src)
trg_lang_model = spacy.load(opt.lang_trg)
def tokenize_src(text):
return [tok.text for tok in src_lang_model.tokenizer(text)]
def tokenize_trg(text):
return [tok.text for tok in trg_lang_model.tokenizer(text)]
SRC = Field(
tokenize=tokenize_src, lower=not opt.keep_case,
pad_token=Constants.PAD_WORD, init_token=Constants.BOS_WORD, eos_token=Constants.EOS_WORD)
TRG = Field(
tokenize=tokenize_trg, lower=not opt.keep_case,
pad_token=Constants.PAD_WORD, init_token=Constants.BOS_WORD, eos_token=Constants.EOS_WORD)
MAX_LEN = opt.max_len
MIN_FREQ = opt.min_word_count
if not all([opt.data_src, opt.data_trg]):
assert {opt.lang_src, opt.lang_trg} == {'de_core_news_sm', 'en_core_web_sm'}
else:
# Pack custom txt file into example datasets
raise NotImplementedError
def filter_examples_with_length(x):
return len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN
def get_short_lang(full_lang):
return full_lang.split('_')[0]
train, val, test = Multi30k.splits(
exts = ('.' + get_short_lang(opt.lang_src), '.' + get_short_lang(opt.lang_trg)),
fields = (SRC, TRG),
filter_pred=filter_examples_with_length,
path=opt.data_path)
SRC.build_vocab(train.src, min_freq=MIN_FREQ)
print('[Info] Get source language vocabulary size:', len(SRC.vocab))
TRG.build_vocab(train.trg, min_freq=MIN_FREQ)
print('[Info] Get target language vocabulary size:', len(TRG.vocab))
if opt.share_vocab:
        print('[Info] Merging the two vocabularies ...')
for w, _ in SRC.vocab.stoi.items():
# TODO: Also update the `freq`, although it is not likely to be used.
if w not in TRG.vocab.stoi:
TRG.vocab.stoi[w] = len(TRG.vocab.stoi)
TRG.vocab.itos = [None] * len(TRG.vocab.stoi)
for w, i in TRG.vocab.stoi.items():
TRG.vocab.itos[i] = w
SRC.vocab.stoi = TRG.vocab.stoi
SRC.vocab.itos = TRG.vocab.itos
print('[Info] Get merged vocabulary size:', len(TRG.vocab))
data = {
'settings': opt,
'vocab': {'src': SRC, 'trg': TRG},
'train': train.examples,
'valid': val.examples,
'test': test.examples}
print('[Info] Dumping the processed data to pkl file', opt.save_data)
pickle.dump(data, open(opt.save_data, 'wb'))
if __name__ == '__main__':
main_wo_bpe()
#main()
|
from argparse import Namespace
import math
import time
import os
import dill as pickle
from tqdm import tqdm
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchbenchmark.util.torchtext_legacy.field import Field
from torchbenchmark.util.torchtext_legacy.data import Dataset
from torchbenchmark.util.torchtext_legacy.iterator import BucketIterator
from torchbenchmark.util.torchtext_legacy.translation import TranslationDataset
from .transformer import Constants
from .transformer.Models import Transformer
from .transformer.Optim import ScheduledOptim
from .train import prepare_dataloaders, cal_performance, patch_src, patch_trg
import random
import numpy as np
from pathlib import Path
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
class Model(BenchmarkModel):
task = NLP.TRANSLATION
# Original batch size 256, hardware platform unknown
# Source: https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/132907dd272e2cc92e3c10e6c4e783a87ff8893d/README.md?plain=1#L83
DEFAULT_TRAIN_BSIZE = 256
DEFAULT_EVAL_BSIZE = 32
NUM_OF_BATCHES = 1
init_lr = 2.0
def _create_transformer(self):
transformer = Transformer(
self.opt.src_vocab_size,
self.opt.trg_vocab_size,
src_pad_idx=self.opt.src_pad_idx,
trg_pad_idx=self.opt.trg_pad_idx,
trg_emb_prj_weight_sharing=self.opt.proj_share_weight,
emb_src_trg_weight_sharing=self.opt.embs_share_weight,
d_k=self.opt.d_k,
d_v=self.opt.d_v,
d_model=self.opt.d_model,
d_word_vec=self.opt.d_word_vec,
d_inner=self.opt.d_inner_hid,
n_layers=self.opt.n_layers,
n_head=self.opt.n_head,
dropout=self.opt.dropout).to(self.device)
return transformer
def _preprocess(self, data_iter):
preloaded_data = []
for d in data_iter:
src_seq = patch_src(d.src, self.opt.src_pad_idx).to(self.device)
trg_seq, gold = map(lambda x: x.to(self.device), patch_trg(d.trg, self.opt.trg_pad_idx))
preloaded_data.append((src_seq, trg_seq, gold))
return preloaded_data
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
root = os.path.join(str(Path(__file__).parent), ".data")
self.opt = Namespace(**{
'batch_size': self.batch_size,
'd_inner_hid': 2048,
'd_k': 64,
'd_model': 512,
'd_word_vec': 512,
'd_v': 64,
'data_pkl': f'{root}/m30k_deen_shr.pkl',
'debug': '',
'dropout': 0.1,
'embs_share_weight': False,
'epoch': 1,
'label_smoothing': False,
'log': None,
'n_head': 8,
'n_layers': 6,
'n_warmup_steps': 128,
'cuda': True,
'proj_share_weight': False,
'save_mode': 'best',
'save_model': None,
'script': False,
'train_path': None,
'val_path': None,
})
train_data, test_data = prepare_dataloaders(self.opt, self.device)
self.model = self._create_transformer()
if test == "train":
self.model.train()
self.example_inputs = self._preprocess(train_data)
self.optimizer = ScheduledOptim(
optim.Adam(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09),
self.init_lr, self.opt.d_model, self.opt.n_warmup_steps)
elif test == "eval":
self.model.eval()
self.example_inputs = self._preprocess(test_data)
def get_module(self):
for (src_seq, trg_seq, gold) in self.example_inputs:
return self.model, (*(src_seq, trg_seq), )
def get_optimizer(self):
if hasattr(self, "optimizer"):
return self.optimizer
return None
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
# set_optimizer sets the optimizer to whatever you pass it. set_inner_optimizer will wrap your
# input optimizer with ScheduledOptim.
def set_inner_optimizer(self, optimizer: torch.optim.Optimizer) -> None:
self.optimizer = ScheduledOptim(optimizer, self.init_lr, self.opt.d_model, self.opt.n_warmup_steps)
def eval(self) -> torch.Tensor:
result = None
for _, (src_seq, trg_seq, gold) in zip(range(self.NUM_OF_BATCHES), self.example_inputs):
result = self.model(*(src_seq, trg_seq))
return (result, )
def train(self):
for _, (src_seq, trg_seq, gold) in zip(range(self.NUM_OF_BATCHES), self.example_inputs):
self.optimizer.zero_grad()
example_inputs = (src_seq, trg_seq)
pred = self.model(*example_inputs)
loss, n_correct, n_word = cal_performance(
pred, gold, self.opt.trg_pad_idx, smoothing=self.opt.label_smoothing)
loss.backward()
self.optimizer.step_and_update_lr()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Rico Sennrich
"""Use operations learned with learn_bpe.py to encode a new text.
The text will not be smaller, but use only a fixed vocabulary, with rare words
encoded as variable-length sequences of subword units.
Reference:
Rico Sennrich, Barry Haddow and Alexandra Birch (2015). Neural Machine Translation of Rare Words with Subword Units.
Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
"""
from __future__ import unicode_literals, division
import sys
import os
import inspect
import codecs
import io
import re
import warnings
import random
class BPE(object):
def __init__(self, codes, merges=-1, separator='@@', vocab=None, glossaries=None):
codes.seek(0)
offset=1
# check version information
firstline = codes.readline()
if firstline.startswith('#version:'):
self.version = tuple([int(x) for x in re.sub(r'(\.0+)*$','', firstline.split()[-1]).split(".")])
offset += 1
else:
self.version = (0, 1)
codes.seek(0)
self.bpe_codes = [tuple(item.strip('\r\n ').split(' ')) for (n, item) in enumerate(codes) if (n < merges or merges == -1)]
for i, item in enumerate(self.bpe_codes):
if len(item) != 2:
sys.stderr.write('Error: invalid line {0} in BPE codes file: {1}\n'.format(i+offset, ' '.join(item)))
                sys.stderr.write('The line should consist of exactly two subword units, separated by whitespace\n')
sys.exit(1)
# some hacking to deal with duplicates (only consider first instance)
self.bpe_codes = dict([(code,i) for (i,code) in reversed(list(enumerate(self.bpe_codes)))])
self.bpe_codes_reverse = dict([(pair[0] + pair[1], pair) for pair,i in self.bpe_codes.items()])
self.separator = separator
self.vocab = vocab
self.glossaries = glossaries if glossaries else []
self.glossaries_regex = re.compile('^({})$'.format('|'.join(glossaries))) if glossaries else None
self.cache = {}
def process_line(self, line, dropout=0):
"""segment line, dealing with leading and trailing whitespace"""
out = ""
leading_whitespace = len(line)-len(line.lstrip('\r\n '))
if leading_whitespace:
out += line[:leading_whitespace]
out += self.segment(line, dropout)
trailing_whitespace = len(line)-len(line.rstrip('\r\n '))
if trailing_whitespace and trailing_whitespace != len(line):
out += line[-trailing_whitespace:]
return out
def segment(self, sentence, dropout=0):
"""segment single sentence (whitespace-tokenized string) with BPE encoding"""
segments = self.segment_tokens(sentence.strip('\r\n ').split(' '), dropout)
return ' '.join(segments)
def segment_tokens(self, tokens, dropout=0):
"""segment a sequence of tokens with BPE encoding"""
output = []
for word in tokens:
# eliminate double spaces
if not word:
continue
new_word = [out for segment in self._isolate_glossaries(word)
for out in encode(segment,
self.bpe_codes,
self.bpe_codes_reverse,
self.vocab,
self.separator,
self.version,
self.cache,
self.glossaries_regex,
dropout)]
for item in new_word[:-1]:
output.append(item + self.separator)
output.append(new_word[-1])
return output
def _isolate_glossaries(self, word):
word_segments = [word]
for gloss in self.glossaries:
word_segments = [out_segments for segment in word_segments
for out_segments in isolate_glossary(segment, gloss)]
return word_segments
def encode(orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, glossaries_regex=None, dropout=0):
"""Encode word based on list of BPE merge operations, which are applied consecutively
"""
if not dropout and orig in cache:
return cache[orig]
if glossaries_regex and glossaries_regex.match(orig):
cache[orig] = (orig,)
return (orig,)
if len(orig) == 1:
return orig
if version == (0, 1):
word = list(orig) + ['</w>']
elif version == (0, 2): # more consistent handling of word-final segments
word = list(orig[:-1]) + [orig[-1] + '</w>']
else:
raise NotImplementedError
while len(word) > 1:
# get list of symbol pairs; optionally apply dropout
pairs = [(bpe_codes[pair],i,pair) for (i,pair) in enumerate(zip(word, word[1:])) if (not dropout or random.random() > dropout) and pair in bpe_codes]
if not pairs:
break
#get first merge operation in list of BPE codes
bigram = min(pairs)[2]
# find start position of all pairs that we want to merge
positions = [i for (rank,i,pair) in pairs if pair == bigram]
i = 0
new_word = []
bigram = ''.join(bigram)
for j in positions:
# merges are invalid if they start before current position. This can happen if there are overlapping pairs: (x x x -> xx x)
if j < i:
continue
new_word.extend(word[i:j]) # all symbols before merged pair
new_word.append(bigram) # merged pair
i = j+2 # continue after merged pair
new_word.extend(word[i:]) # add all symbols until end of word
word = new_word
# don't print end-of-word symbols
if word[-1] == '</w>':
word = word[:-1]
elif word[-1].endswith('</w>'):
word[-1] = word[-1][:-4]
word = tuple(word)
if vocab:
word = check_vocab_and_split(word, bpe_codes_reverse, vocab, separator)
cache[orig] = word
return word
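# Worked example (added): with version (0, 2) and the toy merge table
#   {('l', 'o'): 0, ('lo', 'w'): 1, ('e', 'r</w>'): 2, ('low', 'er</w>'): 3}
# encoding the word "lower" proceeds as
#   ['l', 'o', 'w', 'e', 'r</w>']   -> merge ('l', 'o')
#   ['lo', 'w', 'e', 'r</w>']       -> merge ('lo', 'w')    (lowest rank available)
#   ['low', 'e', 'r</w>']           -> merge ('e', 'r</w>')
#   ['low', 'er</w>']               -> merge ('low', 'er</w>')
#   ['lower</w>']
# and the trailing '</w>' is stripped, giving the cached result ('lower',).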
def recursive_split(segment, bpe_codes, vocab, separator, final=False):
"""Recursively split segment into smaller units (by reversing BPE merges)
    until all units are either in-vocabulary, or cannot be split further."""
try:
if final:
left, right = bpe_codes[segment + '</w>']
right = right[:-4]
else:
left, right = bpe_codes[segment]
except:
#sys.stderr.write('cannot split {0} further.\n'.format(segment))
yield segment
return
if left + separator in vocab:
yield left
else:
for item in recursive_split(left, bpe_codes, vocab, separator, False):
yield item
if (final and right in vocab) or (not final and right + separator in vocab):
yield right
else:
for item in recursive_split(right, bpe_codes, vocab, separator, final):
yield item
def check_vocab_and_split(orig, bpe_codes, vocab, separator):
"""Check for each segment in word if it is in-vocabulary,
and segment OOV segments into smaller units by reversing the BPE merge operations"""
out = []
for segment in orig[:-1]:
if segment + separator in vocab:
out.append(segment)
else:
#sys.stderr.write('OOV: {0}\n'.format(segment))
for item in recursive_split(segment, bpe_codes, vocab, separator, False):
out.append(item)
segment = orig[-1]
if segment in vocab:
out.append(segment)
else:
#sys.stderr.write('OOV: {0}\n'.format(segment))
for item in recursive_split(segment, bpe_codes, vocab, separator, True):
out.append(item)
return out
def read_vocabulary(vocab_file, threshold):
"""read vocabulary file produced by get_vocab.py, and filter according to frequency threshold.
"""
vocabulary = set()
for line in vocab_file:
word, freq = line.strip('\r\n ').split(' ')
freq = int(freq)
if threshold == None or freq >= threshold:
vocabulary.add(word)
return vocabulary
def isolate_glossary(word, glossary):
"""
Isolate a glossary present inside a word.
    Returns a list of subwords in which all occurrences of the glossary are isolated.
For example, if 'USA' is the glossary and '1934USABUSA' the word, the return value is:
['1934', 'USA', 'B', 'USA']
"""
# regex equivalent of (if word == glossary or glossary not in word)
if re.match('^'+glossary+'$', word) or not re.search(glossary, word):
return [word]
else:
segments = re.split(r'({})'.format(glossary), word)
segments, ending = segments[:-1], segments[-1]
segments = list(filter(None, segments)) # Remove empty strings in regex group.
return segments + [ending.strip('\r\n ')] if ending != '' else segments
|
'''
This script handles the training process.
'''
import argparse
import math
import time
import functools
import dill as pickle
from tqdm import tqdm
import torch
import torch.nn.functional as F
import torch.optim as optim
from torchbenchmark.util.torchtext_legacy.field import Field
from torchbenchmark.util.torchtext_legacy.data import Dataset
from torchbenchmark.util.torchtext_legacy.iterator import BucketIterator
from torchbenchmark.util.torchtext_legacy.translation import TranslationDataset
from .transformer import Constants
from .transformer.Models import Transformer
from .transformer.Optim import ScheduledOptim
import random
import numpy as np
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False
__author__ = "Yu-Hsiang Huang"
def cal_performance(pred, gold, trg_pad_idx, smoothing=False):
''' Apply label smoothing if needed '''
loss = cal_loss(pred, gold, trg_pad_idx, smoothing=smoothing)
pred = pred.max(1)[1]
gold = gold.contiguous().view(-1)
non_pad_mask = gold.ne(trg_pad_idx)
n_correct = pred.eq(gold).masked_select(non_pad_mask).sum().item()
n_word = non_pad_mask.sum().item()
return loss, n_correct, n_word
def cal_loss(pred, gold, trg_pad_idx, smoothing=False):
''' Calculate cross entropy loss, apply label smoothing if needed. '''
gold = gold.contiguous().view(-1)
if smoothing:
eps = 0.1
n_class = pred.size(1)
one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(pred, dim=1)
non_pad_mask = gold.ne(trg_pad_idx)
loss = -(one_hot * log_prb).sum(dim=1)
loss = loss.masked_select(non_pad_mask).sum() # average later
else:
loss = F.cross_entropy(pred, gold, ignore_index=trg_pad_idx, reduction='sum')
return loss
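# Numeric example of the smoothing above (added): with eps = 0.1 and n_class = 5,
# the gold class gets probability 1 - eps = 0.9 and each of the other 4 classes
# gets eps / (n_class - 1) = 0.025, so the soft target sums to 0.9 + 4*0.025 = 1.0.
# The loss is the cross entropy between this soft target and log_softmax(pred),
# summed over non-pad tokens (and averaged per word later by the caller).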
def patch_src(src, pad_idx):
src = src.transpose(0, 1)
return src
def patch_trg(trg, pad_idx):
trg = trg.transpose(0, 1)
trg, gold = trg[:, :-1], trg[:, 1:].contiguous().view(-1)
return trg, gold
last_run = None
def train_epoch(model, training_data, optimizer, opt, device, smoothing):
''' Epoch operation in training phase'''
global last_run
model.train()
total_loss, n_word_total, n_word_correct = 0, 0, 0
desc = ' - (Training) '
for batch in tqdm(training_data, mininterval=2, desc=desc, leave=False):
# prepare data
src_seq = patch_src(batch.src, opt.src_pad_idx).to(device)
trg_seq, gold = map(lambda x: x.to(device), patch_trg(batch.trg, opt.trg_pad_idx))
# forward
optimizer.zero_grad()
last_run = (src_seq, trg_seq)
pred = model(src_seq, trg_seq)
# backward and update parameters
loss, n_correct, n_word = cal_performance(
pred, gold, opt.trg_pad_idx, smoothing=smoothing)
loss.backward()
optimizer.step_and_update_lr()
# note keeping
n_word_total += n_word
n_word_correct += n_correct
total_loss += loss.item()
loss_per_word = total_loss/n_word_total
accuracy = n_word_correct/n_word_total
return loss_per_word, accuracy
def eval_epoch(model, validation_data, device, opt):
''' Epoch operation in evaluation phase '''
model.eval()
total_loss, n_word_total, n_word_correct = 0, 0, 0
desc = ' - (Validation) '
with torch.no_grad():
for batch in tqdm(validation_data, mininterval=2, desc=desc, leave=False):
# prepare data
src_seq = patch_src(batch.src, opt.src_pad_idx).to(device)
trg_seq, gold = map(lambda x: x.to(device), patch_trg(batch.trg, opt.trg_pad_idx))
# forward
pred = model(src_seq, trg_seq)
loss, n_correct, n_word = cal_performance(
pred, gold, opt.trg_pad_idx, smoothing=False)
# note keeping
n_word_total += n_word
n_word_correct += n_correct
total_loss += loss.item()
loss_per_word = total_loss/n_word_total
accuracy = n_word_correct/n_word_total
return loss_per_word, accuracy
def train(model, training_data, validation_data, optimizer, device, opt):
''' Start training '''
log_train_file, log_valid_file = None, None
if opt.log:
log_train_file = opt.log + '.train.log'
log_valid_file = opt.log + '.valid.log'
print('[Info] Training performance will be written to file: {} and {}'.format(
log_train_file, log_valid_file))
with open(log_train_file, 'w') as log_tf, open(log_valid_file, 'w') as log_vf:
log_tf.write('epoch,loss,ppl,accuracy\n')
log_vf.write('epoch,loss,ppl,accuracy\n')
def print_performances(header, loss, accu, start_time):
print(' - {header:12} ppl: {ppl: 8.5f}, accuracy: {accu:3.3f} %, '\
'elapse: {elapse:3.3f} min'.format(
header=f"({header})", ppl=math.exp(min(loss, 100)),
accu=100*accu, elapse=(time.time()-start_time)/60))
#valid_accus = []
valid_losses = []
for epoch_i in range(opt.epoch):
print('[ Epoch', epoch_i, ']')
start = time.time()
train_loss, train_accu = train_epoch(
model, training_data, optimizer, opt, device, smoothing=opt.label_smoothing)
print_performances('Training', train_loss, train_accu, start)
start = time.time()
valid_loss, valid_accu = eval_epoch(model, validation_data, device, opt)
print_performances('Validation', valid_loss, valid_accu, start)
valid_losses += [valid_loss]
checkpoint = {'epoch': epoch_i, 'settings': opt, 'model': model.state_dict()}
if opt.save_model:
if opt.save_mode == 'all':
model_name = opt.save_model + '_accu_{accu:3.3f}.chkpt'.format(accu=100*valid_accu)
torch.save(checkpoint, model_name)
elif opt.save_mode == 'best':
model_name = opt.save_model + '.chkpt'
if valid_loss <= min(valid_losses):
torch.save(checkpoint, model_name)
print(' - [Info] The checkpoint file has been updated.')
if log_train_file and log_valid_file:
with open(log_train_file, 'a') as log_tf, open(log_valid_file, 'a') as log_vf:
log_tf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
epoch=epoch_i, loss=train_loss,
ppl=math.exp(min(train_loss, 100)), accu=100*train_accu))
log_vf.write('{epoch},{loss: 8.5f},{ppl: 8.5f},{accu:3.3f}\n'.format(
epoch=epoch_i, loss=valid_loss,
ppl=math.exp(min(valid_loss, 100)), accu=100*valid_accu))
def main():
'''
Usage:
python train.py -data_pkl m30k_deen_shr.pkl -log m30k_deen_shr -embs_share_weight -proj_share_weight -label_smoothing -save_model trained -b 256 -warmup 128000
'''
parser = argparse.ArgumentParser()
parser.add_argument('-data_pkl', default=None) # all-in-1 data pickle or bpe field
parser.add_argument('-train_path', default=None) # bpe encoded data
parser.add_argument('-val_path', default=None) # bpe encoded data
parser.add_argument('-epoch', type=int, default=10)
parser.add_argument('-b', '--batch_size', type=int, default=2048)
parser.add_argument('-d_model', type=int, default=512)
parser.add_argument('-d_inner_hid', type=int, default=2048)
parser.add_argument('-d_k', type=int, default=64)
parser.add_argument('-d_v', type=int, default=64)
parser.add_argument('-n_head', type=int, default=8)
parser.add_argument('-n_layers', type=int, default=6)
parser.add_argument('-warmup','--n_warmup_steps', type=int, default=4000)
parser.add_argument('-dropout', type=float, default=0.1)
parser.add_argument('-embs_share_weight', action='store_true')
parser.add_argument('-proj_share_weight', action='store_true')
parser.add_argument('-log', default=None)
parser.add_argument('-save_model', default=None)
parser.add_argument('-save_mode', type=str, choices=['all', 'best'], default='best')
parser.add_argument('-no_cuda', action='store_true')
parser.add_argument('-label_smoothing', action='store_true')
parser.add_argument('--debug', metavar='fn', default="", help="Dump outputs into file")
parser.add_argument('--script', default=False, help="Script the model")
opt = parser.parse_args()
opt.cuda = not opt.no_cuda
opt.d_word_vec = opt.d_model
if not opt.log and not opt.save_model:
raise Exception('No experiment result will be saved.')
if opt.batch_size < 2048 and opt.n_warmup_steps <= 4000:
        print('[Warning] The number of warmup steps may not be enough.\n'
              '(sz_b, warmup) = (2048, 4000) is the official setting.\n'
              'Using a smaller batch size without a longer warmup may cause '
              'the warmup stage to end with only a little data trained.')
device = torch.device('cuda' if opt.cuda else 'cpu')
#========= Loading Dataset =========#
if all((opt.train_path, opt.val_path)):
training_data, validation_data = prepare_dataloaders_from_bpe_files(opt, device)
elif opt.data_pkl:
training_data, validation_data = prepare_dataloaders(opt, device)
else:
raise Exception("Error loading dataset")
print(opt)
transformer = Transformer(
opt.src_vocab_size,
opt.trg_vocab_size,
src_pad_idx=opt.src_pad_idx,
trg_pad_idx=opt.trg_pad_idx,
trg_emb_prj_weight_sharing=opt.proj_share_weight,
emb_src_trg_weight_sharing=opt.embs_share_weight,
d_k=opt.d_k,
d_v=opt.d_v,
d_model=opt.d_model,
d_word_vec=opt.d_word_vec,
d_inner=opt.d_inner_hid,
n_layers=opt.n_layers,
n_head=opt.n_head,
dropout=opt.dropout).to(device)
if opt.script:
print("scripted")
transformer = torch.jit.script(transformer)
else:
print("eager mode")
optimizer = ScheduledOptim(
optim.Adam(transformer.parameters(), betas=(0.9, 0.98), eps=1e-09),
2.0, opt.d_model, opt.n_warmup_steps)
train(transformer, training_data, validation_data, optimizer, device, opt)
assert(last_run)
if opt.debug:
o = transformer(*last_run)
torch.save(o, opt.debug)
def prepare_dataloaders_from_bpe_files(opt, device):
batch_size = opt.batch_size
MIN_FREQ = 2
if not opt.embs_share_weight:
raise Exception("err")
data = pickle.load(open(opt.data_pkl, 'rb'))
MAX_LEN = data['settings'].max_len
field = data['vocab']
fields = (field, field)
def filter_examples_with_length(x):
return len(vars(x)['src']) <= MAX_LEN and len(vars(x)['trg']) <= MAX_LEN
train = TranslationDataset(
fields=fields,
path=opt.train_path,
exts=('.src', '.trg'),
filter_pred=filter_examples_with_length)
val = TranslationDataset(
fields=fields,
path=opt.val_path,
exts=('.src', '.trg'),
filter_pred=filter_examples_with_length)
opt.max_token_seq_len = MAX_LEN + 2
opt.src_pad_idx = opt.trg_pad_idx = field.vocab.stoi[Constants.PAD_WORD]
opt.src_vocab_size = opt.trg_vocab_size = len(field.vocab)
train_iterator = BucketIterator(train, batch_size=batch_size, device=device, train=True)
val_iterator = BucketIterator(val, batch_size=batch_size, device=device)
return train_iterator, val_iterator
def prepare_dataloaders(opt, device):
batch_size = opt.batch_size
data = pickle.load(open(opt.data_pkl, 'rb'))
opt.max_token_seq_len = data['settings'].max_len
opt.src_pad_idx = data['vocab']['src'].vocab.stoi[Constants.PAD_WORD]
opt.trg_pad_idx = data['vocab']['trg'].vocab.stoi[Constants.PAD_WORD]
opt.src_vocab_size = len(data['vocab']['src'].vocab)
opt.trg_vocab_size = len(data['vocab']['trg'].vocab)
#========= Preparing Model =========#
if opt.embs_share_weight:
assert data['vocab']['src'].vocab.stoi == data['vocab']['trg'].vocab.stoi, \
            'To share word embeddings, the src/trg word2idx tables must be the same.'
fields = {'src': data['vocab']['src'], 'trg':data['vocab']['trg']}
train = Dataset(examples=data['train'], fields=fields)
val = Dataset(examples=data['valid'], fields=fields)
train_iterator = BucketIterator(train, batch_size=batch_size, device=device, train=True)
val_iterator = BucketIterator(val, batch_size=batch_size, device=device)
return train_iterator, val_iterator
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Rico Sennrich
"""Use byte pair encoding (BPE) to learn a variable-length encoding of the vocabulary in a text.
Unlike the original BPE, it does not compress the plain text, but can be used to reduce the vocabulary
of a text to a configurable number of symbols, with only a small increase in the number of tokens.
Reference:
Rico Sennrich, Barry Haddow and Alexandra Birch (2016). Neural Machine Translation of Rare Words with Subword Units.
Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (ACL 2016). Berlin, Germany.
"""
from __future__ import unicode_literals
import os
import sys
import inspect
import codecs
import re
import copy
import warnings
from collections import defaultdict, Counter
def update_vocabulary(vocab, file_name, is_dict=False):
"""Read text and return dictionary that encodes vocabulary
"""
#vocab = Counter()
with codecs.open(file_name, encoding='utf-8') as fobj:
for i, line in enumerate(fobj):
if is_dict:
try:
word, count = line.strip('\r\n ').split(' ')
except:
print('Failed reading vocabulary file at line {0}: {1}'.format(i, line))
sys.exit(1)
vocab[word] += int(count)
else:
for word in line.strip('\r\n ').split(' '):
if word:
vocab[word] += 1
return vocab
def update_pair_statistics(pair, changed, stats, indices):
"""Minimally update the indices and frequency of symbol pairs
if we merge a pair of symbols, only pairs that overlap with occurrences
of this pair are affected, and need to be updated.
"""
stats[pair] = 0
indices[pair] = defaultdict(int)
first, second = pair
new_pair = first+second
for j, word, old_word, freq in changed:
# find all instances of pair, and update frequency/indices around it
i = 0
while True:
# find first symbol
try:
i = old_word.index(first, i)
except ValueError:
break
# if first symbol is followed by second symbol, we've found an occurrence of pair (old_word[i:i+2])
if i < len(old_word)-1 and old_word[i+1] == second:
# assuming a symbol sequence "A B C", if "B C" is merged, reduce the frequency of "A B"
if i:
prev = old_word[i-1:i+1]
stats[prev] -= freq
indices[prev][j] -= 1
if i < len(old_word)-2:
# assuming a symbol sequence "A B C B", if "B C" is merged, reduce the frequency of "C B".
# however, skip this if the sequence is A B C B C, because the frequency of "C B" will be reduced by the previous code block
if old_word[i+2] != first or i >= len(old_word)-3 or old_word[i+3] != second:
nex = old_word[i+1:i+3]
stats[nex] -= freq
indices[nex][j] -= 1
i += 2
else:
i += 1
i = 0
while True:
try:
# find new pair
i = word.index(new_pair, i)
except ValueError:
break
# assuming a symbol sequence "A BC D", if "B C" is merged, increase the frequency of "A BC"
if i:
prev = word[i-1:i+1]
stats[prev] += freq
indices[prev][j] += 1
# assuming a symbol sequence "A BC B", if "B C" is merged, increase the frequency of "BC B"
# however, if the sequence is A BC BC, skip this step because the count of "BC BC" will be incremented by the previous code block
if i < len(word)-1 and word[i+1] != new_pair:
nex = word[i:i+2]
stats[nex] += freq
indices[nex][j] += 1
i += 1
def get_pair_statistics(vocab):
"""Count frequency of all symbol pairs, and create index"""
# data structure of pair frequencies
stats = defaultdict(int)
#index from pairs to words
indices = defaultdict(lambda: defaultdict(int))
for i, (word, freq) in enumerate(vocab):
prev_char = word[0]
for char in word[1:]:
stats[prev_char, char] += freq
indices[prev_char, char][i] += 1
prev_char = char
return stats, indices
def replace_pair(pair, vocab, indices):
"""Replace all occurrences of a symbol pair ('A', 'B') with a new symbol 'AB'"""
first, second = pair
pair_str = ''.join(pair)
pair_str = pair_str.replace('\\','\\\\')
changes = []
pattern = re.compile(r'(?<!\S)' + re.escape(first + ' ' + second) + r'(?!\S)')
if sys.version_info < (3, 0):
iterator = indices[pair].iteritems()
else:
iterator = indices[pair].items()
for j, freq in iterator:
if freq < 1:
continue
word, freq = vocab[j]
new_word = ' '.join(word)
new_word = pattern.sub(pair_str, new_word)
new_word = tuple(new_word.split(' '))
vocab[j] = (new_word, freq)
changes.append((j, new_word, word, freq))
return changes
def prune_stats(stats, big_stats, threshold):
"""Prune statistics dict for efficiency of max()
The frequency of a symbol pair never increases, so pruning is generally safe
    (until the most frequent pair is less frequent than a pair we previously pruned)
big_stats keeps full statistics for when we need to access pruned items
"""
for item,freq in list(stats.items()):
if freq < threshold:
del stats[item]
if freq < 0:
big_stats[item] += freq
else:
big_stats[item] = freq
def learn_bpe(infile_names, outfile_name, num_symbols, min_frequency=2, verbose=False, is_dict=False, total_symbols=False):
"""Learn num_symbols BPE operations from vocabulary, and write to outfile.
"""
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
#vocab = get_vocabulary(infile, is_dict)
vocab = Counter()
for f in infile_names:
sys.stderr.write(f'Collecting vocab from {f}\n')
vocab = update_vocabulary(vocab, f, is_dict)
vocab = dict([(tuple(x[:-1])+(x[-1]+'</w>',) ,y) for (x,y) in vocab.items()])
sorted_vocab = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
stats, indices = get_pair_statistics(sorted_vocab)
big_stats = copy.deepcopy(stats)
if total_symbols:
uniq_char_internal = set()
uniq_char_final = set()
for word in vocab:
for char in word[:-1]:
uniq_char_internal.add(char)
uniq_char_final.add(word[-1])
sys.stderr.write('Number of word-internal characters: {0}\n'.format(len(uniq_char_internal)))
sys.stderr.write('Number of word-final characters: {0}\n'.format(len(uniq_char_final)))
sys.stderr.write('Reducing number of merge operations by {0}\n'.format(len(uniq_char_internal) + len(uniq_char_final)))
num_symbols -= len(uniq_char_internal) + len(uniq_char_final)
    sys.stderr.write(f'Write vocab file to {outfile_name}\n')
with codecs.open(outfile_name, 'w', encoding='utf-8') as outfile:
# version 0.2 changes the handling of the end-of-word token ('</w>');
        # version numbering allows backward compatibility
outfile.write('#version: 0.2\n')
# threshold is inspired by Zipfian assumption, but should only affect speed
threshold = max(stats.values()) / 10
for i in range(num_symbols):
if stats:
most_frequent = max(stats, key=lambda x: (stats[x], x))
# we probably missed the best pair because of pruning; go back to full statistics
if not stats or (i and stats[most_frequent] < threshold):
prune_stats(stats, big_stats, threshold)
stats = copy.deepcopy(big_stats)
most_frequent = max(stats, key=lambda x: (stats[x], x))
# threshold is inspired by Zipfian assumption, but should only affect speed
threshold = stats[most_frequent] * i/(i+10000.0)
prune_stats(stats, big_stats, threshold)
if stats[most_frequent] < min_frequency:
sys.stderr.write(f'no pair has frequency >= {min_frequency}. Stopping\n')
break
if verbose:
sys.stderr.write('pair {0}: {1} {2} -> {1}{2} (frequency {3})\n'.format(
i, most_frequent[0], most_frequent[1], stats[most_frequent]))
outfile.write('{0} {1}\n'.format(*most_frequent))
changes = replace_pair(most_frequent, sorted_vocab, indices)
update_pair_statistics(most_frequent, changes, stats, indices)
stats[most_frequent] = 0
if not i % 100:
prune_stats(stats, big_stats, threshold)
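# Example call (added), mirroring how preprocess.py in this document invokes learn_bpe;
# the file names are placeholders:
#
#   learn_bpe(['raw-train.src', 'raw-train.trg'], 'codes.txt', 32000,
#             min_frequency=6, verbose=True)
#
# This writes one merge operation per line (plus the '#version: 0.2' header) to codes.txt.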
|
import os
import sys
import subprocess
from pathlib import Path
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
def spacy_download(language):
subprocess.check_call([sys.executable, '-m', 'spacy', 'download', language])
def preprocess():
current_dir = Path(os.path.dirname(os.path.realpath(__file__)))
multi30k_data_dir = os.path.join(current_dir.parent.parent, "data", ".data", "multi30k")
root = os.path.join(str(Path(__file__).parent), ".data")
os.makedirs(root, exist_ok=True)
subprocess.check_call([sys.executable, 'preprocess.py', '-lang_src', 'de_core_news_sm', '-lang_trg', 'en_core_web_sm', '-share_vocab',
'-save_data', os.path.join(root, 'm30k_deen_shr.pkl'), '-data_path', multi30k_data_dir])
if __name__ == '__main__':
pip_install_requirements()
spacy_download('en_core_web_sm')
spacy_download('de_core_news_sm')
# Preprocessed pkl is larger than 100MB so we cannot skip preprocess
preprocess()
|
''' Define the Transformer model '''
import torch
import torch.nn as nn
import numpy as np
from .Layers import EncoderLayer, DecoderLayer
__author__ = "Yu-Hsiang Huang"
def get_pad_mask(seq, pad_idx : int):
return (seq != pad_idx).unsqueeze(-2)
def get_subsequent_mask(seq):
''' For masking out the subsequent info. '''
sz_b, len_s = seq.size()
subsequent_mask = (1 - torch.triu(
torch.ones((1, len_s, len_s), device=seq.device), diagonal=1)).to(torch.bool)
return subsequent_mask
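# Illustration (added): for len_s = 3 the mask returned above is
#   tensor([[[ True, False, False],
#            [ True,  True, False],
#            [ True,  True,  True]]])
# i.e. position i may attend only to positions j <= i.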
class PositionalEncoding(nn.Module):
def __init__(self, d_hid, n_position=200):
super(PositionalEncoding, self).__init__()
# Not a parameter
self.register_buffer('pos_table', self._get_sinusoid_encoding_table(n_position, d_hid))
def _get_sinusoid_encoding_table(self, n_position, d_hid):
''' Sinusoid position encoding table '''
# TODO: make it with torch instead of numpy
def get_position_angle_vec(position):
return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
def forward(self, x):
return x + self.pos_table[:, :x.size(1)].clone().detach()
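# A possible torch-only version of the sinusoid table (added as a sketch for the
# TODO above; not used by the original code and kept here only as a comment):
#
#     def _get_sinusoid_encoding_table(self, n_position, d_hid):
#         position = torch.arange(n_position, dtype=torch.float32).unsqueeze(1)
#         hid_idx = torch.arange(d_hid, dtype=torch.float32)
#         angle = position / torch.pow(10000.0, 2 * torch.div(hid_idx, 2, rounding_mode='floor') / d_hid)
#         angle[:, 0::2] = torch.sin(angle[:, 0::2])
#         angle[:, 1::2] = torch.cos(angle[:, 1::2])
#         return angle.unsqueeze(0)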
class Encoder(nn.Module):
    ''' An encoder model with a self-attention mechanism. '''
def __init__(
self, n_src_vocab, d_word_vec, n_layers, n_head, d_k, d_v,
d_model, d_inner, pad_idx, dropout=0.1, n_position=200):
super().__init__()
self.src_word_emb = nn.Embedding(n_src_vocab, d_word_vec, padding_idx=pad_idx)
self.position_enc = PositionalEncoding(d_word_vec, n_position=n_position)
self.dropout = nn.Dropout(p=dropout)
self.layer_stack = nn.ModuleList([
EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
for _ in range(n_layers)])
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, src_seq, src_mask, return_attns=False):
enc_slf_attn_list = []
# -- Forward
enc_output = self.dropout(self.position_enc(self.src_word_emb(src_seq)))
enc_output = self.layer_norm(enc_output)
for enc_layer in self.layer_stack:
enc_output, enc_slf_attn = enc_layer(enc_output, slf_attn_mask=src_mask)
enc_slf_attn_list += [enc_slf_attn] if return_attns else []
assert(not return_attns)
#if return_attns:
# return enc_output, enc_slf_attn_list
return enc_output,
class Decoder(nn.Module):
    ''' A decoder model with a self-attention mechanism. '''
def __init__(
self, n_trg_vocab, d_word_vec, n_layers, n_head, d_k, d_v,
d_model, d_inner, pad_idx, n_position=200, dropout=0.1):
super().__init__()
self.trg_word_emb = nn.Embedding(n_trg_vocab, d_word_vec, padding_idx=pad_idx)
self.position_enc = PositionalEncoding(d_word_vec, n_position=n_position)
self.dropout = nn.Dropout(p=dropout)
self.layer_stack = nn.ModuleList([
DecoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
for _ in range(n_layers)])
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, trg_seq, trg_mask, enc_output, src_mask, return_attns=False):
dec_slf_attn_list, dec_enc_attn_list = [], []
# -- Forward
dec_output = self.dropout(self.position_enc(self.trg_word_emb(trg_seq)))
dec_output = self.layer_norm(dec_output)
for dec_layer in self.layer_stack:
dec_output, dec_slf_attn, dec_enc_attn = dec_layer(
dec_output, enc_output, slf_attn_mask=trg_mask, dec_enc_attn_mask=src_mask)
dec_slf_attn_list += [dec_slf_attn] if return_attns else []
dec_enc_attn_list += [dec_enc_attn] if return_attns else []
assert(not return_attns)
#if return_attns:
# return dec_output, dec_slf_attn_list, dec_enc_attn_list
return dec_output,
class Transformer(nn.Module):
    ''' A sequence-to-sequence model with an attention mechanism. '''
def __init__(
self, n_src_vocab, n_trg_vocab, src_pad_idx, trg_pad_idx,
d_word_vec=512, d_model=512, d_inner=2048,
n_layers=6, n_head=8, d_k=64, d_v=64, dropout=0.1, n_position=200,
trg_emb_prj_weight_sharing=True, emb_src_trg_weight_sharing=True):
super().__init__()
self.src_pad_idx, self.trg_pad_idx = src_pad_idx, trg_pad_idx
self.encoder = Encoder(
n_src_vocab=n_src_vocab, n_position=n_position,
d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,
n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,
pad_idx=src_pad_idx, dropout=dropout)
self.decoder = Decoder(
n_trg_vocab=n_trg_vocab, n_position=n_position,
d_word_vec=d_word_vec, d_model=d_model, d_inner=d_inner,
n_layers=n_layers, n_head=n_head, d_k=d_k, d_v=d_v,
pad_idx=trg_pad_idx, dropout=dropout)
self.trg_word_prj = nn.Linear(d_model, n_trg_vocab, bias=False)
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
        assert d_model == d_word_vec, \
            'To facilitate the residual connections, ' \
            'the dimensions of all module outputs shall be the same.'
self.x_logit_scale = 1.
if trg_emb_prj_weight_sharing:
# Share the weight between target word embedding & last dense layer
self.trg_word_prj.weight = self.decoder.trg_word_emb.weight
self.x_logit_scale = (d_model ** -0.5)
if emb_src_trg_weight_sharing:
self.encoder.src_word_emb.weight = self.decoder.trg_word_emb.weight
def forward(self, src_seq, trg_seq):
src_mask = get_pad_mask(src_seq, self.src_pad_idx)
trg_mask = get_pad_mask(trg_seq, self.trg_pad_idx) & get_subsequent_mask(trg_seq)
enc_output, *_ = self.encoder(src_seq, src_mask)
dec_output, *_ = self.decoder(trg_seq, trg_mask, enc_output, src_mask)
seq_logit = self.trg_word_prj(dec_output) * self.x_logit_scale
return seq_logit.view(-1, seq_logit.size(2))
|
PAD_WORD = '<blank>'
UNK_WORD = '<unk>'
BOS_WORD = '<s>'
EOS_WORD = '</s>'
|
from . import Constants, Modules, Layers, SubLayers, Models, Translator, Optim
|
''' Define the sublayers in encoder/decoder layer '''
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .Modules import ScaledDotProductAttention
from typing import Optional
__author__ = "Yu-Hsiang Huang"
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
self.fc = nn.Linear(n_head * d_v, d_model, bias=False)
self.attention = ScaledDotProductAttention(temperature=d_k ** 0.5)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
def forward(self, q, k, v, mask : Optional[torch.Tensor]=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, len_k, len_v = q.size(0), q.size(1), k.size(1), v.size(1)
residual = q
# Pass through the pre-attention projection: b x lq x (n*dv)
# Separate different heads: b x lq x n x dv
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
# Transpose for attention dot product: b x n x lq x dv
q, k, v = q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2)
if mask is not None:
mask = mask.unsqueeze(1) # For head axis broadcasting.
q, attn = self.attention(q, k, v, mask=mask)
# Transpose to move the head dimension back: b x lq x n x dv
# Combine the last two dimensions to concatenate all the heads together: b x lq x (n*dv)
q = q.transpose(1, 2).contiguous().view(sz_b, len_q, -1)
q = self.dropout(self.fc(q))
q += residual
q = self.layer_norm(q)
return q, attn
class PositionwiseFeedForward(nn.Module):
''' A two-feed-forward-layer module '''
def __init__(self, d_in, d_hid, dropout=0.1):
super().__init__()
self.w_1 = nn.Linear(d_in, d_hid) # position-wise
self.w_2 = nn.Linear(d_hid, d_in) # position-wise
self.layer_norm = nn.LayerNorm(d_in, eps=1e-6)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
x = self.w_2(F.relu(self.w_1(x)))
x = self.dropout(x)
x += residual
x = self.layer_norm(x)
return x
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
__author__ = "Yu-Hsiang Huang"
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
def forward(self, q, k, v, mask : Optional[torch.Tensor] = None):
attn = torch.matmul(q / self.temperature, k.transpose(2, 3))
if mask is not None:
if attn.dtype == torch.float16:
"""
-1e9 is overflow in fp16. It needs to be set a min.
Theoretically, the mask for empty token needs to be set as -inf. Check https://arxiv.org/pdf/1706.03762.pdf
"""
min_mask = -65504.0 #torch.finfo(torch.float16).min == -65504.0. jit scripting could handle finfo
else:
min_mask = -1e9
attn = attn.masked_fill(mask == 0, min_mask)
attn = self.dropout(F.softmax(attn, dim=-1))
output = torch.matmul(attn, v)
return output, attn
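# In short, the module above computes softmax(q @ k^T / temperature) @ v, with
# temperature = sqrt(d_k) as constructed in SubLayers.MultiHeadAttention; masked
# positions are filled with min_mask before the softmax, so they receive
# (numerically) zero attention weight.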
|
''' This module will handle the text generation with beam search. '''
import torch
import torch.nn as nn
import torch.nn.functional as F
from .Models import Transformer, get_pad_mask, get_subsequent_mask
class Translator(nn.Module):
''' Load a trained model and translate in beam search fashion. '''
def __init__(
self, model, beam_size, max_seq_len,
src_pad_idx, trg_pad_idx, trg_bos_idx, trg_eos_idx):
super(Translator, self).__init__()
self.alpha = 0.7
self.beam_size = beam_size
self.max_seq_len = max_seq_len
self.src_pad_idx = src_pad_idx
self.trg_bos_idx = trg_bos_idx
self.trg_eos_idx = trg_eos_idx
self.model = model
self.model.eval()
self.register_buffer('init_seq', torch.LongTensor([[trg_bos_idx]]))
self.register_buffer(
'blank_seqs',
torch.full((beam_size, max_seq_len), trg_pad_idx, dtype=torch.long))
self.blank_seqs[:, 0] = self.trg_bos_idx
self.register_buffer(
'len_map',
torch.arange(1, max_seq_len + 1, dtype=torch.long).unsqueeze(0))
def _model_decode(self, trg_seq, enc_output, src_mask):
trg_mask = get_subsequent_mask(trg_seq)
dec_output, *_ = self.model.decoder(trg_seq, trg_mask, enc_output, src_mask)
return F.softmax(self.model.trg_word_prj(dec_output), dim=-1)
def _get_init_state(self, src_seq, src_mask):
beam_size = self.beam_size
enc_output, *_ = self.model.encoder(src_seq, src_mask)
dec_output = self._model_decode(self.init_seq, enc_output, src_mask)
best_k_probs, best_k_idx = dec_output[:, -1, :].topk(beam_size)
scores = torch.log(best_k_probs).view(beam_size)
gen_seq = self.blank_seqs.clone().detach()
gen_seq[:, 1] = best_k_idx[0]
enc_output = enc_output.repeat(beam_size, 1, 1)
return enc_output, gen_seq, scores
def _get_the_best_score_and_idx(self, gen_seq, dec_output, scores, step):
assert len(scores.size()) == 1
beam_size = self.beam_size
# Get k candidates for each beam, k^2 candidates in total.
best_k2_probs, best_k2_idx = dec_output[:, -1, :].topk(beam_size)
# Include the previous scores.
scores = torch.log(best_k2_probs).view(beam_size, -1) + scores.view(beam_size, 1)
# Get the best k candidates from k^2 candidates.
scores, best_k_idx_in_k2 = scores.view(-1).topk(beam_size)
        # Get the corresponding positions of the best k candidates.
best_k_r_idxs, best_k_c_idxs = best_k_idx_in_k2 // beam_size, best_k_idx_in_k2 % beam_size
best_k_idx = best_k2_idx[best_k_r_idxs, best_k_c_idxs]
# Copy the corresponding previous tokens.
gen_seq[:, :step] = gen_seq[best_k_r_idxs, :step]
# Set the best tokens in this beam search step
gen_seq[:, step] = best_k_idx
return gen_seq, scores
def translate_sentence(self, src_seq):
        # Only accepts a batch size of 1 in this function.
# TODO: expand to batch operation.
assert src_seq.size(0) == 1
src_pad_idx, trg_eos_idx = self.src_pad_idx, self.trg_eos_idx
max_seq_len, beam_size, alpha = self.max_seq_len, self.beam_size, self.alpha
with torch.no_grad():
src_mask = get_pad_mask(src_seq, src_pad_idx)
enc_output, gen_seq, scores = self._get_init_state(src_seq, src_mask)
ans_idx = 0 # default
for step in range(2, max_seq_len): # decode up to max length
dec_output = self._model_decode(gen_seq[:, :step], enc_output, src_mask)
gen_seq, scores = self._get_the_best_score_and_idx(gen_seq, dec_output, scores, step)
                # Check if all paths have finished
# -- locate the eos in the generated sequences
eos_locs = gen_seq == trg_eos_idx
# -- replace the eos with its position for the length penalty use
seq_lens, _ = self.len_map.masked_fill(~eos_locs, max_seq_len).min(1)
# -- check if all beams contain eos
if (eos_locs.sum(1) > 0).sum(0).item() == beam_size:
# TODO: Try different terminate conditions.
_, ans_idx = scores.div(seq_lens.float() ** alpha).max(0)
ans_idx = ans_idx.item()
break
return gen_seq[ans_idx][:seq_lens[ans_idx]].tolist()
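# Minimal usage sketch (illustrative only; the index values below are assumptions,
# not taken from this file):
#
#   translator = Translator(model, beam_size=5, max_seq_len=100,
#                           src_pad_idx=1, trg_pad_idx=1,
#                           trg_bos_idx=2, trg_eos_idx=3)
#   hyp = translator.translate_sentence(src_seq)  # src_seq: LongTensor of shape [1, src_len]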
|
''' Define the Layers '''
import torch.nn as nn
import torch
from .SubLayers import MultiHeadAttention, PositionwiseFeedForward
from typing import Optional
__author__ = "Yu-Hsiang Huang"
class EncoderLayer(nn.Module):
''' Compose with two layers '''
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(self, enc_input, slf_attn_mask : Optional[torch.Tensor]=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, mask=slf_attn_mask)
enc_output = self.pos_ffn(enc_output)
return enc_output, enc_slf_attn
class DecoderLayer(nn.Module):
''' Compose with three layers '''
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(DecoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(
self, dec_input, enc_output,
slf_attn_mask : Optional[torch.Tensor]=None, dec_enc_attn_mask : Optional[torch.Tensor]=None):
dec_output, dec_slf_attn = self.slf_attn(
dec_input, dec_input, dec_input, mask=slf_attn_mask)
dec_output, dec_enc_attn = self.enc_attn(
dec_output, enc_output, enc_output, mask=dec_enc_attn_mask)
dec_output = self.pos_ffn(dec_output)
return dec_output, dec_slf_attn, dec_enc_attn
|
'''A wrapper class for scheduled optimizer '''
import numpy as np
class ScheduledOptim():
'''A simple wrapper class for learning rate scheduling'''
def __init__(self, optimizer, init_lr, d_model, n_warmup_steps):
self._optimizer = optimizer
self.init_lr = init_lr
self.d_model = d_model
self.n_warmup_steps = n_warmup_steps
self.n_steps = 0
def step_and_update_lr(self):
"Step with the inner optimizer"
self._update_learning_rate()
self._optimizer.step()
def zero_grad(self):
"Zero out the gradients with the inner optimizer"
self._optimizer.zero_grad()
def _get_lr_scale(self):
d_model = self.d_model
n_steps, n_warmup_steps = self.n_steps, self.n_warmup_steps
return (d_model ** -0.5) * min(n_steps ** (-0.5), n_steps * n_warmup_steps ** (-1.5))
def _update_learning_rate(self):
''' Learning rate scheduling per step '''
self.n_steps += 1
lr = self.init_lr * self._get_lr_scale()
for param_group in self._optimizer.param_groups:
param_group['lr'] = lr
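# _get_lr_scale() implements the "Noam" warmup schedule from the Transformer paper:
# lr = init_lr * d_model**-0.5 * min(step**-0.5, step * n_warmup_steps**-1.5).
# Illustrative usage sketch (the optimizer settings below are assumptions):
#
#   sched = ScheduledOptim(
#       torch.optim.Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-9),
#       init_lr=2.0, d_model=512, n_warmup_steps=4000)
#   sched.zero_grad()
#   loss.backward()
#   sched.step_and_update_lr()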
|
# This file was adapted from
# https://github.com/facebookresearch/higher/blob/master/examples/maml-omniglot.py
# It comes with the following license.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from pathlib import Path
from typing import Tuple
import higher
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
# batch size in the traditional sense doesn't apply to this maml model.
# Instead, there is a task number (32 in this case) and K-shot
# (5 in this case) and these were chosen to be representative of
# the training done in the original MAML paper
# (https://arxiv.org/pdf/1703.03400.pdf)
#
# The goal of MAML is to train a model in a way that if one brings along
# a new task and K data points, then the model generalizes well on the
# test set for that task.
#
# The task number (also known as the meta-batch size) is the number of
# independent tasks the model gets trained on.
# K-shot means that each task only sees K data points.
#
# We've set the following variables to be equal to the task number.
DEFAULT_TRAIN_BSIZE = 5
DEFAULT_EVAL_BSIZE = 5
ALLOW_CUSTOMIZE_BSIZE = False
# TODO: There _should_ be a way to plug in an optim here, but this
# can be a next step. For now, the optim is not customizable.
CANNOT_SET_CUSTOM_OPTIMIZER = True
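    # How train() below works: meta_opt accumulates gradients across tasks. For
    # each task, higher.innerloop_ctx creates a differentiable copy of the net
    # (copy_initial_weights=False); the inner SGD adapts that copy on the support
    # set for n_inner_iter steps, and the query-set loss is backpropagated through
    # those adaptation steps into the original parameters before meta_opt.step().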
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
n_way = 5
net = nn.Sequential(
nn.Conv2d(1, 64, 3),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(64, n_way)).to(device)
self.model = net
root = str(Path(__file__).parent)
self.meta_inputs = torch.load(f'{root}/batch.pt')
self.meta_inputs = tuple([torch.from_numpy(i).to(self.device) for i in self.meta_inputs])
self.example_inputs = (self.meta_inputs[0][0],)
def get_module(self):
return self.model, self.example_inputs
def train(self):
net, _ = self.get_module()
net.train()
x_spt, y_spt, x_qry, y_qry = self.meta_inputs
meta_opt = optim.Adam(net.parameters(), lr=1e-3)
if True:
task_num, setsz, c_, h, w = x_spt.size()
querysz = x_qry.size(1)
n_inner_iter = 5
inner_opt = torch.optim.SGD(net.parameters(), lr=1e-1)
meta_opt.zero_grad()
for i in range(task_num):
with higher.innerloop_ctx(
net, inner_opt, copy_initial_weights=False
) as (fnet, diffopt):
for _ in range(n_inner_iter):
spt_logits = fnet(x_spt[i])
spt_loss = F.cross_entropy(spt_logits, y_spt[i])
diffopt.step(spt_loss)
qry_logits = fnet(x_qry[i])
qry_loss = F.cross_entropy(qry_logits, y_qry[i])
qry_loss.backward()
meta_opt.step()
def eval(self) -> Tuple[torch.Tensor]:
model, (example_input,) = self.get_module()
model.eval()
with torch.no_grad():
out = model(example_input)
return (out, )
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 8
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(name="hf_DistilBert", test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.DETECTION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-Detection/faster_rcnn_R_50_C4_1x.yaml", test=test, device=device,
jit=jit, batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
"Doctr recognition model"
from doctr.models import ocr_predictor
import numpy as np
import torch
# TorchBench imports
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
from typing import Tuple
class Model(BenchmarkModel):
task = COMPUTER_VISION.DETECTION
DEFAULT_EVAL_BSIZE = 1
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
predictor = ocr_predictor(det_arch='db_resnet50', reco_arch='crnn_vgg16_bn', pretrained=True).to(self.device)
# recognition model expects input (batch_size, 3, 32, 128)
self.model = predictor.reco_predictor.model
self.example_inputs = torch.randn(self.batch_size, 3, 32, 128).to(self.device)
if self.test == "eval":
self.model.eval()
def train(self):
raise NotImplementedError("Train is not implemented for this model.")
def get_module(self):
return self.model, (self.example_inputs, )
def eval(self) -> Tuple[torch.Tensor]:
with torch.inference_mode():
out = self.model(self.example_inputs, return_model_output=True)
return (out["out_map"], )
|
import os
import warnings
import subprocess
import sys
def pip_install_requirements():
try:
subprocess.check_call(["conda", "install", "-y", "expecttest", "libglib", "pango", "-c", "conda-forge"])
    except Exception:
        warnings.warn("The doctr_reco_predictor model requires conda binary libraries to be installed. Missing conda packages might break this model.")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.DETECTION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml", test=test, device=device,
jit=jit, batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Dora the Explorer, special thank to @pierrestock.
"""
import argparse
import json
import logging
import shlex
import subprocess as sp
import time
from collections import namedtuple
from functools import partial
from itertools import product # noqa
from pathlib import Path
import treetable as tt # really great package for ascii art tables
from demucs.parser import get_name, get_parser
logger = logging.getLogger(__name__)
parser = get_parser()
logs = Path("logs")
logs.mkdir(exist_ok=True)
Job = namedtuple("Job", "args name sid")
def fname(name, kind):
return logs / f"{name}.{kind}"
def get_sid(name):
sid_file = fname(name, "sid")
try:
return int(open(sid_file).read().strip())
except IOError:
return None
def cancel(sid):
sp.run(["scancel", str(sid)], check=True)
def reset_job(name):
sid_file = fname(name, "sid")
if sid_file.is_file():
sid_file.unlink()
def get_done(name):
done_file = fname(name, "done")
return done_file.exists()
def get_metrics(name):
json_file = fname(name, "json")
try:
return json.load(open(json_file))
except IOError:
return []
def schedule(name, args, nodes=2, partition="learnfair", time=2 * 24 * 60, large=True, gpus=8):
log = fname(name, "log")
command = [
"sbatch",
f"--job-name={name}",
f"--output={log}.%t",
"--mem=500G",
"--cpus-per-task=40",
f"--gres=gpu:{gpus}",
f"--nodes={nodes}",
"--tasks-per-node=1",
f"--partition={partition}",
f"--time={time}",
]
if large:
command += ["--constraint=volta32gb"]
srun_flags = f"--output={shlex.quote(str(log))}.%t"
run_cmd = ["#!/bin/bash"]
run_cmd.append(f"srun {srun_flags} python3 run_slurm.py " + " ".join(args))
result = sp.run(command, stdout=sp.PIPE, input="\n".join(run_cmd).encode('utf-8'),
check=True).stdout.decode('utf-8')
sid = int(result.strip().rsplit(' ', 1)[1])
open(fname(name, "sid"), "w").write(str(sid))
return sid
def _check(sids):
cs_ids = ','.join(map(str, sids))
result = sp.run(['squeue', f'-j{cs_ids}', '-o%A,%T,%P', '--noheader'],
check=True,
capture_output=True)
lines = result.stdout.decode('utf-8').strip().split('\n')
results = {}
for line in lines:
line = line.strip()
if not line:
continue
sid, status, partition = line.split(',', 2)
sid = int(sid)
results[sid] = status.lower()
for sid in sids:
if sid not in results:
results[sid] = 'failed'
return results
class Monitor:
def __init__(self, cancel=False, base=[]):
self.cancel = cancel
self.base = base
self.jobs = []
def schedule(self, args, *vargs, **kwargs):
args = self.base + args
name = get_name(parser, parser.parse_args(args))
sid = get_sid(name)
if sid is None and not self.cancel:
sid = schedule(name, args, *vargs, **kwargs)
self.jobs.append(Job(sid=sid, name=name, args=args))
def gc(self):
names = set(job.name for job in self.jobs)
for f in logs.iterdir():
stem, suffix = f.name.rsplit(".", 1)
if suffix == "sid":
if stem not in names:
sid = get_sid(stem)
if sid is not None:
print(f"GCing {stem} / {sid}")
cancel(sid)
f.unlink()
def check(self, trim=None, reset=False):
to_check = []
statuses = {}
for job in self.jobs:
if get_done(job.name):
statuses[job.sid] = "done"
elif job.sid is not None:
to_check.append(job.sid)
statuses.update(_check(to_check))
if trim is not None:
trim = len(get_metrics(self.jobs[trim].name))
lines = []
for index, job in enumerate(self.jobs):
status = statuses.get(job.sid, "failed")
if status in ["failed", "completing"] and reset:
reset_job(job.name)
status = "reset"
meta = {'name': job.name, 'sid': job.sid, 'status': status[:2], "index": index}
metrics = get_metrics(job.name)
if trim is not None:
metrics = metrics[:trim]
meta["epoch"] = len(metrics)
if metrics:
metrics = metrics[-1]
else:
metrics = {}
lines.append({'meta': meta, 'metrics': metrics})
table = tt.table(shorten=True,
groups=[
tt.group("meta", [
tt.leaf("index", align=">"),
tt.leaf("name"),
tt.leaf("sid", align=">"),
tt.leaf("status"),
tt.leaf("epoch", align=">")
]),
tt.group("metrics", [
tt.leaf("train", ".2%"),
tt.leaf("valid", ".2%"),
tt.leaf("best", ".2%"),
])
])
print(tt.treetable(lines, table, colors=["30", "39"]))
def main():
parser = argparse.ArgumentParser("grid.py")
parser.add_argument("-c", "--cancel", action="store_true", help="Cancel all jobs")
parser.add_argument(
"-r",
"--reset",
action="store_true",
help="Will reset the state of failed jobs. Next invocation will reschedule them")
parser.add_argument("-t", "--trim", type=int, help="Trim metrics to match job with given index")
args = parser.parse_args()
monitor = Monitor(base=[], cancel=args.cancel)
sched = partial(monitor.schedule, nodes=1)
tasnet = ["--tasnet", "--split_valid", "--samples=80000", "--X=10"]
extra_path = Path.home() / "musdb_raw_44_allstems"
extra = [f"--raw={extra_path}"]
# Main models
for seed in [42, 43, 44, 45, 46]:
base = [f"--seed={seed}"]
sched(base)
sched(base + extra)
sched(base + tasnet + ["-e", "180"])
sched(base + tasnet + extra)
# Optimality of parameters
for channels, lr, seed in product([64, 80, 100], [3e-4, 5e-4], [42, 43, 44]):
cmd = [f"--channels={channels}", f"--lr={lr}", f"--seed={seed}"]
sched(cmd)
for rescale in [0.01, 0.05, 0.1]:
sched([f"--rescale={rescale}"])
# Ablation study
sched(["--no_glu"])
sched(["--no_rewrite"])
sched(["--context=1"])
sched(["--rescale=0"])
sched(["--mse"])
sched(["--lstm_layers=0"])
sched(["--no_glu", "--rescale=0"])
if args.cancel:
for job in monitor.jobs:
if job.sid is not None:
print(f"Canceling {job.name}/{job.sid}")
cancel(job.sid)
return
names = [job.name for job in monitor.jobs]
json.dump(names, open(logs / "herd.json", "w"))
# Cancel any running job that was removed from the above sched calls.
monitor.gc()
while True:
if args.reset:
monitor.check(reset=True)
return
monitor.check(trim=args.trim)
time.sleep(5 * 60)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Run training locally on all visible GPUs. Start only
one task per node as this script will spawn one child for each GPU.
"""
import subprocess as sp
import sys
import time
import torch as th
from demucs.utils import free_port
def main():
args = sys.argv[1:]
gpus = th.cuda.device_count()
port = free_port()
args += ["--world_size", str(gpus), "--master", f"127.0.0.1:{port}"]
tasks = []
for gpu in range(gpus):
kwargs = {}
if gpu > 0:
kwargs['stdin'] = sp.DEVNULL
kwargs['stdout'] = sp.DEVNULL
# We keep stderr to see tracebacks from children.
tasks.append(sp.Popen(["python3", "-m", "demucs"] + args + ["--rank", str(gpu)], **kwargs))
tasks[-1].rank = gpu
failed = False
try:
while tasks:
for task in tasks:
try:
exitcode = task.wait(0.1)
except sp.TimeoutExpired:
continue
else:
tasks.remove(task)
if exitcode:
print(f"Task {task.rank} died with exit code "
f"{exitcode}",
file=sys.stderr)
failed = True
if failed:
break
time.sleep(1)
except KeyboardInterrupt:
for task in tasks:
task.terminate()
raise
if failed:
for task in tasks:
task.terminate()
sys.exit(1)
if __name__ == "__main__":
main()
|
import torch
import sys
a = torch.load(sys.argv[1])
b = torch.load(sys.argv[2])
torch.testing.assert_allclose(a, b, rtol=0.01, atol=0.01)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Quantize a pre-trained model. Just pass the path to the model to this script
and it will save a gzip-compressed version of the model with the weights quantized
over 8 bits. The model is still stored as floats, but gzip finds out on its own
that only 256 different float values exist and does the compression for us.
"""
import sys
from demucs.utils import load_model, save_model
def quantize(p, level=256):
scale = p.abs().max()
fac = 2 * scale / (level - 1)
q = ((p + scale) / fac).round()
p = q * fac - scale
return p
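# Worked example (illustrative): with level=256 and a tensor whose largest
# magnitude is 1.0, fac = 2 / 255, so every value is snapped to one of 256 evenly
# spaced floats in [-1.0, 1.0]; gzip then compresses the repeated bit patterns.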
def main():
path = sys.argv[1]
level = 256
min_mb = 20
if len(sys.argv) >= 3:
level = int(sys.argv[2])
if len(sys.argv) >= 4:
min_mb = float(sys.argv[3])
print(path, level, min_mb)
model = load_model(path)
for p in model.parameters():
if p.numel() >= min_mb * 2**20 / 4:
p.data[:] = quantize(p.data, level)
save_model(model, path + ".gz")
if __name__ == "__main__":
main()
|
import json
import torch
import random
import numpy as np
from fractions import Fraction
from .demucs.model import Demucs
from .demucs.parser import get_name, get_parser
from .demucs.augment import FlipChannels, FlipSign, Remix, Shift
from .demucs.utils import capture_init, center_trim
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import OTHER
from torch import Tensor
from torch.nn.modules.container import Sequential
from torchbenchmark.models.demucs.demucs.model import Demucs
from typing import Optional, Tuple
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
class DemucsWrapper(torch.nn.Module):
def __init__(self, model: Demucs, augment: Sequential) -> None:
super(DemucsWrapper, self).__init__()
self.model = model
self.augment = augment
def forward(self, streams) -> Tuple[Tensor, Tensor]:
sources = streams[:, 1:]
sources = self.augment(sources)
mix = sources.sum(dim=1)
return sources, self.model(mix)
class Model(BenchmarkModel):
task = OTHER.OTHER_TASKS
# Original train batch size: 64
# Source: https://github.com/facebookresearch/demucs/blob/3e5ea549ba921316c587e5f03c0afc0be47a0ced/conf/config.yaml#L37
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 8
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]) -> None:
# reduce the eval batch size when running on CPU
# see: https://github.com/pytorch/benchmark/issues/895
if device == "cpu":
self.DEFAULT_EVAL_BSIZE = max(1, int(self.DEFAULT_EVAL_BSIZE / 8))
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
self.parser = get_parser()
self.args = self.parser.parse_args([])
args = self.args
model = Demucs(channels=64)
model.to(device)
samples = 80000
self.duration = Fraction(samples + args.data_stride, args.samplerate)
self.stride = Fraction(args.data_stride, args.samplerate)
if args.mse:
self.criterion = torch.nn.MSELoss()
else:
self.criterion = torch.nn.L1Loss()
if args.augment:
self.augment = torch.nn.Sequential(FlipSign(), FlipChannels(), Shift(args.data_stride),
Remix(group_size=args.remix_group_size)).to(device)
else:
self.augment = Shift(args.data_stride)
self.model = DemucsWrapper(model, self.augment)
if test == "train":
self.model.train()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr)
elif test == "eval":
self.model.eval()
self.example_inputs = (torch.rand([self.batch_size, 5, 2, 426888], device=device),)
def get_module(self) -> Tuple[DemucsWrapper, Tuple[Tensor]]:
return self.model, self.example_inputs
def eval(self) -> Tuple[torch.Tensor]:
sources, estimates = self.model(*self.example_inputs)
sources = center_trim(sources, estimates)
loss = self.criterion(estimates, sources)
return (sources, estimates)
def train(self):
sources, estimates = self.model(*self.example_inputs)
sources = center_trim(sources, estimates)
loss = self.criterion(estimates, sources)
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import gzip
import json
import sys
from collections import defaultdict
from pathlib import Path
import numpy as np
import treetable as tt
BASELINES = [
'WaveUNet',
'MMDenseLSTM',
'OpenUnmix',
'IRM2',
]
EVALS = Path("evals")
LOGS = Path("logs")
BASELINE_EVALS = Path("baselines")
STD_KEY = "seed"
parser = argparse.ArgumentParser("result_table.py")
parser.add_argument("-p",
"--paper",
action="store_true",
help="show results from the paper experiment")
parser.add_argument("-i", "--individual", action="store_true", help="no aggregation by seed")
parser.add_argument("-l", "--latex", action="store_true", help="output easy to copy latex")
parser.add_argument("metric", default="SDR", nargs="?")
args = parser.parse_args()
if args.paper:
EVALS = Path("results/evals")
LOGS = Path("results/logs")
def read_track(metric, results, pool=np.nanmedian):
all_metrics = {}
for target in results["targets"]:
source = target["name"]
metrics = [frame["metrics"][metric] for frame in target["frames"]]
metrics = pool(metrics)
all_metrics[source] = metrics
return all_metrics
def read(metric, path, pool=np.nanmedian):
all_metrics = defaultdict(list)
for f in path.iterdir():
if f.name.endswith(".json.gz"):
results = json.load(gzip.open(f, "r"))
metrics = read_track(metric, results, pool=pool)
for source, value in metrics.items():
all_metrics[source].append(value)
return {key: np.array(value) for key, value in all_metrics.items()}
all_stats = defaultdict(list)
for name in BASELINES:
all_stats[name] = [read(args.metric, BASELINE_EVALS / name / "test")]
for path in EVALS.iterdir():
results = path / "results" / "test"
if not results.exists():
continue
if not args.paper and not (LOGS / (path.name + ".done")).exists():
continue
name = path.name
model = "Demucs"
if "tasnet" in name:
model = "Tasnet"
if name == "default":
parts = []
else:
parts = [p.split("=") for p in name.split(" ") if "tasnet" not in p]
if not args.individual:
parts = [(k, v) for k, v in parts if k != STD_KEY]
name = model + " " + " ".join(f"{k}={v}" for k, v in parts)
stats = read(args.metric, results)
if (not stats or len(stats["drums"]) != 50):
print(f"Missing stats for {results}", file=sys.stderr)
else:
all_stats[name].append(stats)
metrics = [tt.leaf("score", ".2f"), tt.leaf("std", ".2f")]
sources = ["drums", "bass", "other", "vocals"]
mytable = tt.table([tt.leaf("name"), tt.group("all", metrics + [tt.leaf("count")])] +
[tt.group(source, metrics) for idx, source in enumerate(sources)])
lines = []
for name, stats in all_stats.items():
line = {"name": name}
if 'accompaniment' in stats:
del stats['accompaniment']
alls = []
for source in sources:
stat = [np.nanmedian(s[source]) for s in stats]
alls.append(stat)
line[source] = {"score": np.mean(stat), "std": np.std(stat) / len(stat)**0.5}
alls = np.array(alls)
line["all"] = {
"score": alls.mean(),
"std": alls.mean(0).std() / alls.shape[1]**0.5,
"count": alls.shape[1]
}
lines.append(line)
def latex_number(m):
out = f"{m['score']:.2f}"
if m["std"] > 0:
std = "{:.2f}".format(m["std"])[1:]
out += f" $\\scriptstyle\\pm {std}$"
return out
lines.sort(key=lambda x: -x["all"]["score"])
if args.latex:
for line in lines:
cols = [
line['name'],
latex_number(line["all"]),
latex_number(line["drums"]),
latex_number(line["bass"]),
latex_number(line["other"]),
latex_number(line["vocals"])
]
print(" & ".join(cols) + r" \\")
else:
print(tt.treetable(lines, mytable, colors=['33', '0']))
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
from collections import defaultdict
from pathlib import Path
import numpy as np
import treetable as tt
LOGS = Path("logs")
STD_KEY = "seed"
METRIC = "best"
parser = argparse.ArgumentParser("result_table.py")
parser.add_argument("-p",
"--paper",
action="store_true",
help="show results from the paper experiment")
parser.add_argument("-i", "--individual", action="store_true", help="no aggregation by seed")
args = parser.parse_args()
if args.paper:
LOGS = Path("results/logs")
all_stats = defaultdict(list)
for path in LOGS.iterdir():
if path.suffix == ".json" and (args.paper or path.with_suffix(".done").exists()):
metric = json.load(open(path))[-1][METRIC]
name = path.stem
model = "Demucs"
if "tasnet" in name:
model = "Tasnet"
if name == "default":
parts = []
else:
parts = [p.split("=") for p in name.split(" ") if "tasnet" not in p]
if not args.individual:
parts = [(k, v) for k, v in parts if k != STD_KEY]
name = model + " " + " ".join(f"{k}={v}" for k, v in parts)
all_stats[name].append(metric)
metrics = [tt.leaf("score", ".4f"), tt.leaf("std", ".3f"), tt.leaf("count", ".2f")]
mytable = tt.table([tt.leaf("name"), tt.group("valid", metrics)])
lines = []
for name, stats in all_stats.items():
line = {"name": name}
stats = np.array(stats)
line["valid"] = {
"score": stats.mean(),
"std": stats.std() / stats.shape[0]**0.5,
"count": stats.shape[0]
}
lines.append(line)
lines.sort(key=lambda x: x["valid"]["score"])
print(tt.treetable(lines, mytable, colors=['33', '0']))
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
def spacy_download(language):
pass
def preprocess():
pass
if __name__ == '__main__':
pip_install_requirements()
spacy_download('')
preprocess()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Run training from Slurm on all visible GPUs. Start only
one task per node as this script will spawn one child for each GPU.
This will not schedule a job but instead should be launched from srun/sbatch.
"""
import os
import subprocess as sp
import sys
import time
import torch as th
from demucs.utils import free_port
def main():
args = sys.argv[1:]
gpus = th.cuda.device_count()
n_nodes = int(os.environ['SLURM_JOB_NUM_NODES'])
node_id = int(os.environ['SLURM_NODEID'])
job_id = int(os.environ['SLURM_JOBID'])
rank_offset = gpus * node_id
hostnames = sp.run(['scontrol', 'show', 'hostnames', os.environ['SLURM_JOB_NODELIST']],
capture_output=True,
check=True).stdout
master_addr = hostnames.split()[0].decode('utf-8')
if n_nodes == 1:
port = free_port()
else:
port = 20_000 + (job_id % 40_000)
args += ["--world_size", str(n_nodes * gpus), "--master", f"{master_addr}:{port}"]
tasks = []
print("About to go live", master_addr, node_id, n_nodes, file=sys.stderr)
sys.stderr.flush()
for gpu in range(gpus):
kwargs = {}
if gpu > 0:
kwargs['stdin'] = sp.DEVNULL
kwargs['stdout'] = sp.DEVNULL
# We keep stderr to see tracebacks from children.
tasks.append(
sp.Popen(["python3", "-m", "demucs"] + args +
["--rank", str(rank_offset + gpu)], **kwargs))
tasks[-1].rank = rank_offset + gpu
failed = False
try:
while tasks:
for task in tasks:
try:
exitcode = task.wait(0.1)
except sp.TimeoutExpired:
continue
else:
tasks.remove(task)
if exitcode:
print(f"Task {task.rank} died with exit code "
f"{exitcode}",
file=sys.stderr)
failed = True
else:
print(f"Task {task.rank} exited successfully")
if failed:
break
time.sleep(1)
except KeyboardInterrupt:
for task in tasks:
task.terminate()
raise
if failed:
for task in tasks:
task.terminate()
sp.run(["scancel", str(job_id)], check=True)
sys.exit(1)
if __name__ == "__main__":
main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from collections import defaultdict, namedtuple
from pathlib import Path
import musdb
import numpy as np
import torch as th
import tqdm
from torch.utils.data import DataLoader
from .audio import AudioFile
ChunkInfo = namedtuple("ChunkInfo", ["file_index", "offset", "local_index"])
class Rawset:
"""
Dataset of raw, normalized, float32 audio files
"""
def __init__(self, path, samples=None, stride=None, channels=2, streams=None):
self.path = Path(path)
self.channels = channels
self.samples = samples
if stride is None:
stride = samples if samples is not None else 0
self.stride = stride
entries = defaultdict(list)
for root, folders, files in os.walk(self.path, followlinks=True):
folders.sort()
files.sort()
for file in files:
if file.endswith(".raw"):
path = Path(root) / file
name, stream = path.stem.rsplit('.', 1)
entries[(path.parent.relative_to(self.path), name)].append(int(stream))
self._entries = list(entries.keys())
sizes = []
self._lengths = []
ref_streams = sorted(entries[self._entries[0]])
assert ref_streams == list(range(len(ref_streams)))
if streams is None:
self.streams = ref_streams
else:
self.streams = streams
for entry in sorted(entries.keys()):
streams = entries[entry]
assert sorted(streams) == ref_streams
file = self._path(*entry)
length = file.stat().st_size // (4 * channels)
if samples is None:
sizes.append(1)
else:
if length < samples:
self._entries.remove(entry)
continue
sizes.append((length - samples) // stride + 1)
self._lengths.append(length)
if not sizes:
raise ValueError(f"Empty dataset {self.path}")
self._cumulative_sizes = np.cumsum(sizes)
self._sizes = sizes
def __len__(self):
return self._cumulative_sizes[-1]
@property
def total_length(self):
return sum(self._lengths)
def chunk_info(self, index):
file_index = np.searchsorted(self._cumulative_sizes, index, side='right')
if file_index == 0:
local_index = index
else:
local_index = index - self._cumulative_sizes[file_index - 1]
return ChunkInfo(offset=local_index * self.stride,
file_index=file_index,
local_index=local_index)
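    # Example (illustrative): with per-file chunk counts [3, 2], _cumulative_sizes
    # is [3, 5]; index 4 then maps to file_index 1 and local_index 1, i.e. the
    # second chunk of the second file, starting stride samples into it.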
def _path(self, folder, name, stream=0):
return self.path / folder / (name + f'.{stream}.raw')
def __getitem__(self, index):
chunk = self.chunk_info(index)
entry = self._entries[chunk.file_index]
length = self.samples or self._lengths[chunk.file_index]
streams = []
to_read = length * self.channels * 4
for stream_index, stream in enumerate(self.streams):
offset = chunk.offset * 4 * self.channels
file = open(self._path(*entry, stream=stream), 'rb')
file.seek(offset)
content = file.read(to_read)
assert len(content) == to_read
content = np.frombuffer(content, dtype=np.float32)
streams.append(th.from_numpy(content).view(length, self.channels).t())
return th.stack(streams, dim=0)
def name(self, index):
chunk = self.chunk_info(index)
folder, name = self._entries[chunk.file_index]
return folder / name
class MusDBSet:
def __init__(self, mus, streams=slice(None), samplerate=44100, channels=2):
self.mus = mus
self.streams = streams
self.samplerate = samplerate
self.channels = channels
def __len__(self):
return len(self.mus.tracks)
def __getitem__(self, index):
track = self.mus.tracks[index]
return (track.name, AudioFile(track.path).read(channels=self.channels,
seek_time=0,
streams=self.streams,
samplerate=self.samplerate))
def build_raw(mus, destination, normalize, workers, samplerate, channels):
destination.mkdir(parents=True, exist_ok=True)
loader = DataLoader(MusDBSet(mus, channels=channels, samplerate=samplerate),
batch_size=1,
num_workers=workers,
collate_fn=lambda x: x[0])
for name, streams in tqdm.tqdm(loader):
if normalize:
ref = streams[0].mean(dim=0) # use mono mixture as reference
streams = (streams - ref.mean()) / ref.std()
for index, stream in enumerate(streams):
open(destination / (name + f'.{index}.raw'), "wb").write(stream.t().numpy().tobytes())
def main():
parser = argparse.ArgumentParser('rawset')
parser.add_argument('--workers', type=int, default=10)
parser.add_argument('--samplerate', type=int, default=44100)
parser.add_argument('--channels', type=int, default=2)
parser.add_argument('musdb', type=Path)
parser.add_argument('destination', type=Path)
args = parser.parse_args()
build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="train"),
args.destination / "train",
normalize=True,
channels=args.channels,
samplerate=args.samplerate,
workers=args.workers)
build_raw(musdb.DB(root=args.musdb, subsets=["train"], split="valid"),
args.destination / "valid",
normalize=True,
samplerate=args.samplerate,
channels=args.channels,
workers=args.workers)
if __name__ == "__main__":
main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from concurrent import futures
import musdb
from .audio import AudioFile
def get_musdb_tracks(root, *args, **kwargs):
mus = musdb.DB(root, *args, **kwargs)
return {track.name: track.path for track in mus}
class StemsSet:
def __init__(self, tracks, metadata, duration=None, stride=1, samplerate=44100, channels=2):
self.metadata = []
for name, path in tracks.items():
meta = dict(metadata[name])
meta["path"] = path
meta["name"] = name
self.metadata.append(meta)
if duration is not None and meta["duration"] < duration:
raise ValueError(f"Track {name} duration is too small {meta['duration']}")
self.metadata.sort(key=lambda x: x["name"])
self.duration = duration
self.stride = stride
self.channels = channels
self.samplerate = samplerate
def __len__(self):
return sum(self._examples_count(m) for m in self.metadata)
def _examples_count(self, meta):
if self.duration is None:
return 1
else:
return int((meta["duration"] - self.duration) // self.stride + 1)
def track_metadata(self, index):
for meta in self.metadata:
examples = self._examples_count(meta)
if index >= examples:
index -= examples
continue
return meta
def __getitem__(self, index):
for meta in self.metadata:
examples = self._examples_count(meta)
if index >= examples:
index -= examples
continue
streams = AudioFile(meta["path"]).read(seek_time=index * self.stride,
duration=self.duration,
channels=self.channels,
samplerate=self.samplerate)
return (streams - meta["mean"]) / meta["std"]
def _get_track_metadata(path):
    # use mono at 44.1 kHz as reference. For any other settings the data won't be perfectly
    # normalized but it should be good enough.
audio = AudioFile(path)
mix = audio.read(streams=0, channels=1, samplerate=44100)
return {"duration": audio.duration, "std": mix.std().item(), "mean": mix.mean().item()}
def build_metadata(tracks):
return {name: _get_track_metadata(path) for name, path in tracks.items()}
def build_musdb_metadata(path, musdb, workers):
tracks = get_musdb_tracks(musdb)
metadata = build_metadata(tracks)
path.parent.mkdir(exist_ok=True, parents=True)
json.dump(metadata, open(path, "w"))
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Created on 2018/12
# Author: Kaituo XU
# Modified on 2019/11 by Alexandre Defossez, added support for multiple output channels
# Here is the original license:
# The MIT License (MIT)
#
# Copyright (c) 2018 Kaituo XU
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import capture_init
EPS = 1e-8
def overlap_and_add(signal, frame_step):
outer_dimensions = signal.size()[:-2]
frames, frame_length = signal.size()[-2:]
subframe_length = math.gcd(frame_length, frame_step) # gcd=Greatest Common Divisor
subframe_step = frame_step // subframe_length
subframes_per_frame = frame_length // subframe_length
output_size = frame_step * (frames - 1) + frame_length
output_subframes = output_size // subframe_length
subframe_signal = signal.view(*outer_dimensions, -1, subframe_length)
frame = torch.arange(0, output_subframes,
device=signal.device).unfold(0, subframes_per_frame, subframe_step)
    frame = frame.long()  # signal may be on GPU or CPU
frame = frame.contiguous().view(-1)
result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length)
result.index_add_(-2, frame, subframe_signal)
result = result.view(*outer_dimensions, -1)
return result
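# Worked example (illustrative): with frames=3, frame_length=4 and frame_step=2,
# output_size = 2 * (3 - 1) + 4 = 8, i.e. three length-4 frames with 50% overlap
# are summed back into an 8-sample signal (matching the Decoder's L // 2 hop).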
class ConvTasNet(nn.Module):
@capture_init
def __init__(self,
N=256,
L=20,
B=256,
H=512,
P=3,
X=8,
R=4,
C=4,
audio_channels=1,
norm_type="gLN",
causal=False,
mask_nonlinear='relu'):
"""
Args:
N: Number of filters in autoencoder
L: Length of the filters (in samples)
B: Number of channels in bottleneck 1 × 1-conv block
H: Number of channels in convolutional blocks
P: Kernel size in convolutional blocks
X: Number of convolutional blocks in each repeat
R: Number of repeats
C: Number of speakers
norm_type: BN, gLN, cLN
causal: causal or non-causal
            mask_nonlinear: which non-linear function to use to generate the mask
"""
super(ConvTasNet, self).__init__()
# Hyper-parameter
self.N, self.L, self.B, self.H, self.P, self.X, self.R, self.C = N, L, B, H, P, X, R, C
self.norm_type = norm_type
self.causal = causal
self.mask_nonlinear = mask_nonlinear
# Components
self.encoder = Encoder(L, N, audio_channels)
self.separator = TemporalConvNet(N, B, H, P, X, R, C, norm_type, causal, mask_nonlinear)
self.decoder = Decoder(N, L, audio_channels)
# init
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_normal_(p)
def valid_length(self, length):
return length
def forward(self, mixture):
"""
Args:
            mixture: [M, audio_channels, T], M is batch size, T is #samples
        Returns:
            est_source: [M, C, audio_channels, T]
"""
mixture_w = self.encoder(mixture)
est_mask = self.separator(mixture_w)
est_source = self.decoder(mixture_w, est_mask)
# T changed after conv1d in encoder, fix it here
T_origin = mixture.size(-1)
T_conv = est_source.size(-1)
est_source = F.pad(est_source, (0, T_origin - T_conv))
return est_source
class Encoder(nn.Module):
"""Estimation of the nonnegative mixture weight by a 1-D conv layer.
"""
def __init__(self, L, N, audio_channels):
super(Encoder, self).__init__()
# Hyper-parameter
self.L, self.N = L, N
# Components
# 50% overlap
self.conv1d_U = nn.Conv1d(audio_channels, N, kernel_size=L, stride=L // 2, bias=False)
def forward(self, mixture):
"""
Args:
            mixture: [M, audio_channels, T], M is batch size, T is #samples
Returns:
mixture_w: [M, N, K], where K = (T-L)/(L/2)+1 = 2T/L-1
"""
mixture_w = F.relu(self.conv1d_U(mixture)) # [M, N, K]
return mixture_w
class Decoder(nn.Module):
def __init__(self, N, L, audio_channels):
super(Decoder, self).__init__()
# Hyper-parameter
self.N, self.L = N, L
self.audio_channels = audio_channels
# Components
self.basis_signals = nn.Linear(N, audio_channels * L, bias=False)
def forward(self, mixture_w, est_mask):
"""
Args:
mixture_w: [M, N, K]
est_mask: [M, C, N, K]
Returns:
            est_source: [M, C, audio_channels, T]
"""
# D = W * M
source_w = torch.unsqueeze(mixture_w, 1) * est_mask # [M, C, N, K]
source_w = torch.transpose(source_w, 2, 3) # [M, C, K, N]
# S = DV
est_source = self.basis_signals(source_w) # [M, C, K, ac * L]
m, c, k, _ = est_source.size()
est_source = est_source.view(m, c, k, self.audio_channels, -1).transpose(2, 3).contiguous()
est_source = overlap_and_add(est_source, self.L // 2) # M x C x ac x T
return est_source
class TemporalConvNet(nn.Module):
def __init__(self, N, B, H, P, X, R, C, norm_type="gLN", causal=False, mask_nonlinear='relu'):
"""
Args:
N: Number of filters in autoencoder
B: Number of channels in bottleneck 1 × 1-conv block
H: Number of channels in convolutional blocks
P: Kernel size in convolutional blocks
X: Number of convolutional blocks in each repeat
R: Number of repeats
C: Number of speakers
norm_type: BN, gLN, cLN
causal: causal or non-causal
            mask_nonlinear: which non-linear function to use to generate the mask
"""
super(TemporalConvNet, self).__init__()
# Hyper-parameter
self.C = C
self.mask_nonlinear = mask_nonlinear
# Components
# [M, N, K] -> [M, N, K]
layer_norm = ChannelwiseLayerNorm(N)
# [M, N, K] -> [M, B, K]
bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False)
# [M, B, K] -> [M, B, K]
repeats = []
for r in range(R):
blocks = []
for x in range(X):
dilation = 2**x
padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2
blocks += [
TemporalBlock(B,
H,
P,
stride=1,
padding=padding,
dilation=dilation,
norm_type=norm_type,
causal=causal)
]
repeats += [nn.Sequential(*blocks)]
temporal_conv_net = nn.Sequential(*repeats)
# [M, B, K] -> [M, C*N, K]
mask_conv1x1 = nn.Conv1d(B, C * N, 1, bias=False)
# Put together
self.network = nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net,
mask_conv1x1)
def forward(self, mixture_w):
"""
        Keep this API the same as TasNet
Args:
mixture_w: [M, N, K], M is batch size
returns:
est_mask: [M, C, N, K]
"""
M, N, K = mixture_w.size()
score = self.network(mixture_w) # [M, N, K] -> [M, C*N, K]
score = score.view(M, self.C, N, K) # [M, C*N, K] -> [M, C, N, K]
if self.mask_nonlinear == 'softmax':
est_mask = F.softmax(score, dim=1)
elif self.mask_nonlinear == 'relu':
est_mask = F.relu(score)
else:
raise ValueError("Unsupported mask non-linear function")
return est_mask
class TemporalBlock(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
norm_type="gLN",
causal=False):
super(TemporalBlock, self).__init__()
# [M, B, K] -> [M, H, K]
conv1x1 = nn.Conv1d(in_channels, out_channels, 1, bias=False)
prelu = nn.PReLU()
norm = chose_norm(norm_type, out_channels)
# [M, H, K] -> [M, B, K]
dsconv = DepthwiseSeparableConv(out_channels, in_channels, kernel_size, stride, padding,
dilation, norm_type, causal)
# Put together
self.net = nn.Sequential(conv1x1, prelu, norm, dsconv)
def forward(self, x):
"""
Args:
x: [M, B, K]
Returns:
[M, B, K]
"""
residual = x
out = self.net(x)
        # TODO: when P = 3 this works fine, but when P = 2 it may need padding?
        return out + residual  # looks like w/o F.relu is better than w/ F.relu
# return F.relu(out + residual)
class DepthwiseSeparableConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
norm_type="gLN",
causal=False):
super(DepthwiseSeparableConv, self).__init__()
# Use `groups` option to implement depthwise convolution
# [M, H, K] -> [M, H, K]
depthwise_conv = nn.Conv1d(in_channels,
in_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
bias=False)
if causal:
chomp = Chomp1d(padding)
prelu = nn.PReLU()
norm = chose_norm(norm_type, in_channels)
# [M, H, K] -> [M, B, K]
pointwise_conv = nn.Conv1d(in_channels, out_channels, 1, bias=False)
# Put together
if causal:
self.net = nn.Sequential(depthwise_conv, chomp, prelu, norm, pointwise_conv)
else:
self.net = nn.Sequential(depthwise_conv, prelu, norm, pointwise_conv)
def forward(self, x):
"""
Args:
x: [M, H, K]
Returns:
result: [M, B, K]
"""
return self.net(x)
class Chomp1d(nn.Module):
"""To ensure the output length is the same as the input.
"""
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
"""
Args:
x: [M, H, Kpad]
Returns:
[M, H, K]
"""
return x[:, :, :-self.chomp_size].contiguous()
def chose_norm(norm_type, channel_size):
"""The input of normlization will be (M, C, K), where M is batch size,
C is channel size and K is sequence length.
"""
if norm_type == "gLN":
return GlobalLayerNorm(channel_size)
elif norm_type == "cLN":
return ChannelwiseLayerNorm(channel_size)
elif norm_type == "id":
return nn.Identity()
else: # norm_type == "BN":
        # Given input (M, C, K), nn.BatchNorm1d(C) will accumulate statistics
# along M and K, so this BN usage is right.
return nn.BatchNorm1d(channel_size)
# TODO: Use nn.LayerNorm to impl cLN to speed up
class ChannelwiseLayerNorm(nn.Module):
"""Channel-wise Layer Normalization (cLN)"""
def __init__(self, channel_size):
super(ChannelwiseLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.reset_parameters()
def reset_parameters(self):
self.gamma.data.fill_(1)
self.beta.data.zero_()
def forward(self, y):
"""
Args:
y: [M, N, K], M is batch size, N is channel size, K is length
Returns:
cLN_y: [M, N, K]
"""
mean = torch.mean(y, dim=1, keepdim=True) # [M, 1, K]
var = torch.var(y, dim=1, keepdim=True, unbiased=False) # [M, 1, K]
cLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta
return cLN_y
class GlobalLayerNorm(nn.Module):
"""Global Layer Normalization (gLN)"""
def __init__(self, channel_size):
super(GlobalLayerNorm, self).__init__()
self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1]
self.reset_parameters()
def reset_parameters(self):
self.gamma.data.fill_(1)
self.beta.data.zero_()
def forward(self, y):
"""
Args:
y: [M, N, K], M is batch size, N is channel size, K is length
Returns:
gLN_y: [M, N, K]
"""
        # TODO: in torch >= 1.0, torch.mean() supports a list of dims
mean = y.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True) # [M, 1, 1]
var = (torch.pow(y - mean, 2)).mean(dim=1, keepdim=True).mean(dim=2, keepdim=True)
gLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta
return gLN_y
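# Illustrative sketch (not part of the original file): cLN normalizes each time
# step over the channel dimension only, while gLN normalizes over channels and
# time jointly, so the two differ whenever the statistics change over time.
def _norm_sketch(M=2, N=4, K=8):
    y = torch.randn(M, N, K)
    return ChannelwiseLayerNorm(N)(y), GlobalLayerNorm(N)(y)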
if __name__ == "__main__":
torch.manual_seed(123)
M, N, L, T = 2, 3, 4, 12
K = 2 * T // L - 1
B, H, P, X, R, C, norm_type, causal = 2, 3, 3, 3, 2, 2, "gLN", False
mixture = torch.randint(3, (M, T))
# test Encoder
encoder = Encoder(L, N)
encoder.conv1d_U.weight.data = torch.randint(2, encoder.conv1d_U.weight.size())
mixture_w = encoder(mixture)
print('mixture', mixture)
print('U', encoder.conv1d_U.weight)
print('mixture_w', mixture_w)
print('mixture_w size', mixture_w.size())
# test TemporalConvNet
separator = TemporalConvNet(N, B, H, P, X, R, C, norm_type=norm_type, causal=causal)
est_mask = separator(mixture_w)
print('est_mask', est_mask)
# test Decoder
decoder = Decoder(N, L)
est_mask = torch.randint(2, (B, K, C, N))
est_source = decoder(mixture_w, est_mask)
print('est_source', est_source)
# test Conv-TasNet
conv_tasnet = ConvTasNet(N, L, B, H, P, X, R, C, norm_type=norm_type)
est_source = conv_tasnet(mixture)
print('est_source', est_source)
print('est_source size', est_source.size())
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import hashlib
import sys
from pathlib import Path
import requests
import torch as th
import tqdm
from scipy.io import wavfile
from .audio import AudioFile
from .utils import apply_model, load_model
BASE_URL = "https://dl.fbaipublicfiles.com/demucs/v2.0/"
PRETRAINED_MODELS = {
'demucs.th': 'f6c4148ba0dc92242d82d7b3f2af55c77bd7cb4ff1a0a3028a523986f36a3cfd',
'demucs.th.gz': 'e70767bfc9ce62c26c200477ea29a20290c708b210977e3ef2c75ace68ea4be1',
'demucs_extra.th': '3331bcc5d09ba1d791c3cf851970242b0bb229ce81dbada557b6d39e8c6a6a87',
'demucs_extra.th.gz': 'f9edcf7fe55ea5ac9161c813511991e4ba03188112fd26a9135bc9308902a094',
'light.th': '79d1ee3c1541c729c552327756954340a1a46a11ce0009dea77dc583e4b6269c',
'light.th.gz': '94c091021d8cdee0806b6df0afbeb59e73e989dbc2c16d2c1c370b2edce774fd',
'light_extra.th': '9e9b4af564229c80cc73c95d02d2058235bb054c6874b3cba4d5b26943a5ddcb',
'light_extra.th.gz': '48bb1a85f5ad0ca400512fcd0dcf91ec94e886a1602a552ee32133f5e09aeae0',
'tasnet.th': 'be56693f6a5c4854b124f95bb9dd043f3167614898493738ab52e25648bec8a2',
'tasnet_extra.th': '0ccbece3acd98785a367211c9c35b1eadae8d148b0d37fe5a5494d6d335269b5',
}
def download_file(url, target):
"""
Download a file with a progress bar.
Arguments:
url (str): url to download
target (Path): target path to write to
"""
def _download():
response = requests.get(url, stream=True)
total_length = int(response.headers.get('content-length', 0))
with tqdm.tqdm(total=total_length, ncols=120, unit="B", unit_scale=True) as bar:
with open(target, "wb") as output:
for data in response.iter_content(chunk_size=4096):
output.write(data)
bar.update(len(data))
try:
_download()
except: # noqa, re-raising
if target.exists():
target.unlink()
raise
def verify_file(target, sha256):
hasher = hashlib.sha256()
with open(target, "rb") as f:
while True:
data = f.read(65536)
if not data:
break
hasher.update(data)
signature = hasher.hexdigest()
if signature != sha256:
print(
f"Invalid sha256 signature for the file {target}. Expected {sha256} but got "
f"{signature}.\nIf you have recently updated the repo, it is possible "
"the checkpoints have been updated. It is also possible that a previous "
f"download did not run to completion.\nPlease delete the file '{target.absolute()}' "
"and try again.",
file=sys.stderr)
sys.exit(1)
def encode_mp3(wav, path, verbose=False):
try:
import lameenc
except ImportError:
print("Failed to call lame encoder. Maybe it is not installed? "
"On windows, run `python.exe -m pip install -U lameenc`, "
"on OSX/Linux, run `python3 -m pip install -U lameenc`, "
"then try again.", file=sys.stderr)
sys.exit(1)
encoder = lameenc.Encoder()
encoder.set_bit_rate(320)
encoder.set_in_sample_rate(44100)
encoder.set_channels(2)
encoder.set_quality(2) # 2-highest, 7-fastest
if not verbose:
encoder.silence()
    mp3_data = encoder.encode(wav.tobytes())  # tobytes(): tostring() is deprecated in recent NumPy
mp3_data += encoder.flush()
with open(path, "wb") as f:
f.write(mp3_data)
def main():
parser = argparse.ArgumentParser("demucs.separate",
description="Separate the sources for the given tracks")
parser.add_argument("tracks", nargs='+', type=Path, default=[], help='Path to tracks')
parser.add_argument("-n",
"--name",
default="demucs",
help="Model name. See README.md for the list of pretrained models. "
"Default is demucs.")
parser.add_argument("-Q", "--quantized", action="store_true", dest="quantized", default=False,
help="Load the quantized model rather than the quantized version. "
"Quantized model is about 4 times smaller but might worsen "
"slightly quality.")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-o",
"--out",
type=Path,
default=Path("separated"),
help="Folder where to put extracted tracks. A subfolder "
"with the model name will be created.")
parser.add_argument("--models",
type=Path,
default=Path("models"),
help="Path to trained models. "
"Also used to store downloaded pretrained models")
parser.add_argument("--dl",
action="store_true",
help="Automatically download model if missing.")
parser.add_argument("-d",
"--device",
default="cuda" if th.cuda.is_available() else "cpu",
help="Device to use, default is cuda if available else cpu")
parser.add_argument("--shifts",
default=0,
type=int,
help="Number of random shifts for equivariant stabilization."
"Increase separation time but improves quality for Demucs. 10 was used "
"in the original paper.")
parser.add_argument("--nosplit",
action="store_false",
default=True,
dest="split",
help="Apply the model to the entire input at once rather than "
"first splitting it in chunks of 10 seconds. Will OOM with Tasnet "
"but will work fine for Demucs if you have at least 16GB of RAM.")
parser.add_argument("--float32",
action="store_true",
help="Convert the output wavefile to use pcm f32 format instead of s16. "
"This should not make a difference if you just plan on listening to the "
"audio but might be needed to compute exactly metrics like SDR etc.")
parser.add_argument("--int16",
action="store_false",
dest="float32",
help="Opposite of --float32, here for compatibility.")
parser.add_argument("--mp3", action="store_true",
help="Convert the output wavs to mp3 with 320 kb/s rate.")
args = parser.parse_args()
name = args.name + ".th"
if args.quantized:
name += ".gz"
model_path = args.models / name
sha256 = PRETRAINED_MODELS.get(name)
if not model_path.is_file():
if sha256 is None:
print(f"No pretrained model {args.name}", file=sys.stderr)
sys.exit(1)
if not args.dl:
print(
f"Could not find model {model_path}, however a matching pretrained model exist, "
"to download it, use --dl",
file=sys.stderr)
sys.exit(1)
args.models.mkdir(exist_ok=True, parents=True)
url = BASE_URL + name
print("Downloading pre-trained model weights, this could take a while...")
download_file(url, model_path)
if sha256 is not None:
verify_file(model_path, sha256)
model = load_model(model_path).to(args.device)
if args.quantized:
args.name += "_quantized"
out = args.out / args.name
out.mkdir(parents=True, exist_ok=True)
source_names = ["drums", "bass", "other", "vocals"]
print(f"Separated tracks will be stored in {out.resolve()}")
for track in args.tracks:
if not track.exists():
print(
f"File {track} does not exist. If the path contains spaces, "
"please try again after surrounding the entire path with quotes \"\".",
file=sys.stderr)
continue
print(f"Separating track {track}")
wav = AudioFile(track).read(streams=0, samplerate=44100, channels=2).to(args.device)
        # Round to the nearest short integer for compatibility with how MusDB loads audio with stempeg.
wav = (wav * 2**15).round() / 2**15
ref = wav.mean(0)
wav = (wav - ref.mean()) / ref.std()
sources = apply_model(model, wav, shifts=args.shifts, split=args.split, progress=True)
sources = sources * ref.std() + ref.mean()
track_folder = out / track.name.split(".")[0]
track_folder.mkdir(exist_ok=True)
for source, name in zip(sources, source_names):
if args.mp3 or not args.float32:
source = (source * 2**15).clamp_(-2**15, 2**15 - 1).short()
source = source.cpu().transpose(0, 1).numpy()
stem = str(track_folder / name)
if args.mp3:
encode_mp3(source, stem + ".mp3", verbose=args.verbose)
else:
wavname = str(track_folder / f"{name}.wav")
wavfile.write(wavname, 44100, source)
if __name__ == "__main__":
main()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gzip
import sys
from concurrent import futures
import musdb
import museval
import torch as th
import tqdm
from scipy.io import wavfile
from torch import distributed
from .utils import apply_model
def evaluate(model,
musdb_path,
eval_folder,
workers=2,
device="cpu",
rank=0,
save=False,
shifts=0,
split=False,
check=True,
world_size=1):
"""
Evaluate model using museval. Run the model
on a single GPU, the bottleneck being the call to museval.
"""
source_names = ["drums", "bass", "other", "vocals"]
output_dir = eval_folder / "results"
output_dir.mkdir(exist_ok=True, parents=True)
json_folder = eval_folder / "results/test"
json_folder.mkdir(exist_ok=True, parents=True)
# we load tracks from the original musdb set
test_set = musdb.DB(musdb_path, subsets=["test"])
for p in model.parameters():
p.requires_grad = False
p.grad = None
pendings = []
with futures.ProcessPoolExecutor(workers or 1) as pool:
for index in tqdm.tqdm(range(rank, len(test_set), world_size), file=sys.stdout):
track = test_set.tracks[index]
out = json_folder / f"{track.name}.json.gz"
if out.exists():
continue
mix = th.from_numpy(track.audio).t().float()
ref = mix.mean(dim=0) # mono mixture
mix = (mix - ref.mean()) / ref.std()
estimates = apply_model(model, mix.to(device), shifts=shifts, split=split)
estimates = estimates * ref.std() + ref.mean()
estimates = estimates.transpose(1, 2)
references = th.stack(
[th.from_numpy(track.targets[name].audio) for name in source_names])
references = references.numpy()
estimates = estimates.cpu().numpy()
if save:
folder = eval_folder / "wav/test" / track.name
folder.mkdir(exist_ok=True, parents=True)
for name, estimate in zip(source_names, estimates):
wavfile.write(str(folder / (name + ".wav")), 44100, estimate)
if workers:
pendings.append((track.name, pool.submit(museval.evaluate, references, estimates)))
else:
pendings.append((track.name, museval.evaluate(references, estimates)))
del references, mix, estimates, track
for track_name, pending in tqdm.tqdm(pendings, file=sys.stdout):
if workers:
pending = pending.result()
sdr, isr, sir, sar = pending
track_store = museval.TrackStore(win=44100, hop=44100, track_name=track_name)
for idx, target in enumerate(source_names):
values = {
"SDR": sdr[idx].tolist(),
"SIR": sir[idx].tolist(),
"ISR": isr[idx].tolist(),
"SAR": sar[idx].tolist()
}
track_store.add_target(target_name=target, values=values)
json_path = json_folder / f"{track_name}.json.gz"
gzip.open(json_path, "w").write(track_store.json.encode('utf-8'))
if world_size > 1:
distributed.barrier()
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch as th
from torch import nn
class Shift(nn.Module):
"""
Randomly shift audio in time by up to `shift` samples.
"""
def __init__(self, shift=8192):
super().__init__()
self.shift = shift
def forward(self, wav):
batch, sources, channels, time = wav.size()
length = time - self.shift
if self.shift > 0:
if not self.training:
wav = wav[..., :length]
else:
offsets = th.randint(self.shift, [batch, sources, 1, 1], device=wav.device, dtype=th.int64)
offsets = offsets.expand(-1, -1, channels, -1)
indexes = th.arange(length, device=wav.device)
wav = wav.gather(3, indexes + offsets)
return wav
class FlipChannels(nn.Module):
"""
Flip left-right channels.
"""
def forward(self, wav):
batch, sources, channels, time = wav.size()
if self.training and wav.size(2) == 2:
left = th.randint(2, (batch, sources, 1, 1), device=wav.device, dtype=th.int64)
left = left.expand(-1, -1, -1, time)
right = 1 - left
wav = th.cat([wav.gather(2, left), wav.gather(2, right)], dim=2)
return wav
class FlipSign(nn.Module):
"""
Random sign flip.
"""
def forward(self, wav):
batch, sources, channels, time = wav.size()
if self.training:
signs = th.randint(2, (batch, sources, 1, 1), device=wav.device, dtype=th.float32)
wav = wav * (2 * signs - 1)
return wav
class Remix(nn.Module):
"""
Shuffle sources to make new mixes.
"""
def __init__(self, group_size=4):
"""
Shuffle sources within one batch.
Each batch is divided into groups of size `group_size` and shuffling is done within
        each group separately. This allows keeping the same probability distribution no matter
the number of GPUs. Without this grouping, using more GPUs would lead to a higher
probability of keeping two sources from the same track together which can impact
performance.
"""
super().__init__()
self.group_size = group_size
def forward(self, wav):
batch, streams, channels, time = wav.size()
device = wav.device
if self.training:
if self.group_size is not None:
group_size = self.group_size
else:
group_size = batch
if batch % group_size != 0:
raise ValueError(f"Batch size {batch} must be divisible by group size {group_size}")
groups = batch // group_size
wav = wav.view(groups, group_size, streams, channels, time)
permutations = th.argsort(th.rand(groups, group_size, streams, 1, 1, device=device),
dim=1)
wav = wav.gather(1, permutations.expand(-1, -1, -1, channels, time))
wav = wav.view(batch, streams, channels, time)
return wav
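# Illustrative sketch (assumed usage, mirroring how train.py composes these
# modules): all augmentations expect a [batch, sources, channels, time] tensor
# and are only active in training mode; the shift of 4096 samples is arbitrary.
def _augment_sketch(batch=4, sources=4, channels=2, time=44100):
    augment = nn.Sequential(FlipSign(), FlipChannels(), Shift(shift=4096),
                            Remix(group_size=4)).train()
    wav = th.randn(batch, sources, channels, time)
    return augment(wav)  # [batch, sources, channels, time - 4096]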
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch as th
from torch import Tensor, nn
from .utils import capture_init, center_trim
from torch.nn.modules.conv import Conv1d, ConvTranspose1d
from typing import Union
class BLSTM(nn.Module):
def __init__(self, dim: int, layers: int=1) -> None:
super().__init__()
self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim)
self.lstm.flatten_parameters()
self.linear = nn.Linear(2 * dim, dim)
def forward(self, x: torch.Tensor):
x = x.permute(2, 0, 1)
x = self.lstm(x)[0]
x = self.linear(x)
x = x.permute(1, 2, 0)
return x
def rescale_conv(conv: Union[Conv1d, ConvTranspose1d], reference: float) -> None:
std = conv.weight.std().detach()
scale = (std / reference)**0.5
conv.weight.data /= scale
if conv.bias is not None:
conv.bias.data /= scale
def rescale_module(module: th.nn.Module, reference: float) -> None:
for sub in module.modules():
if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)):
rescale_conv(sub, reference)
def upsample(x, stride: int):
"""
    Linear upsampling; the output has (time - 1) * stride samples, i.e. roughly `stride` times longer.
"""
batch, channels, time = x.size()
weight = th.arange(stride, device=x.device, dtype=th.float) / stride
x = x.view(batch, channels, time, 1)
out = x[..., :-1, :] * (1 - weight) + x[..., 1:, :] * weight
return out.reshape(batch, channels, -1)
def downsample(x, stride: int):
"""
Downsample x by decimation.
"""
return x[:, :, ::stride]
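# Illustrative sketch (not part of the original file): `upsample` interpolates
# linearly between neighbouring frames, `downsample` decimates back.
def _resample_sketch(stride: int = 4):
    x = th.randn(1, 3, 16)
    up = upsample(x, stride)        # [1, 3, (16 - 1) * stride]
    down = downsample(up, stride)   # [1, 3, 16 - 1]
    return up.shape, down.shape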
class Demucs(nn.Module):
__constants__ = ["stride"]
@capture_init
def __init__(self,
sources: int=4,
audio_channels: int=2,
channels: int=64,
depth: int=6,
rewrite: bool=True,
glu: bool=True,
upsample: bool=False,
rescale: float=0.1,
kernel_size: int=8,
stride: int=4,
growth: float=2.,
lstm_layers: int=2,
context: int=3) -> None:
"""
Args:
sources (int): number of sources to separate
audio_channels (int): stereo or mono
channels (int): first convolution channels
depth (int): number of encoder/decoder layers
rewrite (bool): add 1x1 convolution to each encoder layer
and a convolution to each decoder layer.
For the decoder layer, `context` gives the kernel size.
glu (bool): use glu instead of ReLU
upsample (bool): use linear upsampling with convolutions
Wave-U-Net style, instead of transposed convolutions
            rescale (float): rescale initial weights of convolutions
to get their standard deviation closer to `rescale`
kernel_size (int): kernel size for convolutions
stride (int): stride for convolutions
growth (float): multiply (resp divide) number of channels by that
for each layer of the encoder (resp decoder)
lstm_layers (int): number of lstm layers, 0 = no lstm
context (int): kernel size of the convolution in the
decoder before the transposed convolution. If > 1,
will provide some context from neighboring time
steps.
"""
super().__init__()
self.audio_channels = audio_channels
self.sources = sources
self.kernel_size = kernel_size
self.context = context
self.stride = stride
self.depth = depth
self.upsample = upsample
self.channels = channels
self.encoder = nn.ModuleList()
self.decoder = nn.ModuleList()
self.final = None
if upsample:
self.final = nn.Conv1d(channels + audio_channels, sources * audio_channels, 1)
stride = 1
if glu:
activation = nn.GLU(dim=1)
ch_scale = 2
else:
activation = nn.ReLU()
ch_scale = 1
in_channels = audio_channels
for index in range(depth):
encode = []
encode += [nn.Conv1d(in_channels, channels, kernel_size, stride), nn.ReLU()]
if rewrite:
encode += [nn.Conv1d(channels, ch_scale * channels, 1), activation]
self.encoder.append(nn.Sequential(*encode))
decode = []
if index > 0:
out_channels = in_channels
else:
if upsample:
out_channels = channels
else:
out_channels = sources * audio_channels
if rewrite:
decode += [nn.Conv1d(channels, ch_scale * channels, context), activation]
if upsample:
decode += [
nn.Conv1d(channels, out_channels, kernel_size, stride=1),
]
else:
decode += [nn.ConvTranspose1d(channels, out_channels, kernel_size, stride)]
if index > 0:
decode.append(nn.ReLU())
self.decoder.insert(0, nn.Sequential(*decode))
in_channels = channels
channels = int(growth * channels)
channels = in_channels
if lstm_layers:
self.lstm = BLSTM(channels, lstm_layers)
else:
self.lstm = None
if rescale:
rescale_module(self, reference=rescale)
def valid_length(self, length):
"""
        Return the nearest valid length to use with the model so that
        there are no time steps left over after the convolutions, i.e. for all
        layers, (size of the input - kernel_size) % stride == 0.
        If the mixture has a valid length, the estimated sources
        will have exactly the same length when context = 1. If context > 1,
        the two signals can be center trimmed to match.
        For training, extracts should have a valid length. For evaluation
        on full tracks we recommend padding to a valid length and center trimming
        the output, as done in `utils.apply_model`.
"""
for _ in range(self.depth):
if self.upsample:
length = math.ceil(length / self.stride) + self.kernel_size - 1
else:
length = math.ceil((length - self.kernel_size) / self.stride) + 1
length = max(1, length)
length += self.context - 1
for _ in range(self.depth):
if self.upsample:
length = length * self.stride + self.kernel_size - 1
else:
length = (length - 1) * self.stride + self.kernel_size
return int(length)
def forward(self, mix: Tensor) -> Tensor:
x = mix
saved = [x]
for encode in self.encoder:
x = encode(x)
saved.append(x)
if self.upsample:
x = downsample(x, self.stride)
if self.lstm is not None:
x = self.lstm(x)
for decode in self.decoder:
if self.upsample:
x = upsample(x, stride=self.stride)
skip = center_trim(saved.pop(-1), x)
x = x + skip
x = decode(x)
if self.final is not None:
skip = center_trim(saved.pop(-1), x)
x = th.cat([x, skip], dim=1)
x = self.final(x)
x = x.view(x.size(0), self.sources, self.audio_channels, x.size(-1))
return x
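# Illustrative sketch (assumed usage, mirroring utils.apply_model): pad the
# mixture to a valid length so every convolution window is full, run the model,
# then center-trim the estimates back to the original length. The small
# hyper-parameters are arbitrary, chosen only to keep the example fast.
def _demucs_sketch(length: int = 44100):
    model = Demucs(sources=4, audio_channels=2, channels=16, depth=3, lstm_layers=0)
    mix = th.randn(1, 2, length)
    delta = model.valid_length(length) - length
    padded = nn.functional.pad(mix, (delta // 2, delta - delta // 2))
    with th.no_grad():
        out = model(padded)
    return center_trim(out, mix)  # [1, 4, 2, length]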
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from pathlib import Path
def get_parser():
parser = argparse.ArgumentParser("demucs", description="Train and evaluate Demucs.")
default_raw = None
default_musdb = None
if 'DEMUCS_RAW' in os.environ:
default_raw = Path(os.environ['DEMUCS_RAW'])
if 'DEMUCS_MUSDB' in os.environ:
default_musdb = Path(os.environ['DEMUCS_MUSDB'])
parser.add_argument(
"--raw",
type=Path,
default=default_raw,
help="Path to raw audio, can be faster, see python3 -m demucs.raw to extract.")
parser.add_argument("--no_raw", action="store_const", const=None, dest="raw")
parser.add_argument("-m",
"--musdb",
type=Path,
default=default_musdb,
help="Path to musdb root")
parser.add_argument("--metadata", type=Path, default=Path("metadata/musdb.json"))
parser.add_argument("--samplerate", type=int, default=44100)
parser.add_argument("--audio_channels", type=int, default=2)
parser.add_argument("--samples",
default=44100 * 10,
type=int,
help="number of samples to feed in")
parser.add_argument("--data_stride",
default=44100,
type=int,
help="Stride for chunks, shorter = longer epochs")
parser.add_argument("-w", "--workers", default=10, type=int, help="Loader workers")
parser.add_argument("--eval_workers", default=2, type=int, help="Final evaluation workers")
parser.add_argument("-d",
"--device",
help="Device to train on, default is cuda if available else cpu")
parser.add_argument("--eval_cpu", action="store_true", help="Eval on test will be run on cpu.")
parser.add_argument("--dummy", help="Dummy parameter, useful to create a new checkpoint file")
parser.add_argument("--test", help="Just run the test pipeline + one validation. "
"This should be a filename relative to the models/ folder.")
parser.add_argument("--rank", default=0, type=int)
parser.add_argument("--world_size", default=1, type=int)
parser.add_argument("--master")
parser.add_argument("--checkpoints",
type=Path,
default=Path("checkpoints"),
help="Folder where to store checkpoints etc")
parser.add_argument("--evals",
type=Path,
default=Path("evals"),
help="Folder where to store evals and waveforms")
parser.add_argument("--save",
action="store_true",
help="Save estimated for the test set waveforms")
parser.add_argument("--logs",
type=Path,
default=Path("logs"),
help="Folder where to store logs")
parser.add_argument("--models",
type=Path,
default=Path("models"),
help="Folder where to store trained models")
parser.add_argument("-R",
"--restart",
action='store_true',
help='Restart training, ignoring previous run')
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("-e", "--epochs", type=int, default=120, help="Number of epochs")
parser.add_argument("-r",
"--repeat",
type=int,
default=2,
help="Repeat the train set, longer epochs")
parser.add_argument("-b", "--batch_size", type=int, default=64)
parser.add_argument("--lr", type=float, default=3e-4)
parser.add_argument("--mse", action="store_true", help="Use MSE instead of L1")
parser.add_argument("--no_augment",
action="store_false",
dest="augment",
default=True,
help="No data augmentation")
parser.add_argument("--remix_group_size",
type=int,
default=4,
help="Shuffle sources using group of this size. Useful to somewhat "
"replicate multi-gpu training "
"on less GPUs.")
parser.add_argument("--shifts",
type=int,
default=10,
help="Number of random shifts used for random equivariant stabilization.")
# See model.py for doc
parser.add_argument("--growth",
type=float,
default=2.,
help="Number of channels between two layers will increase by this factor")
parser.add_argument("--depth",
type=int,
default=6,
help="Number of layers for the encoder and decoder")
parser.add_argument("--lstm_layers", type=int, default=2, help="Number of layers for the LSTM")
parser.add_argument("--channels",
type=int,
default=100,
help="Number of channels for the first encoder layer")
parser.add_argument("--kernel_size",
type=int,
default=8,
help="Kernel size for the (transposed) convolutions")
parser.add_argument("--conv_stride",
type=int,
default=4,
help="Stride for the (transposed) convolutions")
parser.add_argument("--context",
type=int,
default=3,
help="Context size for the decoder convolutions "
"before the transposed convolutions")
parser.add_argument("--rescale",
type=float,
default=0.1,
help="Initial weight rescale reference")
parser.add_argument("--no_glu",
action="store_false",
default=True,
dest="glu",
help="Replace all GLUs by ReLUs")
parser.add_argument("--no_rewrite",
action="store_false",
default=True,
dest="rewrite",
help="No 1x1 rewrite convolutions")
parser.add_argument("--upsample",
action="store_true",
help="Use linear upsampling + convolution "
"instead of transposed convolutions")
# Benchmark options
parser.add_argument("--script", action="store_true")
parser.add_argument("--eval", action="store_true")
parser.add_argument("--debug", type=str, default=None)
# Tasnet options
parser.add_argument("--tasnet", action="store_true")
parser.add_argument("--split_valid",
action="store_true",
help="Predict chunks by chunks for valid and test. Required for tasnet")
parser.add_argument("--X", type=int, default=8)
parser.add_argument("--show",
action="store_true",
help="Show model architecture, size and exit")
parser.add_argument("--save_model", action="store_true")
return parser
def get_name(parser, args):
"""
Return the name of an experiment given the args. Some parameters are ignored,
for instance --workers, as they do not impact the final result.
"""
ignore_args = set([
"checkpoints",
"deterministic",
"eval",
"evals",
"eval_cpu",
"eval_workers",
"logs",
"master",
"rank",
"restart",
"save",
"save_model",
"show",
"valid",
"workers",
"world_size",
])
parts = []
name_args = dict(args.__dict__)
for name, value in name_args.items():
if name in ignore_args:
continue
if value != parser.get_default(name):
if isinstance(value, Path):
parts.append(f"{name}={value.name}")
else:
parts.append(f"{name}={value}")
if parts:
name = " ".join(parts)
else:
name = "default"
return name
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import errno
import functools
import gzip
import os
import random
import socket
import tempfile
import warnings
from contextlib import contextmanager
import torch as th
import tqdm
from torch import Tensor, distributed
from torch.nn import functional as F
from typing import Callable, Any
def center_trim(tensor: Tensor, reference: Tensor) -> Tensor:
"""
Center trim `tensor` with respect to `reference`, along the last dimension.
    The target length is taken from `reference.size(-1)`.
If the size difference != 0 mod 2, the extra sample is removed on the right side.
"""
reference_val: int = reference.size(-1)
delta = tensor.size(-1) - reference_val
if delta < 0:
raise ValueError("tensor must be larger than reference. " f"Delta is {delta}.")
if delta:
tensor = tensor[..., delta // 2:-(delta - delta // 2)]
return tensor
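# Illustrative sketch (assumed example): trimming a length-10 tensor to a
# length-6 reference removes two samples from each side.
def _center_trim_sketch():
    tensor = th.arange(10).view(1, 1, 10)
    reference = th.zeros(1, 1, 6)
    return center_trim(tensor, reference)  # values 2..7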
def average_metric(metric, count=1.):
"""
Average `metric` which should be a float across all hosts. `count` should be
the weight for this particular host (i.e. number of examples).
"""
metric = th.tensor([count, count * metric], dtype=th.float32, device='cuda')
distributed.all_reduce(metric, op=distributed.ReduceOp.SUM)
return metric[1].item() / metric[0].item()
def free_port(host='', low=20000, high=40000):
"""
Return a port number that is most likely free.
This could suffer from a race condition although
it should be quite rare.
"""
sock = socket.socket()
while True:
port = random.randint(low, high)
try:
sock.bind((host, port))
except OSError as error:
if error.errno == errno.EADDRINUSE:
continue
raise
return port
def sizeof_fmt(num, suffix='B'):
"""
Given `num` bytes, return human readable size.
Taken from https://stackoverflow.com/a/1094933
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def human_seconds(seconds, display='.2f'):
"""
Given `seconds` seconds, return human readable duration.
"""
value = seconds * 1e6
ratios = [1e3, 1e3, 60, 60, 24]
names = ['us', 'ms', 's', 'min', 'hrs', 'days']
last = names.pop(0)
for name, ratio in zip(names, ratios):
if value / ratio < 0.3:
break
value /= ratio
last = name
return f"{format(value, display)} {last}"
def apply_model(model, mix, shifts=None, split=False, progress=False):
"""
Apply model to a given mixture.
Args:
        shifts (int): if > 0, will shift `mix` in time by a random amount between 0 and 0.5 sec
            and apply the opposite shift to the output. This is repeated `shifts` times and
            all predictions are averaged. This effectively makes the model time equivariant
            and improves SDR by up to 0.2 points.
        split (bool): if True, the input will be broken down into 10 second chunks
            and predictions will be performed individually on each and concatenated.
            Useful for models with a large memory footprint like Tasnet.
progress (bool): if True, show a progress bar (requires split=True)
"""
channels, length = mix.size()
device = mix.device
if split:
out = th.zeros(4, channels, length, device=device)
shift = 44_100 * 10
offsets = range(0, length, shift)
scale = 10
if progress:
offsets = tqdm.tqdm(offsets, unit_scale=scale, ncols=120, unit='seconds')
for offset in offsets:
chunk = mix[..., offset:offset + shift]
chunk_out = apply_model(model, chunk, shifts=shifts)
out[..., offset:offset + shift] = chunk_out
offset += shift
return out
elif shifts:
max_shift = 22050
mix = F.pad(mix, (max_shift, max_shift))
offsets = list(range(max_shift))
random.shuffle(offsets)
out = 0
for offset in offsets[:shifts]:
shifted = mix[..., offset:offset + length + max_shift]
shifted_out = apply_model(model, shifted)
out += shifted_out[..., max_shift - offset:max_shift - offset + length]
out /= shifts
return out
else:
valid_length = model.valid_length(length)
delta = valid_length - length
padded = F.pad(mix, (delta // 2, delta - delta // 2))
with th.no_grad():
out = model(padded.unsqueeze(0))[0]
return center_trim(out, mix)
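# Illustrative sketch (assumed usage, mirroring separate.py): normalize the
# mixture, run the model with shift averaging and chunked inference, then undo
# the normalization. `model` and `mix` ([channels, length]) are assumed given.
def _apply_model_sketch(model, mix, shifts=5):
    ref = mix.mean(0)
    mix = (mix - ref.mean()) / ref.std()
    sources = apply_model(model, mix, shifts=shifts, split=True, progress=True)
    return sources * ref.std() + ref.mean()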
@contextmanager
def temp_filenames(count, delete=True, **kwargs):
names = []
try:
for _ in range(count):
names.append(tempfile.NamedTemporaryFile(delete=False).name)
yield names
finally:
if delete:
for name in names:
os.unlink(name)
def load_model(path):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
load_from = path
if str(path).endswith(".gz"):
load_from = gzip.open(path, "rb")
klass, args, kwargs, state = th.load(load_from, 'cpu')
model = klass(*args, **kwargs)
model.load_state_dict(state)
return model
def save_model(model, path):
args, kwargs = model._init_args_kwargs
klass = model.__class__
state = {k: p.data.to('cpu') for k, p in model.state_dict().items()}
save_to = path
if str(path).endswith(".gz"):
save_to = gzip.open(path, "wb", compresslevel=5)
th.save((klass, args, kwargs, state), save_to)
def capture_init(init: Callable) -> Callable:
@functools.wraps(init)
def __init__(self, *args, **kwargs):
self._init_args_kwargs = (args, kwargs)
init(self, *args, **kwargs)
return __init__
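# Illustrative sketch (not part of the original file): a class decorated with
# `capture_init` records its constructor arguments, which is what lets
# `save_model` store (class, args, kwargs, state_dict) and `load_model` rebuild
# the model without knowing its hyper-parameters in advance. The toy class and
# the "toy.th" path are purely hypothetical.
class _ToyModel(th.nn.Module):
    @capture_init
    def __init__(self, dim: int = 8):
        super().__init__()
        self.linear = th.nn.Linear(dim, dim)
def _save_load_sketch(path="toy.th"):
    model = _ToyModel(dim=16)
    save_model(model, path)   # use a ".gz" suffix to write a compressed checkpoint
    return load_model(path)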
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import tqdm
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import torch
from .utils import apply_model, average_metric, center_trim
def train_model(epoch,
dataset,
model,
criterion,
optimizer,
augment,
repeat=1,
device="cpu",
seed=None,
workers=4,
world_size=1,
batch_size=16):
if world_size > 1:
sampler = DistributedSampler(dataset)
sampler_epoch = epoch * repeat
if seed is not None:
sampler_epoch += seed * 1000
sampler.set_epoch(sampler_epoch)
batch_size //= world_size
loader = DataLoader(dataset, batch_size=batch_size, sampler=sampler, num_workers=workers)
else:
loader = DataLoader(dataset, batch_size=batch_size, num_workers=workers, shuffle=True)
current_loss = 0
for repetition in range(repeat):
tq = tqdm.tqdm(loader,
ncols=120,
desc=f"[{epoch:03d}] train ({repetition + 1}/{repeat})",
leave=False,
file=sys.stdout,
unit=" batch")
total_loss = 0
for idx, streams in enumerate(tq):
if len(streams) < batch_size:
                # skip incomplete batches so that augment.Remix works properly
continue
streams = streams.to(device)
sources = streams[:, 1:]
sources = augment(sources)
mix = sources.sum(dim=1)
estimates = model(mix)
sources = center_trim(sources, estimates)
loss = criterion(estimates, sources)
loss.backward()
optimizer.step()
optimizer.zero_grad()
total_loss += loss.item()
current_loss = total_loss / (1 + idx)
tq.set_postfix(loss=f"{current_loss:.4f}")
# free some space before next round
del streams, sources, mix, estimates, loss
if world_size > 1:
sampler.epoch += 1
if world_size > 1:
current_loss = average_metric(current_loss)
return current_loss
def validate_model(epoch,
dataset,
model,
criterion,
device="cpu",
rank=0,
world_size=1,
shifts=0,
split=False,
debug_file=None):
indexes = range(rank, len(dataset), world_size)
tq = tqdm.tqdm(indexes,
ncols=120,
desc=f"[{epoch:03d}] valid",
leave=False,
file=sys.stdout,
unit=" track")
current_loss = 0
for index in tq:
streams = dataset[index]
# first five minutes to avoid OOM on --upsample models
streams = streams[..., :15_000_000]
streams = streams.to(device)
sources = streams[1:]
mix = streams[0]
estimates = apply_model(model, mix, shifts=shifts, split=split)
if debug_file is not None:
torch.save(estimates, debug_file)
loss = criterion(estimates, sources)
current_loss += loss.item() / len(indexes)
del estimates, streams, sources
if world_size > 1:
current_loss = average_metric(current_loss, len(indexes))
return current_loss
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import subprocess as sp
from pathlib import Path
import numpy as np
import torch
from .utils import temp_filenames
def _read_info(path):
stdout_data = sp.check_output([
'ffprobe', "-loglevel", "panic",
str(path), '-print_format', 'json', '-show_format', '-show_streams'
])
return json.loads(stdout_data.decode('utf-8'))
class AudioFile:
"""
    Allows reading audio from any format supported by ffmpeg, as well as resampling or
converting to mono on the fly. See :method:`read` for more details.
"""
def __init__(self, path: Path):
self.path = Path(path)
self._info = None
def __repr__(self):
features = [("path", self.path)]
features.append(("samplerate", self.samplerate()))
features.append(("channels", self.channels()))
features.append(("streams", len(self)))
features_str = ", ".join(f"{name}={value}" for name, value in features)
return f"AudioFile({features_str})"
@property
def info(self):
if self._info is None:
self._info = _read_info(self.path)
return self._info
@property
def duration(self):
return float(self.info['format']['duration'])
@property
def _audio_streams(self):
return [
index for index, stream in enumerate(self.info["streams"])
if stream["codec_type"] == "audio"
]
def __len__(self):
return len(self._audio_streams)
def channels(self, stream=0):
return int(self.info['streams'][self._audio_streams[stream]]['channels'])
def samplerate(self, stream=0):
return int(self.info['streams'][self._audio_streams[stream]]['sample_rate'])
def read(self,
seek_time=None,
duration=None,
streams=slice(None),
samplerate=None,
channels=None,
temp_folder=None):
"""
        Slightly more efficient implementation than stempeg;
        in particular, this extracts all stems at once
        rather than looping over the file multiple times,
        once per stream.
Args:
seek_time (float): seek time in seconds or None if no seeking is needed.
duration (float): duration in seconds to extract or None to extract until the end.
streams (slice, int or list): streams to extract, can be a single int, a list or
a slice. If it is a slice or list, the output will be of size [S, C, T]
with S the number of streams, C the number of channels and T the number of samples.
If it is an int, the output will be [C, T].
samplerate (int): if provided, will resample on the fly. If None, no resampling will
be done. Original sampling rate can be obtained with :method:`samplerate`.
channels (int): if 1, will convert to mono. We do not rely on ffmpeg for that
                as ffmpeg automatically scales by +3dB to conserve volume when playing on speakers.
See https://sound.stackexchange.com/a/42710.
Our definition of mono is simply the average of the two channels. Any other
value will be ignored.
temp_folder (str or Path or None): temporary folder to use for decoding.
"""
streams = np.array(range(len(self)))[streams]
single = not isinstance(streams, np.ndarray)
if single:
streams = [streams]
if duration is None:
target_size = None
query_duration = None
else:
target_size = int((samplerate or self.samplerate()) * duration)
query_duration = float((target_size + 1) / (samplerate or self.samplerate()))
with temp_filenames(len(streams)) as filenames:
command = ['ffmpeg', '-y']
command += ['-loglevel', 'panic']
if seek_time:
command += ['-ss', str(seek_time)]
command += ['-i', str(self.path)]
for stream, filename in zip(streams, filenames):
command += ['-map', f'0:{self._audio_streams[stream]}']
if query_duration is not None:
command += ['-t', str(query_duration)]
command += ['-threads', '1']
command += ['-f', 'f32le']
if samplerate is not None:
command += ['-ar', str(samplerate)]
command += [filename]
sp.run(command, check=True)
wavs = []
for filename in filenames:
wav = np.fromfile(filename, dtype=np.float32)
wav = torch.from_numpy(wav)
wav = wav.view(-1, self.channels()).t()
if channels == 1:
# Case 1:
                    # The caller asked for 1-channel audio, but the stream has multiple
                    # channels; downmix all channels.
                    # We do the mono conversion here because ffmpeg messes up the volume of mono output
# otherwise. See https://sound.stackexchange.com/a/42710.
wav = wav.mean(dim=0, keepdim=True)
elif self.channels() == 1 and channels != 1:
# Case 2:
                    # The caller asked for multiple channels, but the input file has
                    # a single channel; replicate the audio over all channels.
wav = wav.as_strided(size=(channels, wav.shape[1]), stride=(0, 1))
elif self.channels() >= channels:
# Case 3:
                    # The caller asked for multiple channels, and the input file has
# more channels than requested. In that case return the first channels.
wav = wav[:channels, :]
else:
# Case 4: What is a reasonable choice here?
raise ValueError('The input file has less channels than requested')
if target_size is not None:
wav = wav[..., :target_size]
wavs.append(wav)
wav = torch.stack(wavs, dim=0)
if single:
wav = wav[0]
return wav
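# Illustrative sketch (assumed usage; "track.stem.mp4" is a hypothetical path):
# decode the first stream to stereo 44.1 kHz, as separate.py does.
def _audio_file_sketch(path="track.stem.mp4"):
    wav = AudioFile(path).read(streams=0, samplerate=44100, channels=2)
    return wav  # [channels, samples], 2-D because `streams` is an int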
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import sys
import time
from dataclasses import dataclass, field
from fractions import Fraction
import torch as th
from torch import distributed, nn
from torch.nn.parallel.distributed import DistributedDataParallel
from .augment import FlipChannels, FlipSign, Remix, Shift
from .compressed import StemsSet, build_musdb_metadata, get_musdb_tracks
from .model import Demucs
from .parser import get_name, get_parser
from .raw import Rawset
from .tasnet import ConvTasNet
from .test import evaluate
from .train import train_model, validate_model
from .utils import human_seconds, load_model, save_model, sizeof_fmt
th.backends.cudnn.deterministic = True
th.backends.cudnn.benchmark = False
@dataclass
class SavedState:
metrics: list = field(default_factory=list)
last_state: dict = None
best_state: dict = None
optimizer: dict = None
def main():
parser = get_parser()
args = parser.parse_args()
name = get_name(parser, args)
print(f"Experiment {name}")
if args.musdb is None and args.rank == 0:
print(
"You must provide the path to the MusDB dataset with the --musdb flag. "
"To download the MusDB dataset, see https://sigsep.github.io/datasets/musdb.html.",
file=sys.stderr)
sys.exit(1)
eval_folder = args.evals / name
eval_folder.mkdir(exist_ok=True, parents=True)
args.logs.mkdir(exist_ok=True)
metrics_path = args.logs / f"{name}.json"
eval_folder.mkdir(exist_ok=True, parents=True)
args.checkpoints.mkdir(exist_ok=True, parents=True)
args.models.mkdir(exist_ok=True, parents=True)
if args.device is None:
device = "cpu"
if th.cuda.is_available():
device = "cuda"
else:
device = args.device
th.manual_seed(args.seed)
    # Prevent too many threads from being started when running `museval`, as that can be
    # quite inefficient on NUMA architectures.
os.environ["OMP_NUM_THREADS"] = "1"
if args.world_size > 1:
if device != "cuda" and args.rank == 0:
print("Error: distributed training is only available with cuda device", file=sys.stderr)
sys.exit(1)
th.cuda.set_device(args.rank % th.cuda.device_count())
distributed.init_process_group(backend="nccl",
init_method="tcp://" + args.master,
rank=args.rank,
world_size=args.world_size)
checkpoint = args.checkpoints / f"{name}.th"
checkpoint_tmp = args.checkpoints / f"{name}.th.tmp"
if args.restart and checkpoint.exists():
checkpoint.unlink()
if args.test:
args.epochs = 1
args.repeat = 0
model = load_model(args.models / args.test)
elif args.tasnet:
model = ConvTasNet(audio_channels=args.audio_channels, X=args.X)
else:
model = Demucs(
audio_channels=args.audio_channels,
channels=args.channels,
context=args.context,
depth=args.depth,
glu=args.glu,
growth=args.growth,
kernel_size=args.kernel_size,
lstm_layers=args.lstm_layers,
rescale=args.rescale,
rewrite=args.rewrite,
sources=4,
stride=args.conv_stride,
upsample=args.upsample,
)
if args.script:
model = th.jit.script(model)
model.to(device)
if args.show:
print(model)
size = sizeof_fmt(4 * sum(p.numel() for p in model.parameters()))
print(f"Model size {size}")
return
optimizer = th.optim.Adam(model.parameters(), lr=args.lr)
try:
saved = th.load(checkpoint, map_location='cpu')
except IOError:
saved = SavedState()
else:
model.load_state_dict(saved.last_state)
optimizer.load_state_dict(saved.optimizer)
if args.save_model:
if args.rank == 0:
model.to("cpu")
model.load_state_dict(saved.best_state)
save_model(model, args.models / f"{name}.th")
return
if args.rank == 0:
done = args.logs / f"{name}.done"
if done.exists():
done.unlink()
if args.augment:
augment = nn.Sequential(FlipSign(), FlipChannels(), Shift(args.data_stride),
Remix(group_size=args.remix_group_size)).to(device)
else:
augment = Shift(args.data_stride)
if args.mse:
criterion = nn.MSELoss()
else:
criterion = nn.L1Loss()
    # Set the number of samples so that all convolution windows are full.
    # This prevents a hard-to-debug mistake where the prediction is shifted compared
    # to the input mixture.
samples = model.valid_length(args.samples)
print(f"Number of training samples adjusted to {samples}")
if args.raw:
train_set = Rawset(args.raw / "train",
samples=samples + args.data_stride,
channels=args.audio_channels,
streams=[0, 1, 2, 3, 4],
stride=args.data_stride)
valid_set = Rawset(args.raw / "valid", channels=args.audio_channels)
else:
if not args.metadata.is_file() and args.rank == 0:
build_musdb_metadata(args.metadata, args.musdb, args.workers)
if args.world_size > 1:
distributed.barrier()
metadata = json.load(open(args.metadata))
duration = Fraction(samples + args.data_stride, args.samplerate)
stride = Fraction(args.data_stride, args.samplerate)
train_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="train"),
metadata,
duration=duration,
stride=stride,
samplerate=args.samplerate,
channels=args.audio_channels)
# Reuse training data since we're just benchmarking.
valid_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="train"),
metadata,
samplerate=args.samplerate,
channels=args.audio_channels)
if args.world_size > 1:
dmodel = DistributedDataParallel(model,
device_ids=[th.cuda.current_device()],
output_device=th.cuda.current_device())
else:
dmodel = model
if not args.eval:
for epoch in range(len(saved.metrics), args.epochs):
begin = time.time()
model.train()
train_loss = train_model(epoch,
train_set,
dmodel,
criterion,
optimizer,
augment,
batch_size=args.batch_size,
device=device,
repeat=args.repeat,
seed=args.seed,
workers=args.workers,
world_size=args.world_size)
duration = time.time() - begin
print(f"Epoch {epoch:03d}: "
f"train={train_loss:.8f} "
f"duration={human_seconds(duration)}")
if args.debug is not None and os.path.exists(args.debug):
os.remove(args.debug)
model.eval()
valid_loss = validate_model(0,
valid_set,
model,
criterion,
device=device,
rank=args.rank,
split=args.split_valid,
world_size=args.world_size,
debug_file=args.debug)
if __name__ == "__main__":
main()
|
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 8
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(name="hf_Reformer", test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
import torch
import sys
a = torch.load(sys.argv[1])
b = torch.load(sys.argv[2])
torch.testing.assert_allclose(a,b, rtol=0.01, atol=0.01)
|
import argparse
import torch.distributed as dist
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.tensorboard import SummaryWriter
from .test import test # import test.py to get mAP after each epoch
from .yolo_models import *
from .yolo_utils.datasets import *
from .yolo_utils.utils import *
from .yolo_utils.parse_config import parse_data_cfg
from pathlib import Path
# Shadow the builtin print to silence training output from this script; the
# TorchBench harness does not need the progress logs.
def print(*args):
    pass
def _prefetch_loader(loader, size, fields=[], collate_fn=lambda x: x):
result = []
for index, item in enumerate(loader):
litem = list(item)
for f in fields:
litem[f] = collate_fn(litem[f])
result.append(tuple(litem))
if index == size:
break
return result
def prepare_training_loop(args):
mixed_precision = False
wdir = 'weights' + os.sep # weights dir
last = wdir + 'last.pt'
best = wdir + 'best.pt'
results_file = 'results.txt'
# Hyperparameters
hyp = {'giou': 3.54, # giou loss gain
'cls': 37.4, # cls loss gain
'cls_pw': 1.0, # cls BCELoss positive_weight
'obj': 64.3, # obj loss gain (*=img_size/320 if img_size != 320)
'obj_pw': 1.0, # obj BCELoss positive_weight
'iou_t': 0.20, # iou training threshold
'lr0': 0.01, # initial learning rate (SGD=5E-3, Adam=5E-4)
'lrf': 0.0005, # final learning rate (with cos scheduler)
'momentum': 0.937, # SGD momentum
'weight_decay': 0.0005, # optimizer weight decay
'fl_gamma': 0.0, # focal loss gamma (efficientDet default is gamma=1.5)
'hsv_h': 0.0138, # image HSV-Hue augmentation (fraction)
'hsv_s': 0.678, # image HSV-Saturation augmentation (fraction)
'hsv_v': 0.36, # image HSV-Value augmentation (fraction)
'degrees': 1.98 * 0, # image rotation (+/- deg)
'translate': 0.05 * 0, # image translation (+/- fraction)
'scale': 0.05 * 0, # image scale (+/- gain)
'shear': 0.641 * 0} # image shear (+/- deg)
# Overwrite hyp with hyp*.txt (optional)
f = glob.glob('hyp*.txt')
if f:
print('Using %s' % f[0])
for k, v in zip(hyp.keys(), np.loadtxt(f[0])):
hyp[k] = v
# Print focal loss if gamma > 0
if hyp['fl_gamma']:
print('Using FocalLoss(gamma=%g)' % hyp['fl_gamma'])
def get_train(hyp):
cfg = opt.cfg
data = opt.data
epochs = opt.epochs # 500200 batches at bs 64, 117263 images = 273 epochs
batch_size = opt.batch_size
accumulate = max(round(64 / batch_size), 1) # accumulate n times before optimizer update (bs 64)
weights = opt.weights # initial training weights
imgsz_min, imgsz_max, imgsz_test = opt.img_size # img sizes (min, max, test)
# Image Sizes
gs = 32 # (pixels) grid size
assert math.fmod(imgsz_min, gs) == 0, '--img-size %g must be a %g-multiple' % (imgsz_min, gs)
opt.multi_scale |= imgsz_min != imgsz_max # multi if different (min, max)
if opt.multi_scale:
if imgsz_min == imgsz_max:
imgsz_min //= 1.5
imgsz_max //= 0.667
grid_min, grid_max = imgsz_min // gs, imgsz_max // gs
imgsz_min, imgsz_max = int(grid_min * gs), int(grid_max * gs)
img_size = imgsz_max # initialize with max size
# Configure run
# do not init seeds because it is already initialized in __init__.py
# init_seeds(0)
data_dict = parse_data_cfg(data)
train_path = os.path.dirname(__file__) + '/' + data_dict['train']
test_path = os.path.dirname(__file__) + '/' + data_dict['valid']
print(train_path)
nc = 1 if opt.single_cls else int(data_dict['classes']) # number of classes
hyp['cls'] *= nc / 80 # update coco-tuned hyp['cls'] to current dataset
# Remove previous results
for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
os.remove(f)
# Initialize model
model = Darknet(cfg).to(device)
# Optimizer
pg0, pg1, pg2 = [], [], [] # optimizer parameter groups
for k, v in dict(model.named_parameters()).items():
if '.bias' in k:
pg2 += [v] # biases
elif 'Conv2d.weight' in k:
pg1 += [v] # apply weight_decay
else:
pg0 += [v] # all else
if opt.adam:
# hyp['lr0'] *= 0.1 # reduce lr (i.e. SGD=5E-3, Adam=5E-4)
optimizer = optim.Adam(pg0, lr=hyp['lr0'])
# optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)
else:
optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay
optimizer.add_param_group({'params': pg2}) # add pg2 (biases)
print('Optimizer groups: %g .bias, %g Conv2d.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
del pg0, pg1, pg2
start_epoch = 0
best_fitness = 0.0
attempt_download(weights)
if weights.endswith('.pt'): # pytorch format
# possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
ckpt = torch.load(weights, map_location=device)
# load model
try:
ckpt['model'] = {k: v for k, v in ckpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
model.load_state_dict(ckpt['model'], strict=False)
except KeyError as e:
s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
"See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
raise KeyError(s) from e
# load optimizer
if ckpt['optimizer'] is not None:
optimizer.load_state_dict(ckpt['optimizer'])
best_fitness = ckpt['best_fitness']
# load results
# if ckpt.get('training_results') is not None:
# with open(results_file, 'w') as file:
# file.write(ckpt['training_results']) # write results.txt
# epochs
start_epoch = ckpt['epoch'] + 1
if epochs < start_epoch:
print('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' %
(opt.weights, ckpt['epoch'], epochs))
epochs += ckpt['epoch'] # finetune additional epochs
del ckpt
elif len(weights) > 0: # darknet format
# possible weights are '*.weights', 'yolov3-tiny.conv.15', 'darknet53.conv.74' etc.
load_darknet_weights(model, weights)
if opt.freeze_layers:
output_layer_indices = [idx - 1 for idx, module in enumerate(model.module_list) if isinstance(module, YOLOLayer)]
freeze_layer_indices = [x for x in range(len(model.module_list)) if
(x not in output_layer_indices) and
(x - 1 not in output_layer_indices)]
for idx in freeze_layer_indices:
for parameter in model.module_list[idx].parameters():
parameter.requires_grad_(False)
# Mixed precision training https://github.com/NVIDIA/apex
if mixed_precision:
model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)
# Scheduler https://arxiv.org/pdf/1812.01187.pdf
lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.95 + 0.05 # cosine
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
scheduler.last_epoch = start_epoch - 1 # see link below
# https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822
# Plot lr schedule
# y = []
# for _ in range(epochs):
# scheduler.step()
# y.append(optimizer.param_groups[0]['lr'])
# plt.plot(y, '.-', label='LambdaLR')
# plt.xlabel('epoch')
# plt.ylabel('LR')
# plt.tight_layout()
# plt.savefig('LR.png', dpi=300)
# Initialize distributed training
# if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
# dist.init_process_group(backend='nccl', # 'distributed backend'
# init_method='tcp://127.0.0.1:9999', # distributed training init method
# world_size=1, # number of nodes for distributed training
# rank=0) # distributed training node rank
# model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
# model.yolo_layers = model.module.yolo_layers # move yolo layer indices to top level
# Dataset
dataset = LoadImagesAndLabels(train_path, img_size, batch_size,
augment=True,
hyp=hyp, # augmentation hyperparameters
rect=opt.rect, # rectangular training
cache_images=opt.cache_images,
single_cls=opt.single_cls)
# Dataloader
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]) # number of workers
# load with single process
nw = 0
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
shuffle=not opt.rect, # Shuffle=True unless rectangular training is used
pin_memory=True,
collate_fn=dataset.collate_fn)
# Testloader
testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, imgsz_test, batch_size,
hyp=hyp,
rect=True,
cache_images=opt.cache_images,
single_cls=opt.single_cls),
batch_size=batch_size,
num_workers=nw,
pin_memory=True,
collate_fn=dataset.collate_fn)
# TorchBench: prefetch the dataloader
if opt.prefetch:
dataloader = _prefetch_loader(dataloader, size=opt.train_num_batch*batch_size,
fields=[0, 1],
collate_fn=lambda x: x.to(device) if isinstance(x, torch.Tensor) else x)
# Model parameters
model.nc = nc # attach number of classes to model
model.hyp = hyp # attach hyperparameters to model
model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou)
model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) # attach class weights
# Model EMA
ema = torch_utils.ModelEMA(model)
def train_loop(epochs=1):
epoch = 0
nonlocal img_size, best_fitness
# Start training
nb = len(dataloader) # number of batches
n_burn = max(3 * nb, 500) # burn-in iterations, max(3 epochs, 500 iterations)
maps = np.zeros(nc) # mAP per class
# torch.autograd.set_detect_anomaly(True)
results = (0, 0, 0, 0, 0, 0, 0) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
t0 = time.time()
# print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))
# print('Using %g dataloader workers' % nw)
# print('Starting training for %g epochs...' % epochs)
for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------
model.train()
# Update image weights (optional)
if dataset.image_weights:
w = model.class_weights.cpu().numpy() * (1 - maps) ** 2 # class weights
image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n) # rand weighted idx
mloss = torch.zeros(4).to(device) # mean losses
# print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
# pbar = tqdm(zip(range(opt.train_num_batch), dataloader), total=nb) # progress bar
pbar = zip(range(opt.train_num_batch), dataloader)
for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------
if i > 3:
break
ni = i + nb * epoch # number integrated batches (since train start)
imgs = imgs.to(device).float() / 255.0 # uint8 to float32, 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
# Burn-in
if ni <= n_burn:
xi = [0, n_burn] # x interp
model.gr = np.interp(ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou)
accumulate = max(1, np.interp(ni, xi, [1, 64 / batch_size]).round())
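                    # accumulate ramps from 1 to 64/batch_size, so the effective batch size approaches 64 images by the end of burn-in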
for j, x in enumerate(optimizer.param_groups):
# bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
x['weight_decay'] = np.interp(ni, xi, [0.0, hyp['weight_decay'] if j == 1 else 0.0])
if 'momentum' in x:
x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])
# Multi-Scale
if opt.multi_scale:
                    if ni / accumulate % 1 == 0:  # adjust img_size (67% - 150%) every `accumulate` batches
img_size = random.randrange(grid_min, grid_max + 1) * gs
sf = img_size / max(imgs.shape[2:]) # scale factor
if sf != 1:
ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to 32-multiple)
imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)
# Forward
pred = model(imgs)
# Loss
loss, loss_items = compute_loss(pred, targets, model)
if not torch.isfinite(loss):
print('WARNING: non-finite loss, ending training ', loss_items)
return results
# Backward
loss *= batch_size / 64 # scale loss
if mixed_precision:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# Optimize
if ni % accumulate == 0:
optimizer.step()
optimizer.zero_grad()
ema.update(model)
# Print
mloss = (mloss * i + loss_items) / (i + 1) # update mean losses
mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0) # (GB)
s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)
# pbar.set_description(s)
# Plot
if ni < 1:
f = 'train_batch%g.jpg' % i # filename
# TorchBench: do not write result jpg
# res = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
# if tb_writer:
# tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)
# tb_writer.add_graph(model, imgs) # add model to tensorboard
# end batch ------------------------------------------------------------------------------------------------
# Update scheduler
scheduler.step()
# Process epoch results
ema.update_attr(model)
final_epoch = epoch + 1 == epochs
# if not opt.notest or final_epoch: # Calculate mAP
# is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80
# results, maps = test(cfg,
# data,
# batch_size=batch_size,
# imgsz=imgsz_test,
# model=ema.ema,
# save_json=final_epoch and is_coco,
# single_cls=opt.single_cls,
# dataloader=testloader,
# multi_label=ni > n_burn)
# Write
# with open(results_file, 'a') as f:
# f.write(s + '%10.3g' * 7 % results + '\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
if len(opt.name) and opt.bucket:
os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))
# Tensorboard
if tb_writer:
tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
for x, tag in zip(list(mloss[:-1]) + list(results), tags):
tb_writer.add_scalar(tag, x, epoch)
# Update best mAP
fi = fitness(np.array(results).reshape(1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1]
if fi > best_fitness:
best_fitness = fi
# Save model
save = (not opt.nosave) or (final_epoch and not opt.evolve)
# TorchBench: do not save the result
save = False
if save:
with open(results_file, 'r') as f: # create checkpoint
ckpt = {'epoch': epoch,
'best_fitness': best_fitness,
'training_results': f.read(),
'model': ema.ema.module.state_dict() if hasattr(model, 'module') else ema.ema.state_dict(),
'optimizer': None if final_epoch else optimizer.state_dict()}
# Save last, best and delete
# torch.save(ckpt, last)
# if (best_fitness == fi) and not final_epoch:
# torch.save(ckpt, best)
del ckpt
# end epoch ----------------------------------------------------------------------------------------------------
# end training
n = opt.name
if len(n):
n = '_' + n if not n.isnumeric() else n
fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
if os.path.exists(f1):
os.rename(f1, f2) # rename
ispt = f2.endswith('.pt') # is *.pt
strip_optimizer(f2) if ispt else None # strip optimizer
os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None # upload
# if not opt.evolve:
# plot_results() # save as results.png
# print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
# dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
# torchbench: disable empty cache
# torch.cuda.empty_cache()
return results
return train_loop, model, dataloader
root = str(Path(__file__).parent.resolve())
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=300) # 500200 batches at bs 16, 117263 COCO images = 273 epochs
parser.add_argument('--batch-size', type=int, default=16) # effective bs = batch_size * accumulate = 16 * 4 = 64
parser.add_argument('--cfg', type=str, default=f'{root}/cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--data', type=str, default=f'{root}/data/coco2017.data', help='*.data path')
parser.add_argument('--multi-scale', action='store_true', help='adjust (67%% - 150%%) img_size every 10 batches')
parser.add_argument('--img-size', nargs='+', type=int, default=[320, 640], help='[min_train, max-train, test]')
parser.add_argument('--rect', action='store_true', help='rectangular training')
parser.add_argument('--resume', action='store_true', help='resume training from last.pt')
parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')
parser.add_argument('--notest', action='store_true', help='only test final epoch')
parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')
parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')
parser.add_argument('--cache-images', action='store_true', help='cache images for faster training')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='initial weights path')
parser.add_argument('--name', default='', help='renames results.txt to results_name.txt if supplied')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1 or cpu)')
parser.add_argument('--adam', action='store_true', help='use adam optimizer')
parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
parser.add_argument('--freeze-layers', action='store_true', help='Freeze non-output layers')
# Extra args added by TorchBench
parser.add_argument('--train-num-batch', type=int, default=1, help='Number of batches to run')
parser.add_argument('--prefetch', action='store_true', help='Whether to prefetch dataloader')
opt = parser.parse_args(args)
opt.weights = last if opt.resume else opt.weights
# check_git_status()
opt.cfg = check_file(opt.cfg) # check file
print(opt.data)
opt.data = check_file(opt.data) # check file
opt.img_size.extend([opt.img_size[-1]] * (3 - len(opt.img_size))) # extend to 3 sizes (min, max, test)
device = torch_utils.select_device(opt.device, apex=mixed_precision, batch_size=opt.batch_size)
if device.type == 'cpu':
mixed_precision = False
# scale hyp['obj'] by img_size (evolved at 320)
# hyp['obj'] *= opt.img_size[0] / 320.
tb_writer = None
if not opt.evolve: # Train normally
# print('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/')
# tb_writer = SummaryWriter(comment=opt.name)
return get_train(hyp) # train normally
else: # Evolve hyperparameters (optional)
opt.notest, opt.nosave = True, True # only test/save final epoch
if opt.bucket:
os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists
for _ in range(1): # generations to evolve
if os.path.exists('evolve.txt'): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = 'single' # parent selection method: 'single' or 'weighted'
x = np.loadtxt('evolve.txt', ndmin=2)
n = min(5, len(x)) # number of previous results to consider
x = x[np.argsort(-fitness(x))][:n] # top n mutations
w = fitness(x) - fitness(x).min() # weights
if parent == 'single' or len(x) == 1:
# x = x[random.randint(0, n - 1)] # random selection
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
elif parent == 'weighted':
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
method, mp, s = 3, 0.9, 0.2 # method, mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([1, 1, 1, 1, 1, 1, 1, 0, .1, 1, 0, 1, 1, 1, 1, 1, 1, 1]) # gains
ng = len(g)
if method == 1:
v = (npr.randn(ng) * npr.random() * g * s + 1) ** 2.0
elif method == 2:
v = (npr.randn(ng) * npr.random(ng) * g * s + 1) ** 2.0
elif method == 3:
v = np.ones(ng)
while all(v == 1): # mutate until a change occurs (prevent duplicates)
# v = (g * (npr.random(ng) < mp) * npr.randn(ng) * s + 1) ** 2.0
v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
hyp[k] = x[i + 7] * v[i] # mutate
# Clip to limits
keys = ['lr0', 'iou_t', 'momentum', 'weight_decay', 'hsv_s', 'hsv_v', 'translate', 'scale', 'fl_gamma']
limits = [(1e-5, 1e-2), (0.00, 0.70), (0.60, 0.98), (0, 0.001), (0, .9), (0, .9), (0, .9), (0, .9), (0, 3)]
for k, v in zip(keys, limits):
hyp[k] = np.clip(hyp[k], v[0], v[1])
# Train mutation
results = train(hyp.copy())
# Write mutation results
print_mutation(hyp, results, opt.bucket)
# Plot results
# plot_evolution_results(hyp)
|
#!/usr/bin/env python
# Make all randomness deterministic
import random
import argparse
import torch
import os
import numpy as np
from contextlib import nullcontext
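# Seed Python, NumPy and PyTorch RNGs so the 'deterministic' header above holds
# (minimal sketch; the seed value 1337 is an assumption, not from the source)
random.seed(1337)
np.random.seed(1337)
torch.manual_seed(1337)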
torch.backends.cudnn.deterministic = False  # allow non-deterministic cuDNN kernels (favours speed over strict reproducibility)
torch.backends.cudnn.benchmark = True  # let cuDNN autotune convolution algorithms for the fixed input size
from shlex import split
from .yolo_train import prepare_training_loop
from . import yolo_train
from typing import Tuple
from .yolo_models import * # set ONNX_EXPORT in models.py
from .yolo_utils.datasets import *
from .yolo_utils.utils import *
from pathlib import Path
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURRENT_DIR.parent.parent, "data", ".data", "coco128")
assert os.path.exists(DATA_DIR), "Couldn't find coco128 data dir, please run install.py again."
class Model(BenchmarkModel):
task = COMPUTER_VISION.SEGMENTATION
# Original train batch size: 16
# Source: https://github.com/ultralytics/yolov3/blob/master/train.py#L447
DEFAULT_TRAIN_BSIZE = 16
DEFAULT_EVAL_BSIZE = 8
# yolov3 CUDA inference test uses amp precision
DEFAULT_EVAL_CUDA_PRECISION = "amp"
# TODO: yolov3 does use an optimizer, but it is inaccessible from this file.
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
# run just 1 epoch
self.num_epochs = 1
self.train_num_batch = 1
self.prefetch = True
if test == "train":
train_args = split(f"--data {DATA_DIR}/coco128.data --img 416 --batch {self.batch_size} --nosave --notest \
--epochs {self.num_epochs} --device {self.device_str} --weights '' \
--train-num-batch {self.train_num_batch} \
--prefetch")
self.training_loop, self.model, self.example_inputs = prepare_training_loop(train_args)
elif test == "eval":
self.model, self.example_inputs = self.prep_eval()
self.amp_context = nullcontext
def prep_eval(self):
parser = argparse.ArgumentParser()
root = str(Path(yolo_train.__file__).parent.absolute())
parser.add_argument('--cfg', type=str, default=f'{root}/cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--names', type=str, default=f'{DATA_DIR}/coco.names', help='*.names path')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
parser.add_argument('--source', type=str, default='data/samples', help='source') # input file/folder, 0 for webcam
parser.add_argument('--output', type=str, default='output', help='output folder') # output folder
parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
parser.add_argument('--half', action='store_true', help='half precision FP16 inference')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
opt = parser.parse_args(['--device', self.device])
opt.cfg = check_file(opt.cfg) # check file
opt.names = check_file(opt.names) # check file
model = Darknet(opt.cfg, opt.img_size)
model.to(opt.device).eval()
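        # dummy input of shape (N, 3, 384, 512): both spatial dims are multiples of the 32-pixel
        # network stride, consistent with the 512-pixel inference size above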
example_inputs = (torch.rand(self.batch_size, 3, 384, 512).to(self.device),)
return model, example_inputs
def get_module(self):
return self.model, self.example_inputs
def train(self):
# the training process is not patched to use scripted models
return self.training_loop()
def eval(self) -> Tuple[torch.Tensor]:
model, example_inputs = self.get_module()
out = model(*example_inputs, augment=False)
pred = out[0]
# Apply NMS
pred = non_max_suppression(pred, 0.3, 0.6,
multi_label=False, classes=None, agnostic=False)
return (out[0],) + out[1]
@property
def device_str(self):
"""YoloV3 uses individual GPU indices."""
return str(
torch.cuda.current_device() if self.device == "cuda"
else self.device
)
|
import argparse
import json
from torch.utils.data import DataLoader
from .yolo_models import *
from .yolo_utils.datasets import *
from .yolo_utils.utils import *
import os.path
def test(cfg,
data,
weights=None,
batch_size=16,
imgsz=416,
conf_thres=0.001,
iou_thres=0.6, # for nms
save_json=False,
single_cls=False,
augment=False,
model=None,
dataloader=None,
multi_label=True):
# Initialize/load model and set device
if model is None:
is_training = False
device = torch_utils.select_device(opt.device, batch_size=batch_size)
verbose = opt.task == 'test'
# Remove previous
for f in glob.glob('test_batch*.jpg'):
os.remove(f)
# Initialize model
model = Darknet(cfg, imgsz)
# Load weights
attempt_download(weights)
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location=device)['model'])
else: # darknet format
load_darknet_weights(model, weights)
# Fuse
model.fuse()
model.to(device)
if device.type != 'cpu' and torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
else: # called by train.py
is_training = True
device = next(model.parameters()).device # get model device
verbose = False
# Configure run
data = parse_data_cfg(data)
nc = 1 if single_cls else int(data['classes']) # number of classes
path = os.path.dirname(__file__) + '/' + data['valid'] # path to test images
names = load_classes(os.path.dirname(__file__) + '/' + data['names']) # class names
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    iouv = iouv[0].view(1)  # comment out this line to evaluate mAP@0.5:0.95
niou = iouv.numel()
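    # each prediction is later marked correct/incorrect at every one of these niou IoU thresholds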
# Dataloader
if dataloader is None:
dataset = LoadImagesAndLabels(path, imgsz, batch_size, rect=True, single_cls=opt.single_cls, pad=0.5)
batch_size = min(batch_size, len(dataset))
dataloader = DataLoader(dataset,
batch_size=batch_size,
num_workers=min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8]),
pin_memory=True,
collate_fn=dataset.collate_fn)
seen = 0
model.eval()
_ = model(torch.zeros((1, 3, imgsz, imgsz), device=device)) if device.type != 'cpu' else None # run once
coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@0.5', 'F1')
p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class = [], [], [], []
for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
imgs = imgs.to(device).float() / 255.0 # uint8 to float32, 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = imgs.shape # batch size, channels, height, width
whwh = torch.Tensor([width, height, width, height]).to(device)
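        # whwh rescales normalized xywh target boxes back to pixel coordinates (used for tbox below)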
# Disable gradients
with torch.no_grad():
# Run model
t = torch_utils.time_synchronized()
inf_out, train_out = model(imgs, augment=augment) # inference and training outputs
t0 += torch_utils.time_synchronized() - t
# Compute loss
if is_training: # if model has loss hyperparameters
loss += compute_loss(train_out, targets, model)[1][:3] # GIoU, obj, cls
# Run NMS
t = torch_utils.time_synchronized()
output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, multi_label=multi_label)
t1 += torch_utils.time_synchronized() - t
# Statistics per image
for si, pred in enumerate(output):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
seen += 1
if pred is None:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Append to text file
# with open('test.txt', 'a') as file:
# [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]
# Clip boxes to image bounds
clip_coords(pred, (height, width))
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(Path(paths[si]).stem.split('_')[-1])
box = pred[:, :4].clone() # xyxy
scale_coords(imgs[si].shape[1:], box, shapes[si][0], shapes[si][1]) # to original shape
box = xyxy2xywh(box) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])],
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5]) * whwh
# Per target class
for cls in torch.unique(tcls_tensor):
                        ti = (cls == tcls_tensor).nonzero().view(-1)  # target indices
                        pi = (cls == pred[:, 5]).nonzero().view(-1)  # prediction indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
for j in (ious > iouv[0]).nonzero():
d = ti[i[j]] # detected target
if d not in detected:
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if batch_i < 1:
f = 'test_batch%g_gt.jpg' % batch_i # filename
plot_images(imgs, targets, paths=paths, names=names, fname=f) # ground truth
f = 'test_batch%g_pred.jpg' % batch_i
plot_images(imgs, output_to_target(output, width, height), paths=paths, names=names, fname=f) # predictions
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats):
p, r, ap, f1, ap_class = ap_per_class(*stats)
if niou > 1:
            p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean(1), ap[:, 0]  # [P, R, mAP@0.5:0.95, mAP@0.5]
mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%10.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1))
# Print results per class
if verbose and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))
# Print speeds
if verbose or save_json:
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Save JSON
if save_json and map and len(jdict):
print('\nCOCO mAP with pycocotools...')
imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataloader.dataset.img_files]
with open('results.json', 'w') as file:
json.dump(jdict, file)
try:
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
# https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')[0]) # initialize COCO ground truth api
cocoDt = cocoGt.loadRes('results.json') # initialize COCO pred api
cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
cocoEval.params.imgIds = imgIds # [:32] # only evaluate these images
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
            # mf1, map = cocoEval.stats[:2]  # update to pycocotools results (mAP@0.5:0.95, mAP@0.5)
except:
print('WARNING: pycocotools must be installed with numpy==1.17 to run correctly. '
'See https://github.com/cocodataset/cocoapi/issues/356')
# Return results
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map, mf1, *(loss.cpu() / len(dataloader)).tolist()), maps
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--data', type=str, default='data/coco2014.data', help='*.data path')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
parser.add_argument('--batch-size', type=int, default=16, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--task', default='test', help="'test', 'study', 'benchmark'")
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
opt = parser.parse_args()
opt.save_json = opt.save_json or any([x in opt.data for x in ['coco.data', 'coco2014.data', 'coco2017.data']])
opt.cfg = check_file(opt.cfg) # check file
opt.data = check_file(opt.data) # check file
print(opt)
# task = 'test', 'study', 'benchmark'
if opt.task == 'test': # (default) test normally
test(opt.cfg,
opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment)
elif opt.task == 'benchmark': # mAPs at 256-640 at conf 0.5 and 0.7
y = []
for i in list(range(256, 640, 128)): # img-size
for j in [0.6, 0.7]: # iou-thres
t = time.time()
r = test(opt.cfg, opt.data, opt.weights, opt.batch_size, i, opt.conf_thres, j, opt.save_json)[0]
y.append(r + (time.time() - t,))
np.savetxt('benchmark.txt', y, fmt='%10.4g') # y = np.loadtxt('study.txt')
|
import subprocess
import sys
import os
from pathlib import Path
def setup_data_dir():
current_dir = Path(os.path.dirname(os.path.realpath(__file__)))
coco128_data_dir = os.path.join(current_dir.parent.parent, "data", ".data", "coco128")
assert os.path.exists(coco128_data_dir), "Couldn't find coco128 data dir, please run install.py again."
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
setup_data_dir()
|
from .yolo_utils.google_utils import *
from .yolo_utils.layers import *
from .yolo_utils.parse_config import *
ONNX_EXPORT = False
def create_modules(module_defs, img_size, cfg):
# Constructs module list of layer blocks from module configuration in module_defs
img_size = [img_size] * 2 if isinstance(img_size, int) else img_size # expand if necessary
_ = module_defs.pop(0) # cfg training hyperparams (unused)
output_filters = [3] # input channels
module_list = nn.ModuleList()
routs = [] # list of layers which rout to deeper layers
yolo_index = -1
for i, mdef in enumerate(module_defs):
modules = nn.Sequential()
if mdef['type'] == 'convolutional':
bn = mdef['batch_normalize']
filters = mdef['filters']
k = mdef['size'] # kernel size
stride = mdef['stride'] if 'stride' in mdef else (mdef['stride_y'], mdef['stride_x'])
if isinstance(k, int): # single-size conv
modules.add_module('Conv2d', nn.Conv2d(in_channels=output_filters[-1],
out_channels=filters,
kernel_size=k,
stride=stride,
padding=k // 2 if mdef['pad'] else 0,
groups=mdef['groups'] if 'groups' in mdef else 1,
bias=not bn))
else: # multiple-size conv
modules.add_module('MixConv2d', MixConv2d(in_ch=output_filters[-1],
out_ch=filters,
k=k,
stride=stride,
bias=not bn))
if bn:
modules.add_module('BatchNorm2d', nn.BatchNorm2d(filters, momentum=0.03, eps=1E-4))
else:
routs.append(i) # detection output (goes into yolo layer)
if mdef['activation'] == 'leaky': # activation study https://github.com/ultralytics/yolov3/issues/441
modules.add_module('activation', nn.LeakyReLU(0.1, inplace=True))
elif mdef['activation'] == 'swish':
modules.add_module('activation', Swish())
elif mdef['activation'] == 'mish':
modules.add_module('activation', Mish())
elif mdef['type'] == 'BatchNorm2d':
filters = output_filters[-1]
modules = nn.BatchNorm2d(filters, momentum=0.03, eps=1E-4)
if i == 0 and filters == 3: # normalize RGB image
# imagenet mean and var https://pytorch.org/docs/stable/torchvision/models.html#classification
modules.running_mean = torch.tensor([0.485, 0.456, 0.406])
modules.running_var = torch.tensor([0.0524, 0.0502, 0.0506])
elif mdef['type'] == 'maxpool':
k = mdef['size'] # kernel size
stride = mdef['stride']
maxpool = nn.MaxPool2d(kernel_size=k, stride=stride, padding=(k - 1) // 2)
if k == 2 and stride == 1: # yolov3-tiny
modules.add_module('ZeroPad2d', nn.ZeroPad2d((0, 1, 0, 1)))
modules.add_module('MaxPool2d', maxpool)
else:
modules = maxpool
elif mdef['type'] == 'upsample':
if ONNX_EXPORT: # explicitly state size, avoid scale_factor
g = (yolo_index + 1) * 2 / 32 # gain
modules = nn.Upsample(size=tuple(int(x * g) for x in img_size)) # img_size = (320, 192)
else:
modules = nn.Upsample(scale_factor=mdef['stride'])
elif mdef['type'] == 'route': # nn.Sequential() placeholder for 'route' layer
layers = mdef['layers']
filters = sum([output_filters[l + 1 if l > 0 else l] for l in layers])
routs.extend([i + l if l < 0 else l for l in layers])
modules = FeatureConcat(layers=layers)
elif mdef['type'] == 'shortcut': # nn.Sequential() placeholder for 'shortcut' layer
layers = mdef['from']
filters = output_filters[-1]
routs.extend([i + l if l < 0 else l for l in layers])
modules = WeightedFeatureFusion(layers=layers, weight='weights_type' in mdef)
elif mdef['type'] == 'reorg3d': # yolov3-spp-pan-scale
pass
elif mdef['type'] == 'yolo':
yolo_index += 1
stride = [32, 16, 8] # P5, P4, P3 strides
if any(x in cfg for x in ['panet', 'yolov4', 'cd53']): # stride order reversed
stride = list(reversed(stride))
layers = mdef['from'] if 'from' in mdef else []
modules = YOLOLayer(anchors=mdef['anchors'][mdef['mask']], # anchor list
nc=mdef['classes'], # number of classes
img_size=img_size, # (416, 416)
yolo_index=yolo_index, # 0, 1, 2...
layers=layers, # output layers
stride=stride[yolo_index])
try:
j = layers[yolo_index] if 'from' in mdef else -1
bias_ = module_list[j][0].bias # shape(255,)
bias = bias_[:modules.no * modules.na].view(modules.na, -1) # shape(3,85)
with torch.no_grad(): # avoids "requires grad is being used in an in-place operation"
bias.data[:, 4] += -4.5 # obj
bias.data[:, 5:] += math.log(0.6 / (modules.nc - 0.99)) # cls (sigmoid(p) = 1/nc)
module_list[j][0].bias = torch.nn.Parameter(bias_, requires_grad=bias_.requires_grad)
except:
print('WARNING: smart bias initialization failure.')
else:
print('Warning: Unrecognized Layer Type: ' + mdef['type'])
# Register module list and number of output filters
module_list.append(modules)
output_filters.append(filters)
routs_binary = [False] * (i + 1)
for i in routs:
routs_binary[i] = True
return module_list, routs_binary
class YOLOLayer(nn.Module):
def __init__(self, anchors, nc, img_size, yolo_index, layers, stride):
super(YOLOLayer, self).__init__()
self.anchors = torch.Tensor(anchors)
self.index = yolo_index # index of this layer in layers
self.layers = layers # model output layer indices
self.stride = stride # layer stride
self.nl = len(layers) # number of output layers (3)
self.na = len(anchors) # number of anchors (3)
self.nc = nc # number of classes (80)
self.no = nc + 5 # number of outputs (85)
self.nx, self.ny, self.ng = 0, 0, 0 # initialize number of x, y gridpoints
self.anchor_vec = self.anchors / self.stride
self.anchor_wh = self.anchor_vec.view(1, self.na, 1, 1, 2)
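        # anchors expressed in grid units (pixels / stride); anchor_wh is reshaped for broadcasting over (bs, na, ny, nx, 2) predictions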
if ONNX_EXPORT:
self.training = False
self.create_grids((img_size[1] // stride, img_size[0] // stride)) # number x, y grid points
def create_grids(self, ng=(13, 13), device='cpu'):
self.nx, self.ny = ng # x and y grid size
self.ng = torch.tensor(ng, dtype=torch.float)
# build xy offsets
if not self.training:
yv, xv = torch.meshgrid([torch.arange(self.ny, device=device), torch.arange(self.nx, device=device)])
self.grid = torch.stack((xv, yv), 2).view((1, 1, self.ny, self.nx, 2)).float()
if self.anchor_vec.device != device:
self.anchor_vec = self.anchor_vec.to(device)
self.anchor_wh = self.anchor_wh.to(device)
def forward(self, p, out):
ASFF = False # https://arxiv.org/abs/1911.09516
if ASFF:
i, n = self.index, self.nl # index in layers, number of layers
p = out[self.layers[i]]
bs, _, ny, nx = p.shape # bs, 255, 13, 13
if (self.nx, self.ny) != (nx, ny):
self.create_grids((nx, ny), p.device)
# outputs and weights
# w = F.softmax(p[:, -n:], 1) # normalized weights
w = torch.sigmoid(p[:, -n:]) * (2 / n) # sigmoid weights (faster)
# w = w / w.sum(1).unsqueeze(1) # normalize across layer dimension
# weighted ASFF sum
p = out[self.layers[i]][:, :-n] * w[:, i:i + 1]
for j in range(n):
if j != i:
p += w[:, j:j + 1] * \
F.interpolate(out[self.layers[j]][:, :-n], size=[ny, nx], mode='bilinear', align_corners=False)
elif ONNX_EXPORT:
bs = 1 # batch size
else:
bs, _, ny, nx = p.shape # bs, 255, 13, 13
if (self.nx, self.ny) != (nx, ny):
self.create_grids((nx, ny), p.device)
# p.view(bs, 255, 13, 13) -- > (bs, 3, 13, 13, 85) # (bs, anchors, grid, grid, classes + xywh)
p = p.view(bs, self.na, self.no, self.ny, self.nx).permute(0, 1, 3, 4, 2).contiguous() # prediction
if self.training:
return p
elif ONNX_EXPORT:
# Avoid broadcasting for ANE operations
m = self.na * self.nx * self.ny
ng = 1. / self.ng.repeat(m, 1)
grid = self.grid.repeat(1, self.na, 1, 1, 1).view(m, 2)
anchor_wh = self.anchor_wh.repeat(1, 1, self.nx, self.ny, 1).view(m, 2) * ng
p = p.view(m, self.no)
xy = torch.sigmoid(p[:, 0:2]) + grid # x, y
wh = torch.exp(p[:, 2:4]) * anchor_wh # width, height
p_cls = torch.sigmoid(p[:, 4:5]) if self.nc == 1 else \
torch.sigmoid(p[:, 5:self.no]) * torch.sigmoid(p[:, 4:5]) # conf
return p_cls, xy * ng, wh
else: # inference
io = p.clone() # inference output
io[..., :2] = torch.sigmoid(io[..., :2]) + self.grid # xy
io[..., 2:4] = torch.exp(io[..., 2:4]) * self.anchor_wh # wh yolo method
io[..., :4] *= self.stride
torch.sigmoid_(io[..., 4:])
return io.view(bs, -1, self.no), p # view [1, 3, 13, 13, 85] as [1, 507, 85]
class Darknet(nn.Module):
# YOLOv3 object detection model
def __init__(self, cfg, img_size=(416, 416), verbose=False):
super(Darknet, self).__init__()
self.module_defs = parse_model_cfg(cfg)
self.module_list, self.routs = create_modules(self.module_defs, img_size, cfg)
self.yolo_layers = get_yolo_layers(self)
# torch_utils.initialize_weights(self)
# Darknet Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
self.version = np.array([0, 2, 5], dtype=np.int32) # (int32) version info: major, minor, revision
self.seen = np.array([0], dtype=np.int64) # (int64) number of images seen during training
# self.info(verbose) if not ONNX_EXPORT else None # print model description
def forward(self, x, augment=False, verbose=False):
if not augment:
return self.forward_once(x)
else: # Augment images (inference and test only) https://github.com/ultralytics/yolov3/issues/931
img_size = x.shape[-2:] # height, width
s = [0.83, 0.67] # scales
y = []
for i, xi in enumerate((x,
torch_utils.scale_img(x.flip(3), s[0], same_shape=False), # flip-lr and scale
torch_utils.scale_img(x, s[1], same_shape=False), # scale
)):
# cv2.imwrite('img%g.jpg' % i, 255 * xi[0].numpy().transpose((1, 2, 0))[:, :, ::-1])
y.append(self.forward_once(xi)[0])
y[1][..., :4] /= s[0] # scale
y[1][..., 0] = img_size[1] - y[1][..., 0] # flip lr
y[2][..., :4] /= s[1] # scale
# for i, yi in enumerate(y): # coco small, medium, large = < 32**2 < 96**2 <
# area = yi[..., 2:4].prod(2)[:, :, None]
# if i == 1:
# yi *= (area < 96. ** 2).float()
# elif i == 2:
# yi *= (area > 32. ** 2).float()
# y[i] = yi
y = torch.cat(y, 1)
return y, None
def forward_once(self, x, augment=False, verbose=False):
img_size = x.shape[-2:] # height, width
yolo_out, out = [], []
if verbose:
print('0', x.shape)
str = ''
# Augment images (inference and test only)
if augment: # https://github.com/ultralytics/yolov3/issues/931
nb = x.shape[0] # batch size
s = [0.83, 0.67] # scales
x = torch.cat((x,
torch_utils.scale_img(x.flip(3), s[0]), # flip-lr and scale
torch_utils.scale_img(x, s[1]), # scale
), 0)
for i, module in enumerate(self.module_list):
name = module.__class__.__name__
if name in ['WeightedFeatureFusion', 'FeatureConcat']: # sum, concat
if verbose:
l = [i - 1] + module.layers # layers
sh = [list(x.shape)] + [list(out[i].shape) for i in module.layers] # shapes
str = ' >> ' + ' + '.join(['layer %g %s' % x for x in zip(l, sh)])
x = module(x, out) # WeightedFeatureFusion(), FeatureConcat()
elif name == 'YOLOLayer':
yolo_out.append(module(x, out))
else: # run module directly, i.e. mtype = 'convolutional', 'upsample', 'maxpool', 'batchnorm2d' etc.
x = module(x)
out.append(x if self.routs[i] else [])
if verbose:
print('%g/%g %s -' % (i, len(self.module_list), name), list(x.shape), str)
str = ''
if self.training: # train
return yolo_out
elif ONNX_EXPORT: # export
x = [torch.cat(x, 0) for x in zip(*yolo_out)]
return x[0], torch.cat(x[1:3], 1) # scores, boxes: 3780x80, 3780x4
else: # inference or test
x, p = zip(*yolo_out) # inference output, training output
x = torch.cat(x, 1) # cat yolo outputs
if augment: # de-augment results
x = torch.split(x, nb, dim=0)
x[1][..., :4] /= s[0] # scale
x[1][..., 0] = img_size[1] - x[1][..., 0] # flip lr
x[2][..., :4] /= s[1] # scale
x = torch.cat(x, 1)
return x, p
def fuse(self):
# Fuse Conv2d + BatchNorm2d layers throughout model
print('Fusing layers...')
fused_list = nn.ModuleList()
for a in list(self.children())[0]:
if isinstance(a, nn.Sequential):
for i, b in enumerate(a):
if isinstance(b, nn.modules.batchnorm.BatchNorm2d):
# fuse this bn layer with the previous conv2d layer
conv = a[i - 1]
fused = torch_utils.fuse_conv_and_bn(conv, b)
a = nn.Sequential(fused, *list(a.children())[i + 1:])
break
fused_list.append(a)
self.module_list = fused_list
self.info() if not ONNX_EXPORT else None # yolov3-spp reduced from 225 to 152 layers
def info(self, verbose=False):
torch_utils.model_info(self, verbose)
def get_yolo_layers(model):
return [i for i, m in enumerate(model.module_list) if m.__class__.__name__ == 'YOLOLayer'] # [89, 101, 113]
def load_darknet_weights(self, weights, cutoff=-1):
# Parses and loads the weights stored in 'weights'
# Establish cutoffs (load layers between 0 and cutoff. if cutoff = -1 all are loaded)
file = Path(weights).name
if file == 'darknet53.conv.74':
cutoff = 75
elif file == 'yolov3-tiny.conv.15':
cutoff = 15
# Read weights file
with open(weights, 'rb') as f:
# Read Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
self.version = np.fromfile(f, dtype=np.int32, count=3) # (int32) version info: major, minor, revision
self.seen = np.fromfile(f, dtype=np.int64, count=1) # (int64) number of images seen during training
weights = np.fromfile(f, dtype=np.float32) # the rest are weights
ptr = 0
for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if mdef['type'] == 'convolutional':
conv = module[0]
if mdef['batch_normalize']:
# Load BN bias, weights, running mean and running variance
bn = module[1]
nb = bn.bias.numel() # number of biases
# Bias
bn.bias.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.bias))
ptr += nb
# Weight
bn.weight.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.weight))
ptr += nb
# Running Mean
bn.running_mean.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.running_mean))
ptr += nb
# Running Var
bn.running_var.data.copy_(torch.from_numpy(weights[ptr:ptr + nb]).view_as(bn.running_var))
ptr += nb
else:
# Load conv. bias
nb = conv.bias.numel()
conv_b = torch.from_numpy(weights[ptr:ptr + nb]).view_as(conv.bias)
conv.bias.data.copy_(conv_b)
ptr += nb
# Load conv. weights
nw = conv.weight.numel() # number of weights
conv.weight.data.copy_(torch.from_numpy(weights[ptr:ptr + nw]).view_as(conv.weight))
ptr += nw
def save_weights(self, path='model.weights', cutoff=-1):
# Converts a PyTorch model to Darket format (*.pt to *.weights)
# Note: Does not work if model.fuse() is applied
with open(path, 'wb') as f:
# Write Header https://github.com/AlexeyAB/darknet/issues/2914#issuecomment-496675346
self.version.tofile(f) # (int32) version info: major, minor, revision
self.seen.tofile(f) # (int64) number of images seen during training
# Iterate through layers
for i, (mdef, module) in enumerate(zip(self.module_defs[:cutoff], self.module_list[:cutoff])):
if mdef['type'] == 'convolutional':
conv_layer = module[0]
# If batch norm, load bn first
if mdef['batch_normalize']:
bn_layer = module[1]
bn_layer.bias.data.cpu().numpy().tofile(f)
bn_layer.weight.data.cpu().numpy().tofile(f)
bn_layer.running_mean.data.cpu().numpy().tofile(f)
bn_layer.running_var.data.cpu().numpy().tofile(f)
# Load conv bias
else:
conv_layer.bias.data.cpu().numpy().tofile(f)
# Load conv weights
conv_layer.weight.data.cpu().numpy().tofile(f)
def convert(cfg='cfg/yolov3-spp.cfg', weights='weights/yolov3-spp.weights'):
# Converts between PyTorch and Darknet format per extension (i.e. *.weights convert to *.pt and vice versa)
# from models import *; convert('cfg/yolov3-spp.cfg', 'weights/yolov3-spp.weights')
# Initialize model
model = Darknet(cfg)
# Load weights and save
if weights.endswith('.pt'): # if PyTorch format
model.load_state_dict(torch.load(weights, map_location='cpu')['model'])
target = weights.rsplit('.', 1)[0] + '.weights'
save_weights(model, path=target, cutoff=-1)
print("Success: converted '%s' to '%s'" % (weights, target))
elif weights.endswith('.weights'): # darknet format
_ = load_darknet_weights(model, weights)
chkpt = {'epoch': -1,
'best_fitness': None,
'training_results': None,
'model': model.state_dict(),
'optimizer': None}
target = weights.rsplit('.', 1)[0] + '.pt'
torch.save(chkpt, target)
print("Success: converted '%s' to '%s'" % (weights, target))
else:
print('Error: extension not supported.')
def attempt_download(weights):
# Attempt to download pretrained weights if not found locally
weights = weights.strip()
msg = weights + ' missing, try downloading from https://drive.google.com/open?id=1LezFG5g3BCW6iYaV89B2i64cqEUZD7e0'
if len(weights) > 0 and not os.path.isfile(weights):
d = {'yolov3-spp.weights': '16lYS4bcIdM2HdmyJBVDOvt3Trx6N3W2R',
'yolov3.weights': '1uTlyDWlnaqXcsKOktP5aH_zRDbfcDp-y',
'yolov3-tiny.weights': '1CCF-iNIIkYesIDzaPvdwlcf7H9zSsKZQ',
'yolov3-spp.pt': '1f6Ovy3BSq2wYq4UfvFUpxJFNDFfrIDcR',
'yolov3.pt': '1SHNFyoe5Ni8DajDNEqgB2oVKBb_NoEad',
'yolov3-tiny.pt': '10m_3MlpQwRtZetQxtksm9jqHrPTHZ6vo',
'darknet53.conv.74': '1WUVBid-XuoUBmvzBVUCBl_ELrzqwA8dJ',
'yolov3-tiny.conv.15': '1Bw0kCpplxUqyRYAJr9RY9SGnOJbo9nEj',
'yolov3-spp-ultralytics.pt': '1UcR-zVoMs7DH5dj3N1bswkiQTA4dmKF4'}
file = Path(weights).name
if file in d:
r = gdrive_download(id=d[file], name=weights)
else: # download from pjreddie.com
url = 'https://pjreddie.com/media/files/' + file
print('Downloading ' + url)
r = os.system('curl -f ' + url + ' -o ' + weights)
# Error check
if not (r == 0 and os.path.exists(weights) and os.path.getsize(weights) > 1E6): # weights exist and > 1MB
os.system('rm ' + weights) # remove partial downloads
raise Exception(msg)
|
import argparse
from models import * # set ONNX_EXPORT in models.py
from utils.datasets import *
from utils.utils import *
from sys import platform  # needed for the 'darwin' check when opening saved results
def detect(save_img=False):
imgsz = (320, 192) if ONNX_EXPORT else opt.img_size # (320, 192) or (416, 256) or (608, 352) for (height, width)
out, source, weights, half, view_img, save_txt = opt.output, opt.source, opt.weights, opt.half, opt.view_img, opt.save_txt
webcam = source == '0' or source.startswith('rtsp') or source.startswith('http') or source.endswith('.txt')
# Initialize
device = torch_utils.select_device(device='cpu' if ONNX_EXPORT else opt.device)
if os.path.exists(out):
shutil.rmtree(out) # delete output folder
os.makedirs(out) # make new output folder
# Initialize model
model = Darknet(opt.cfg, imgsz)
# Load weights
attempt_download(weights)
if weights.endswith('.pt'): # pytorch format
model.load_state_dict(torch.load(weights, map_location=device)['model'])
else: # darknet format
load_darknet_weights(model, weights)
# Second-stage classifier
classify = False
if classify:
modelc = torch_utils.load_classifier(name='resnet101', n=2) # initialize
modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']) # load weights
modelc.to(device).eval()
# Eval mode
model.to(device).eval()
# Fuse Conv2d + BatchNorm2d layers
# model.fuse()
# Export mode
if ONNX_EXPORT:
model.fuse()
img = torch.zeros((1, 3) + imgsz) # (1, 3, 320, 192)
f = opt.weights.replace(opt.weights.split('.')[-1], 'onnx') # *.onnx filename
torch.onnx.export(model, img, f, verbose=False, opset_version=11,
input_names=['images'], output_names=['classes', 'boxes'])
# Validate exported model
import onnx
model = onnx.load(f) # Load the ONNX model
onnx.checker.check_model(model) # Check that the IR is well formed
print(onnx.helper.printable_graph(model.graph)) # Print a human readable representation of the graph
return
# Half precision
half = half and device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Set Dataloader
vid_path, vid_writer = None, None
if webcam:
view_img = True
torch.backends.cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadStreams(source, img_size=imgsz)
else:
save_img = True
dataset = LoadImages(source, img_size=imgsz)
# Get names and colors
names = load_classes(opt.names)
colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
_ = model(img.half() if half else img.float()) if device.type != 'cpu' else None # run once
for path, img, im0s, vid_cap in dataset:
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = torch_utils.time_synchronized()
pred = model(img, augment=opt.augment)[0]
t2 = torch_utils.time_synchronized()
# to float
if half:
pred = pred.float()
# Apply NMS
pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres,
multi_label=False, classes=opt.classes, agnostic=opt.agnostic_nms)
# Apply Classifier
if classify:
pred = apply_classifier(pred, modelc, img, im0s)
# Process detections
for i, det in enumerate(pred): # detections for image i
if webcam: # batch_size >= 1
p, s, im0 = path[i], '%g: ' % i, im0s[i].copy()
else:
p, s, im0 = path, '', im0s
save_path = str(Path(out) / Path(p).name)
s += '%gx%g ' % img.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
if det is not None and len(det):
# Rescale boxes from imgsz to im0 size
det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, -1].unique():
n = (det[:, -1] == c).sum() # detections per class
s += '%g %ss, ' % (n, names[int(c)]) # add to string
# Write results
for *xyxy, conf, cls in det:
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
with open(save_path[:save_path.rfind('.')] + '.txt', 'a') as file:
file.write(('%g ' * 5 + '\n') % (cls, *xywh)) # label format
if save_img or view_img: # Add bbox to image
label = '%s %.2f' % (names[int(cls)], conf)
plot_one_box(xyxy, im0, label=label, color=colors[int(cls)])
# Print time (inference + NMS)
print('%sDone. (%.3fs)' % (s, t2 - t1))
# Stream results
if view_img:
cv2.imshow(p, im0)
if cv2.waitKey(1) == ord('q'): # q to quit
raise StopIteration
# Save results (image with detections)
if save_img:
if dataset.mode == 'images':
cv2.imwrite(save_path, im0)
else:
if vid_path != save_path: # new video
vid_path = save_path
if isinstance(vid_writer, cv2.VideoWriter):
vid_writer.release() # release previous video writer
fps = vid_cap.get(cv2.CAP_PROP_FPS)
w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*opt.fourcc), fps, (w, h))
vid_writer.write(im0)
if save_txt or save_img:
print('Results saved to %s' % os.getcwd() + os.sep + out)
if platform == 'darwin': # MacOS
os.system('open ' + save_path)
print('Done. (%.3fs)' % (time.time() - t0))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='cfg/yolov3-spp.cfg', help='*.cfg path')
parser.add_argument('--names', type=str, default='data/coco.names', help='*.names path')
parser.add_argument('--weights', type=str, default='weights/yolov3-spp-ultralytics.pt', help='weights path')
parser.add_argument('--source', type=str, default='data/samples', help='source') # input file/folder, 0 for webcam
parser.add_argument('--output', type=str, default='output', help='output folder') # output folder
parser.add_argument('--img-size', type=int, default=512, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.3, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--fourcc', type=str, default='mp4v', help='output video codec (verify ffmpeg support)')
parser.add_argument('--half', action='store_true', help='half precision FP16 inference')
parser.add_argument('--device', default='', help='device id (i.e. 0 or 0,1) or cpu')
parser.add_argument('--view-img', action='store_true', help='display results')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--classes', nargs='+', type=int, help='filter by class')
parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true', help='augmented inference')
opt = parser.parse_args()
opt.cfg = check_file(opt.cfg) # check file
opt.names = check_file(opt.names) # check file
print(opt)
with torch.no_grad():
detect()
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from .utils import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
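# Benchmark patch (assumption): the no-op print/tqdm below silence per-image logging and progress bars during timed runs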
def print(*args, **kwargs):
    pass
def tqdm(it, **kwargs):
return it
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
class LoadImages: # for inference
def __init__(self, path, img_size=416):
path = str(Path(path)) # os-agnostic
files = []
if os.path.isdir(path):
files = sorted(glob.glob(os.path.join(path, '*.*')))
elif os.path.isfile(path):
files = [path]
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
nI, nV = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nF = nI + nV # number of files
self.video_flag = [False] * nI + [True] * nV
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(path, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nF:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nF: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nF, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nF # number of files
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=416):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:[email protected]/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:[email protected]/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:[email protected]:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=416):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=416, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, pad=0.0):
try:
path = str(Path(path)) # os-agnostic
parent = str(Path(path).parent) + os.sep
if os.path.isfile(path): # file
with open(path, 'r') as f:
f = f.read().splitlines()
f = [x.replace('./', parent) if x.startswith('./') else x for x in f] # local to global path
elif os.path.isdir(path): # folder
f = glob.iglob(path + os.sep + '*.*')
else:
raise Exception('%s does not exist' % path)
self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
        except Exception as e:
            raise Exception('Error loading data from %s: %s. See %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index (np.int alias removed in newer NumPy)
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
for x in self.img_files]
# Read image shapes (wh)
sp = path.replace('.txt', '') + '.shapes' # shapefile path
try:
with open(sp, 'r') as f: # read existing shapefile
s = [x.split() for x in f.read().splitlines()]
assert len(s) == n, 'Shapefile out of sync'
        except Exception:
s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
np.savetxt(sp, s, fmt='%g') # overwrites existing (if any)
self.shapes = np.array(s, dtype=np.float64)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
            self.batch_shapes = np.ceil(np.array(shapes) * img_size / 32. + pad).astype(int) * 32
# Cache labels
self.imgs = [None] * n
self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
np_labels_path = str(Path(self.label_files[0]).parent) + '.npy' # saved labels in *.npy file
if os.path.isfile(np_labels_path):
s = np_labels_path # print string
x = np.load(np_labels_path, allow_pickle=True)
if len(x) == n:
self.labels = x
labels_loaded = True
else:
s = path.replace('images', 'labels')
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
if labels_loaded:
l = self.labels[i]
# np.savetxt(file, l, '%g') # save *.txt from *.npy file
else:
try:
with open(file, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
                except Exception:
nm += 1 # print('missing labels for image %s' % self.img_files[i]) # file missing
continue
if l.shape[0]:
                assert l.shape[1] == 5, 'labels require 5 columns each: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
assert nf > 0 or n == 20288, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
if not labels_loaded and n > 1000:
print('Saving labels to %s for faster future loading' % np_labels_path)
np.save(np_labels_path, self.labels) # save for next time
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
if cache_images: # if training
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
                gb += self.imgs[i].nbytes
                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)  # report cached size in the progress bar
# Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
detect_corrupted_images = False
if detect_corrupted_images:
from skimage import io # conda install -c conda-forge scikit-image
for file in tqdm(self.img_files, desc='Detecting corrupted images'):
try:
_ = io.imread(file)
                except Exception:
print('Corrupted image detected: %s' % file)
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_affine(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
# convert xyxy to xywh
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])
# Normalize coordinates 0 - 1
labels[:, [2, 4]] /= img.shape[0] # height
labels[:, [1, 3]] /= img.shape[1] # width
if self.augment:
# random left-right flip
lr_flip = True
if lr_flip and random.random() < 0.5:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
# random up-down flip
ud_flip = False
if ud_flip and random.random() < 0.5:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
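# Hedged usage sketch (not part of the original file): the dataset is normally wrapped in a torch
# DataLoader with the collate_fn above, which writes the within-batch image index into column 0 of
# every label row so that downstream target building can match boxes to images. The list path and
# the hyp dict below are placeholders.
#   dataset = LoadImagesAndLabels('data/coco_64img.txt', img_size=416, batch_size=16,
#                                 augment=True, hyp=hyp, rect=False)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True,
#                                        collate_fn=LoadImagesAndLabels.collate_fn)
#   for imgs, targets, paths, shapes in loader:
#       imgs = imgs.float() / 255.0  # uint8 0-255 -> float 0.0-1.0 before the forward pass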
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
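# Hedged usage sketch (not part of the original file): augment_hsv modifies the BGR image in place
# via cv2.cvtColor(..., dst=img), so call it on a copy if the original frame must be kept. The gain
# values below are illustrative, not taken from this file.
#   img = cv2.imread('example.jpg')  # placeholder path
#   augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4)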
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
xc, yc = [int(random.uniform(s * 0.5, s * 1.5)) for _ in range(2)] # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_affine(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
border=-s // 2) # border to remove
return img4, labels4
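# Hedged usage sketch (not part of the original file): load_mosaic takes the dataset object itself
# as its first argument (it reads self.labels, self.img_size and self.hyp), so it is only usable
# once a LoadImagesAndLabels instance exists.
#   img4, labels4 = load_mosaic(dataset, index=0)  # mosaic of 4 images and its pixel-xyxy labels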
def letterbox(img, new_shape=(416, 416), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a stride-multiple rectangle (np.mod below uses 64) https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = new_shape
ratio = new_shape[0] / shape[1], new_shape[1] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
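# Hedged usage sketch (not part of the original file): letterbox keeps the aspect ratio and returns
# the scale ratio and per-side padding, which are needed later to map detections back onto the
# original image. The image path is a placeholder.
#   img0 = cv2.imread('example.jpg')
#   img, ratio, (dw, dh) = letterbox(img0, new_shape=416, auto=False)
#   # a point (x, y) in the letterboxed image maps back to roughly ((x - dw) / ratio[0], (y - dh) / ratio[1])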
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=0):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
# targets = [cls, xyxy]
height = img.shape[0] + border * 2
width = img.shape[1] + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(-translate, translate) * img.shape[0] + border # x translation (pixels)
T[1, 2] = random.uniform(-translate, translate) * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Combined rotation matrix
M = S @ T @ R # ORDER IS IMPORTANT HERE!!
if (border != 0) or (M != np.eye(3)).any(): # image changed
img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# reject warped points outside of image
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16)) # aspect ratio
i = (w > 4) & (h > 4) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 10)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
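# Hedged usage sketch (not part of the original file): random_affine expects pixel-space targets of
# the form [cls, x1, y1, x2, y2] and returns the warped image together with the boxes that survive
# the size/aspect-ratio filter. The parameter values below are illustrative.
#   img = cv2.imread('example.jpg')  # placeholder path
#   targets = np.array([[0, 50, 60, 200, 220]], dtype=np.float32)
#   img, targets = random_affine(img, targets, degrees=2, translate=0.05, scale=0.05, shear=1)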
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
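# Hedged usage sketch (not part of the original file): cutout paints random rectangles into the
# image in place and returns only the labels that remain mostly unobscured; the commented-out call
# in __getitem__ above shows where it would normally be applied.
#   labels = cutout(img, labels)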
def reduce_img_size(path='../data/sm4/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
        except Exception:
print('WARNING: image failure %s' % f)
def convert_images2bmp(): # from utils.datasets import *; convert_images2bmp()
# Save images
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
# for path in ['../coco/images/val2014', '../coco/images/train2014']:
for path in ['../data/sm4/images', '../data/sm4/background']:
create_folder(path + 'bmp')
for ext in formats: # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
                cv2.imwrite(f.replace(ext, '.bmp').replace(path, path + 'bmp'), cv2.imread(f))  # replace the matched extension (any case) with .bmp
# Save labels
# for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
with open(file, 'r') as f:
lines = f.read()
# lines = f.read().replace('2014/', '2014bmp/') # coco
lines = lines.replace('/images', '/imagesbmp')
lines = lines.replace('/background', '/backgroundbmp')
for ext in formats:
lines = lines.replace(ext, '.bmp')
with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='data/coco_64img.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new_folder'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|