python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import pickle
import glob
import os
import pdb
import math
import numpy as np
import scipy.stats
import itertools
import random
from random import shuffle
from collections import OrderedDict
#import normalization.positive_normalization
#from normalization import positive_normalization
#from normalization import *
#from label_lines import *
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
plt.ioff() #http://matplotlib.org/faq/usage_faq.html (interactive mode)
#mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = '6'
linewidth = '0.3'
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
label_fontsize = 3
linestyles = ['-', '--', '-.', '-', '-', '--', '-.', '-']
#colors = ["#659BC9", "#551a8b", "#e41a1c", "#377eb8"]
#101, 155, 201 is #659BC9. 236, 163, 57 is #ECA339
colors = ["#000000", "#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
"#ff7f00", "#ffff33", "#a65628", "#f781bf"]
loss_type="test_errors"
ylim=(20, 70) #100-77
xlim=(0, 90)
figname = "finetuning_densenet.pdf"
runs = [
'/checkpoint/adefazio/opt/vr_densenet_resume/20191022_070415j60d2vd4/job-0/log/run123/current_runinfo.pkl',
'/checkpoint/adefazio/opt/vr_densenet_sweep2/20191018_113200kfr3mjvy/job-4/log/run123/current_runinfo.pkl',
#'/checkpoint/adefazio/opt/vr_densenet_sweep2/20191018_113200kfr3mjvy/job-5/log/run123/current_runinfo.pkl',
]
traces = []
plt.cla()
scalefactor = 0.85
fig = plt.figure(figsize=(scalefactor*3.3,scalefactor*2))
ax = fig.add_subplot(111)
ax.set_prop_cycle("color", colors)
plt.xlabel('Epoch')
plt.ylabel("Test error (%)")
legend_position = 'upper right'
idx = 0
for fname in runs:
print("(ALL) processing run ", fname)
with open(fname, 'rb') as fdata:
rd = pickle.load(fdata)
args = rd['args']
losses = rd[loss_type]
losses = losses[:xlim[1]]
losses = 100.0 - np.array(losses)
legend = f"from {args.vr_from_epoch:2d} ({min(losses):1.2f}%)"
if len(losses) > 0:
x = list(range(1,len(losses)+1))
y = losses.copy()
ax.plot(x, y, label=legend,
linestyle=linestyles[idx % len(linestyles)])
idx += 1
#pdb.set_trace()
print("Finalizing plot")
if ylim is not None:
plt.ylim(ylim)
if xlim is not None:
plt.xlim(xlim)
baseline = 23.22
ax.axhline(baseline, color="r", label=f"Baseline ({baseline:1.2f}%)")
ax.grid(False)
ax.xaxis.set_tick_params(direction='in')
ax.yaxis.set_tick_params(direction='in', right=True)
#labelLines(plt.gca().get_lines(), align=False, fontsize=label_fontsize, xvals=xvals)
ax.legend(fontsize=5, handlelength=2, loc=legend_position) #bbox_to_anchor=(1, 0.5)
fig.savefig(figname, bbox_inches='tight', pad_inches=0)
print("saved", figname)
| deep-variance-reduction-main | reproduce/plot_finetuning.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import run
archs = ['default', 'resnet-small', 'densenet-40-36', 'resnet110']
try:
pindex = int(sys.argv[1])
print(f"problem index {pindex}")
except:
pindex = 0
arch = archs[pindex]
runargs = {
'problem': 'cifar10',
'architecture': arch,
'method': "svrg",
'logfname': 'reproduce-variance-ratios-{}'.format(arch),
'momentum': 0.9,
'decay': 0.0001,
'lr': 0.1,
'lr_reduction': "150-225",
'batch_size': 128,
'epochs': 250,
'log_diagnostics': True,
}
run.run(runargs)
| deep-variance-reduction-main | reproduce/reproduce_ratio_plots.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import pickle
import glob
import os
import re
import numpy as np
import itertools
import random
from random import shuffle
import pdb
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
plt.ioff() #http://matplotlib.org/faq/usage_faq.html (interactive mode)
import numpy as np
import itertools
from random import shuffle
from matplotlib.ticker import FuncFormatter
from label_lines import *
run_dir = "runs"
plot_dir = "plots"
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
#mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = '6'
linewidth = '0.3'
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
label_fontsize = 6
linestyles = itertools.cycle(('-', '--', '-.', ':'))
colors = ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
"#ff7f00", "#ffff33", "#a65628", "#f781bf"]
def plot_dist(plot_name, data_files, labels, xvals):
ylabel = "Iterate distance"
epochs = []
plt.cla()
fig = plt.figure(figsize=(3.2,2))
ax = fig.add_subplot(111)
ax.set_prop_cycle("color", colors)
for fname, label in zip(data_files, labels):
print("(ALL) processing file ", fname)
with open(fname, 'rb') as fdata:
rd = pickle.load(fdata)
#pdb.set_trace()
if 'batch_indices' in rd:
print("Has batch indices")
# Calculate x axis for plotting
batch_indices = np.array(rd["batch_indices"])
nk = len(batch_indices)
if max(batch_indices) == min(batch_indices):
eval_points = np.array(range(nk))/nk
else:
eval_points = batch_indices/max(batch_indices)
epochs.append(rd["epoch"])
#pdb.set_trace()
var_points = rd["iterate_distances"]
ax.plot(eval_points, var_points, label=label)
# Only compare data from the same epoch
if len(set(epochs)) > 1:
print("More than one epoch encountered: {}".format(epochs))
print("Finalizing plot")
plt.xlabel('Progress within epoch')
plt.ylabel(ylabel)
plt.xlim([0.0, 1.0])
# Format x axis as percentage
def myfunc(x, pos=0):
return '%1.0f%%'%(100*x)
ax.xaxis.set_major_formatter(FuncFormatter(myfunc))
ax.grid(False)
ax.xaxis.set_tick_params(direction='in')
ax.yaxis.set_tick_params(direction='in', right="on")
#pdb.set_trace()
labelLines(plt.gca().get_lines(), align=False, fontsize=label_fontsize, xvals=xvals)
figname = "{}/{}.pdf".format(plot_dir, plot_name)
fig.savefig(figname, bbox_inches='tight', pad_inches=0)
print("saved", figname)
##################
plot_dist(plot_name = "variance_dist",
data_files = [
"data/variance-dist/default-small_tlockvariance_epoch3.pkl",
"data/variance-dist/densenet_tlockvariance_epoch3.pkl"],
labels = ["LeNet", "DenseNet"],
xvals= [0.65, 0.8])
| deep-variance-reduction-main | reproduce/plot_iterate_distance.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import run
methods = ["sgd", "recompute_svrg", "scsg"]
try:
pindex = int(sys.argv[1])
seed = int(sys.argv[2])
print(f"problem index {pindex}")
except:
pindex = 0
seed = 0
method = methods[pindex]
runargs = {
'method': method,
'seed': seed,
'problem': 'imagenet',
'architecture': 'resnet18',
'momentum': 0.9,
'lr': 0.1,
'decay': 0.0001,
'lr_reduction': "every30",
'batch_size': 256,
'epochs': 90,
'save_model': True,
'full_checkpointing': True,
'log_interval': 80,
}
run.run(runargs)
| deep-variance-reduction-main | reproduce/reproduce_test_error_imagenet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import run
methods = ["sgd", "recompute_svrg", "scsg"]
try:
pindex = int(sys.argv[1])
seed = int(sys.argv[2])
print(f"problem index {pindex}")
except:
pindex = 0
seed = 0
method = methods[pindex]
runargs = {
'problem': 'cifar10',
'architecture': 'resnet110', #'resnet110',
'method': method,
'seed': seed,
'momentum': 0.9,
'decay': 0.0001,
'lr': 0.05,
'lr_reduction': "150-225",
'batch_size': 128,
'epochs': 250,
'log_diagnostics': False,
'save_model': True,
}
run.run(runargs)
| deep-variance-reduction-main | reproduce/reproduce_test_error_resnet110.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import pickle
import glob
import os
import pdb
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
plt.ioff() #http://matplotlib.org/faq/usage_faq.html (interactive mode)
import numpy as np
import itertools
import scipy
import scipy.stats
from random import shuffle
from label_lines import *
#mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = '6'
linewidth = '0.3'
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
label_fontsize = 6
linestyles = ['-', '--', '-.', '-', '-', '--', '-.', '-']
colors = ["#000000", "#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
"#ff7f00", "#ffff33", "#a65628", "#f781bf"]
def plot_averaged(plot_name, plot_entries, xvals, yrange=None):
run_dir = "runs"
plot_dir = "plots"
loss_key = "test_errors"
ylabel = "Test error (%)"
# Positions of the labels in x axis range, i.e. epoch number
#xvals
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
max_seen = 0
plt.cla()
fig = plt.figure(figsize=(3.3,2))
ax = fig.add_subplot(111)
ax.set_prop_cycle("color", colors)
#ax.set_prop_cycle("linestyle", linestyles)
line_idx = 0
for plot_entry in plot_entries:
fname_grob = plot_entry["fname"]
data_files = glob.glob(fname_grob)
if len(data_files) == 0:
raise Exception("No files found matching path: {}".format(fname_grob))
errors_lists = []
for fname in data_files:
print("(ALL) processing run ", fname)
with open(fname, 'rb') as fdata:
rd = pickle.load(fdata)
values = rd[loss_key]
# convert to errors
errors = [100.0 - val for val in values]
#pdb.set_trace()
#print("losses: {}".format(losses))
print("Final test error {} for {}".format(errors[-1], plot_entry["label"]))
# Remove outlier runs
if errors[-1] < 20.0:
errors_lists.append(errors.copy())
max_test_loss = max(errors)
if max_test_loss > max_seen:
max_seen = max_test_loss
max_epoch = len(errors)
## Aggregate and plots
n = len(errors_lists)
errors_avg = [0.0 for i in range(len(errors_lists[0]))]
errors_low = [0.0 for i in range(len(errors_lists[0]))]
errors_hi = [0.0 for i in range(len(errors_lists[0]))]
#pdb.set_trace()
# Apply a smoothing filter
box_pts = 10
box = np.ones(box_pts)/box_pts
for i in range(len(errors_lists)):
errors_lists[i] = np.convolve(errors_lists[i], box, mode='valid')
# Change from a list of runs to a list of epochs
errors = np.array(errors_lists).T.tolist()
for i in range(len(errors)):
sem = scipy.stats.sem(errors[i])
errors_avg[i] = np.mean(errors[i])
errors_low[i] = errors_avg[i] - sem
errors_hi[i] = errors_avg[i] + sem
x = range(len(errors_avg))
ax.plot(
x,
errors_avg,
label=plot_entry["label"],
linestyle=linestyles[line_idx]) #linestyle=next(linestyles)
ax.fill_between(x, errors_low, errors_hi, alpha=0.3)
line_idx += 1
print("Average final test error {} for {}".format(errors_avg[-1], plot_entry["label"]))
print("Finalizing plot")
plt.xlabel('Epoch')
plt.ylabel(ylabel)
plt.xlim([0, max_epoch-box_pts])
#pdb.set_trace()
if yrange is not None:
plt.ylim(yrange)
else:
plt.ylim([0, max_seen])
#box = ax.get_position()
#ax.set_position([box.x0, box.y0, box.width * 0.6, box.height])
ax.grid(False)
ax.xaxis.set_tick_params(direction='in')
ax.yaxis.set_tick_params(direction='in', right="on")
labelLines(plt.gca().get_lines(), align=False, fontsize=label_fontsize, xvals=xvals)
#ax.legend(fontsize=5, handlelength=8, loc='center left', bbox_to_anchor=(1, 0.5))
figname = "{}/{}.pdf".format(plot_dir, plot_name)
fig.savefig(figname, bbox_inches='tight', pad_inches=0)
print("saved", figname)
plot_averaged(
plot_name="test_resnet110_V2",
plot_entries = [
{"fname": "runs/cifar10/*resnet110*scsg*.pkl", "label": "SCSG"},
{"fname": "runs/cifar10/*resnet110*sgd*.pkl", "label": "SGD"},
{"fname": "runs/cifar10/*resnet110*svrg*.pkl", "label": "SVRG"},
],
xvals=[175, 210, 100],
yrange=[6.0, 40.0])
| deep-variance-reduction-main | reproduce/plot_test_error_with_bars.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import run
methods = ["sgd", "recompute_svrg", "scsg"]
try:
pindex = int(sys.argv[1])
seed = int(sys.argv[2])
print(f"problem index {pindex}")
except:
pindex = 0
seed = 0
method = methods[pindex]
runargs = {
'problem': 'cifar10',
'architecture': 'default',
'method': method,
'seed': seed,
'momentum': 0.9,
'decay': 0.0001,
'lr': 0.1,
'lr_reduction': "150-225",
'batch_size': 128,
'epochs': 250,
'log_diagnostics': False,
'save_model': True,
}
run.run(runargs)
| deep-variance-reduction-main | reproduce/reproduce_test_error_lenet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import pickle
import glob
import os
import re
import matplotlib.ticker as plticker
import numpy as np
import itertools
import random
from random import shuffle
import pdb
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
plt.ioff() #http://matplotlib.org/faq/usage_faq.html (interactive mode)
import numpy as np
import itertools
from random import shuffle
from label_lines import *
run_dir = "runs"
plot_dir = "plots"
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
#mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = '6'
linewidth = '0.3'
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
label_fontsize = 6
linestyles = itertools.cycle(('-', '--', '-.', ':'))
colors = ["black", "#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
"#ff7f00", "#ffff33", "#a65628", "#f781bf"]
def plot_variance_ratios(plot_name, data_files_grob, xvals):
ylabel = "SVR Variance / SGD Variance"
keys_to_show = ['2%', '11%', '33%', '100%']
# Position of in-plot labels along the x axis
epochs = []
ratios = []
vr_variances = []
gradient_variances = []
trace_data = {}
data_files = glob.glob(data_files_grob)
def atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
return [atoi(c) for c in re.split(r'(\d+)', text)]
data_files.sort(key=natural_keys)
#pdb.set_trace()
for fname in data_files:
print("(ALL) processing file ", fname)
with open(fname, 'rb') as fdata:
rd = pickle.load(fdata)
#pdb.set_trace()
if 'batch_indices' in rd:
print("Has batch indices")
# Calculate x axis for plotting
batch_indices = np.array(rd["batch_indices"])
nk = len(batch_indices)
if max(batch_indices) == min(batch_indices):
eval_points = np.array(range(nk))/nk
else:
eval_points = batch_indices/max(batch_indices)
epochs.append(rd["epoch"])
#pdb.set_trace()
ratio_points = (np.array(rd["vr_step_variances"])/np.array(rd["gradient_variances"])).tolist()
for i, ep in enumerate(eval_points):
ep_name = "{0:.0f}%".format(100*ep)
if ep_name not in trace_data.keys():
trace_data[ep_name] = [ratio_points[i]]
else:
trace_data[ep_name].append(ratio_points[i])
plt.cla()
fig = plt.figure(figsize=(3.2,2))
ax = fig.add_subplot(111)
ax.set_prop_cycle("color", colors)
#pdb.set_trace()
for ep_name, data in trace_data.items():
if ep_name in keys_to_show:
ax.plot(epochs, data, ".",
label=ep_name) #, linestyle=next(linestyles))
if ep_name == "100%":
print("100p epochs:", epochs)
print("ratios: ", data)
print("Finalizing plot")
plt.xlabel('Epoch')
plt.ylabel(ylabel)
ax.set_yscale("log", basey=2)
ax.set_yticks([2**(-i) for i in range(0, 11)])
plt.ylim([1e-3, 3])
plt.xlim([0.0, 240])
# Horizontal line at 1
#plt.axhline(y=1.0, color="#000000", linestyle='--')
#plt.axhline(y=2.0, color="#000000", linestyle='--')
ax.axhspan(1, 2, alpha=0.3, facecolor='red', edgecolor=None)
# Step size reduction indicators
plt.axvline(x=150.0, color="brown", linestyle='--')
plt.axvline(x=220.0, color="brown", linestyle='--')
#loc = plticker.LogLocator(base=2.0)
#ax.yaxis.set_major_locator(loc)
#plt.tick_params(axis='y', which='minor')
ax.grid(False)
ax.xaxis.set_tick_params(direction='in')
ax.yaxis.set_tick_params(direction='in', right="on")
labelLines(plt.gca().get_lines(), align=False, fontsize=label_fontsize, xvals=xvals)
figname = "{}/{}.pdf".format(plot_dir, plot_name)
fig.savefig(figname, bbox_inches='tight', pad_inches=0)
print("saved", figname)
################## xvals are low percentages to high
plot_variance_ratios(plot_name = "variance_ratios_densenet",
data_files_grob = "data/variance1/var-*.pkl", xvals = [200, 200, 200, 200])
plot_variance_ratios(plot_name = "variance_ratios_lenet",
data_files_grob = "data/variance-lenet/*.pkl", xvals = [210, 200, 190, 210])
plot_variance_ratios(plot_name = "variance_ratios_small-resnet",
data_files_grob = "data/variance-small-resnet/*.pkl", xvals = [180, 180, 180, 210])
plot_variance_ratios(plot_name = "variance_ratios_resnet110",
data_files_grob = "data/variance-resnet110/*.pkl", xvals = [180, 180, 180, 210])
# Soft versions
if True:
plot_variance_ratios(plot_name = "soft_variance_ratios_densenet",
data_files_grob = "data/variance-soft/*densenet*.pkl", xvals = [200, 200, 200, 200])
plot_variance_ratios(plot_name = "soft_variance_ratios_lenet",
data_files_grob = "data/variance-soft/*default*.pkl", xvals = [210, 200, 190, 210])
plot_variance_ratios(plot_name = "soft_variance_ratios_small-resnet",
data_files_grob = "data/variance-soft/*resnet*.pkl", xvals = [180, 180, 180, 210])
| deep-variance-reduction-main | reproduce/plot_variance_ratio.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import run
transform_locking = [True, False]
try:
pindex = int(sys.argv[1])
print(f"problem index {pindex}")
except:
pindex = 0
locking = transform_locking[pindex]
runargs = {
'transform_locking': locking,
'problem': 'cifar10',
'architecture': 'default',
'method': "svrg",
'logfname': 'reproduce-transform-locking-{}'.format(locking),
'momentum': 0.9,
'decay': 0.0001,
'lr': 0.1,
'lr_reduction': "150-225",
'batch_size': 128,
'epochs': 3,
'log_diagnostics': True,
'log_diagnostics_every_epoch': True,
'log_diagnostics_deciles': True,
}
run.run(runargs)
| deep-variance-reduction-main | reproduce/reproduce_locking_plot.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import pickle
import glob
import os
import pdb
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
plt.ioff() #http://matplotlib.org/faq/usage_faq.html (interactive mode)
import numpy as np
import itertools
from random import shuffle
from label_lines import *
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = '6'
linewidth = '0.3'
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
label_fontsize = 6
linestyles = ['-', '--', '-.', '-', '-', '--', '-.', '-']
colors = ["#000000", "#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
"#ff7f00", "#ffff33", "#a65628", "#f781bf"]
def plot_averaged(plot_name, plot_entries, xvals, yrange=None):
run_dir = "runs"
plot_dir = "plots"
#plot_name = "test_error1"
loss_key = "test_errors"
ylabel = "Test error (%)"
# Positions of the labels in x axis range, i.e. epoch number
#xvals
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
max_seen = 0
plt.cla()
fig = plt.figure(figsize=(3.3,2))
ax = fig.add_subplot(111)
ax.set_prop_cycle("color", colors)
#ax.set_prop_cycle("linestyle", linestyles)
line_idx = 0
for plot_entry in plot_entries:
fname_grob = plot_entry["fname"]
data_files = glob.glob(fname_grob)
if len(data_files) == 0:
raise Exception("No files found matching path: {}".format(fname_grob))
errors_lists = []
for fname in data_files:
print("(ALL) processing run ", fname)
with open(fname, 'rb') as fdata:
rd = pickle.load(fdata)
values = rd[loss_key]
# convert to errors
errors = [100.0 - val for val in values]
#pdb.set_trace()
#print("losses: {}".format(losses))
print("Final test error {} for {}".format(errors[-1], plot_entry["label"]))
errors_lists.append(errors.copy())
max_test_loss = max(errors)
if max_test_loss > max_seen:
max_seen = max_test_loss
max_epoch = len(errors)
## Aggregate and plots
n = len(errors_lists)
errors_avg = [0.0 for i in range(len(errors_lists[0]))]
for i in range(n):
for j in range(len(errors_avg)):
errors_avg[j] += float(errors_lists[i][j]/n)
#pdb.set_trace()
ax.plot(
range(len(errors_avg)),
errors_avg,
label=plot_entry["label"],
linestyle=linestyles[line_idx]) #linestyle=next(linestyles)
line_idx += 1
print("Average final test error {} for {}".format(errors_avg[-1], plot_entry["label"]))
print("Finalizing plot")
plt.xlabel('Epoch')
plt.ylabel(ylabel)
plt.xlim([0, max_epoch])
if yrange is not None:
plt.ylim(yrange)
else:
plt.ylim([0, max_seen])
#box = ax.get_position()
#ax.set_position([box.x0, box.y0, box.width * 0.6, box.height])
ax.grid(False)
ax.xaxis.set_tick_params(direction='in')
ax.yaxis.set_tick_params(direction='in', right="on")
labelLines(plt.gca().get_lines(), align=False, fontsize=label_fontsize, xvals=xvals)
#ax.legend(fontsize=5, handlelength=8, loc='center left', bbox_to_anchor=(1, 0.5))
figname = "{}/{}.pdf".format(plot_dir, plot_name)
fig.savefig(figname, bbox_inches='tight', pad_inches=0)
print("saved", figname)
#######################################################
######################################################
plot_averaged(
plot_name="test_resnet110_V2",
plot_entries = [
{"fname": "runs/cifar10/*resnet110*scsg*.pkl", "label": "SCSG"},
{"fname": "runs/cifar10/*resnet110*sgd*.pkl", "label": "SGD"},
{"fname": "runs/cifar10/*resnet110*svrg*.pkl", "label": "SVRG"},
],
xvals=[175, 210, 250],
yrange=[0, 40.0])
# plot_averaged(
# plot_name="test_lenet",
# plot_entries = [
# {"fname": "data/runs-large/cifar10-default-scsg-m0_9d0_0001lr0_1sl1e-06epochs300bs128pb_Falseis_10drop_Falsebn_Truereduct_150-225seed_*.pkl", "label": "SCSG"},
# {"fname": "data/runs-large/cifar10-default-sgd-m0_9d0_0001lr0_1sl1e-06epochs300bs128pb_Falseis_10drop_Falsebn_Truereduct_150-225seed_*.pkl", "label": "SGD"},
# {"fname": "data/runs-large/cifar10-default-svrg-lr0_1-m0_9-d0_0001-epochs300bs128drop_Falsebn_Truevr_from_1bn_recal_Truereduct_150-225seed_*.pkl", "label": "SVRG"},
# ],
# xvals=[175, 210, 250],
# yrange=[20.0, 40.0])
# plot_averaged(
# plot_name="test_imagenet",
# plot_entries = [
# {"fname": "data/imagenet-vr/*scsg*.pkl", "label": "SCSG"},
# {"fname": "data/imagenet-sgd/*.pkl", "label": "SGD"},
# {"fname": "data/imagenet-vr/*svrg*.pkl", "label": "SVRG"},
# ],
# xvals=[22, 45, 10],
# yrange=[30.0, 70.0])
| deep-variance-reduction-main | reproduce/plot_test_error.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import print_function
import pickle
import glob
import os
import re
import numpy as np
import itertools
import random
from random import shuffle
import pdb
import matplotlib as mpl
mpl.use('agg')
import matplotlib.pyplot as plt
plt.ioff() #http://matplotlib.org/faq/usage_faq.html (interactive mode)
import numpy as np
import itertools
from random import shuffle
from matplotlib.ticker import FuncFormatter
from label_lines import *
run_dir = "runs"
plot_dir = "plots"
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
#mpl.rcParams['mathtext.fontset'] = 'cm'
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.size'] = '6'
linewidth = '0.3'
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.linewidth'] = linewidth
mpl.rcParams['xtick.major.width'] = linewidth
mpl.rcParams['ytick.major.width'] = linewidth
label_fontsize = 6
linestyles = itertools.cycle(('-', '--', '-.', ':'))
colors = ["#e41a1c", "#377eb8", "#4daf4a", "#984ea3",
"#ff7f00", "#ffff33", "#a65628", "#f781bf"]
def plot_variance_raw(plot_name, data_files, labels):
ylabel = "SVRG Variance"
xvals = [0.7, 0.7] # Position of in-plot labels along the x axis
epochs = []
plt.cla()
fig = plt.figure(figsize=(3.2,2))
ax = fig.add_subplot(111)
ax.set_prop_cycle("color", colors)
for fname, label in zip(data_files, labels):
print("(ALL) processing file ", fname)
with open(fname, 'rb') as fdata:
rd = pickle.load(fdata)
#pdb.set_trace()
if 'batch_indices' in rd:
print("Has batch indices")
# Calculate x axis for plotting
batch_indices = np.array(rd["batch_indices"])
nk = len(batch_indices)
if max(batch_indices) == min(batch_indices):
eval_points = np.array(range(nk))/nk
else:
eval_points = batch_indices/max(batch_indices)
epochs.append(rd["epoch"])
#pdb.set_trace()
var_points = rd["vr_step_variances"]
#pdb.set_trace()
ax.plot(eval_points, var_points, label=label)
# Only compare data from the same epoch
if len(set(epochs)) > 1:
print("More than one epoch encountered: {}".format(epochs))
print("Finalizing plot")
plt.xlabel('Progress within epoch')
plt.ylabel(ylabel)
plt.ylim([0, 0.7])
plt.xlim([0.0, 1.0])
# Format x axis as percentage
def myfunc(x, pos=0):
return '%1.0f%%'%(100*x)
ax.xaxis.set_major_formatter(FuncFormatter(myfunc))
ax.grid(False)
ax.xaxis.set_tick_params(direction='in')
ax.yaxis.set_tick_params(direction='in', right="on")
labelLines(plt.gca().get_lines(), align=False, fontsize=label_fontsize, xvals=xvals)
figname = "{}/{}.pdf".format(plot_dir, plot_name)
fig.savefig(figname, bbox_inches='tight', pad_inches=0)
print("saved", figname)
##################
plot_variance_raw(plot_name = "variance_transform",
data_files = [
"data/variance-locking/default-lenet-shortvariance_epoch3.pkl",
"data/variance-locking/default-lenet-short_tlockvariance_epoch3.pkl",
],
labels = ["No locking", "Transform locking"])
| deep-variance-reduction-main | reproduce/plot_transform_locking.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
class Fab_HDD():
def __init__(self, config="BarraCuda"):
###############################
# Carbon per capacity
###############################
with open("hdd/hdd_consumer.json", 'r') as f:
hdd_config = json.load(f)
with open("hdd/hdd_enterprise.json", 'r') as f:
hdd_config.update(json.load(f))
assert config in hdd_config.keys(), "HDD configuration not found"
self.carbon_per_gb = hdd_config[config]
self.carbon = 0
return
def get_cpg(self, ):
return self.carbon_per_gb
def set_capacity(self, capacity):
self.capacity = capacity
self.carbon = self.carbon_per_gb * self.capacity
return
def get_carbon(self, ):
return self.carbon
| ACT-main | hdd_model.py |
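A minimal usage sketch for the Fab_HDD class above (an illustration, not part of ACT): it assumes the hdd/*.json configs are readable from the working directory and uses the class's default "BarraCuda" entry; the capacity value is a placeholder.
# Hedged usage sketch for Fab_HDD (not from the repo).
from hdd_model import Fab_HDD
hdd = Fab_HDD(config="BarraCuda")   # default config key from the constructor above
hdd.set_capacity(1000)              # placeholder capacity in GB
print(hdd.get_cpg())                # embodied carbon per GB for this config
print(hdd.get_carbon())             # embodied carbon for the full capacity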
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
class Fab_Logic():
def __init__(self, process_node=14,
gpa="97",
carbon_intensity="loc_taiwan",
debug=False,
fab_yield=0.875):
self.debug = debug
###############################
# Energy per unit area
###############################
with open("logic/epa.json", 'r') as f:
epa_config = json.load(f)
###############################
# Raw materials per unit area
###############################
with open("logic/materials.json", 'r') as f:
materials_config = json.load(f)
###############################
# Gasses per unit area
###############################
if gpa == "95":
with open("logic/gpa_95.json", 'r') as f:
gpa_config = json.load(f)
elif gpa == "99":
with open("logic/gpa_99.json", 'r') as f:
gpa_config = json.load(f)
elif gpa == "97":
with open("logic/gpa_95.json", 'r') as f:
gpa_95_config = json.load(f)
with open("logic/gpa_99.json", 'r') as f:
gpa_99_config = json.load(f)
gpa_config = {}
for c in gpa_95_config.keys():
gas = (gpa_95_config[c] + gpa_99_config[c]) / 2.
gpa_config[c] = gas
else:
print("Error: Unsupported GPA value for FAB logic")
sys.exit()
###############################
# Carbon intensity of fab
###############################
if "loc" in carbon_intensity:
with open("carbon_intensity/location.json", 'r') as f:
loc_configs = json.load(f)
loc = carbon_intensity.replace("loc_", "")
assert loc in loc_configs.keys()
fab_ci = loc_configs[loc]
elif "src" in carbon_intensity:
with open("carbon_intensity/source.json", 'r') as f:
src_configs = json.load(f)
src = carbon_intensity.replace("src_", "")
assert src in src_configs.keys()
fab_ci = src_configs[src]
else:
print("Error: Carbon intensity must either be loc | src dependent")
sys.exit()
###############################
# Aggregating model
###############################
process_node = str(process_node) + "nm"
assert process_node in epa_config.keys()
assert process_node in gpa_config.keys()
assert process_node in materials_config.keys()
carbon_energy = fab_ci * epa_config[process_node]
carbon_gas = gpa_config[process_node]
carbon_materials = materials_config[process_node]
self.carbon_per_area = (carbon_energy + carbon_gas + carbon_materials)
self.carbon_per_area = self.carbon_per_area / fab_yield
if self.debug:
print("[Fab logic] Carbon/area from energy consumed" , carbon_energy)
print("[Fab logic] Carbon/area from gasses" , carbon_gas)
print("[Fab logic] Carbon/area from materials" , carbon_materials)
print("[Fab logic] Carbon/area aggregate" , self.carbon_per_area)
self.carbon = 0
return
def get_cpa(self,):
return self.carbon_per_area
def set_area(self, area):
self.area = area
self.carbon = self.area * self.carbon_per_area
def get_carbon(self, ):
return self.carbon
| ACT-main | logic_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
from dram_model import Fab_DRAM
from ssd_model import Fab_SSD
from logic_model import Fab_Logic
def main():
Fab_DRAM(config="ddr4_10nm")
Fab_SSD(config="nand_10nm")
Fab_Logic(gpa="95", carbon_intensity = "src_coal", debug=True,
process_node=10)
# Fab_Logic(gpa="97", carbon_intensity = "loc_taiwan", debug=True,
# process_node=14)
if __name__=="__main__":
main()
| ACT-main | model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
class Fab_DRAM():
def __init__(self, config = "ddr4_10nm", fab_yield=0.875):
###############################
# Carbon per capacity
###############################
with open("dram/dram_hynix.json", 'r') as f:
dram_config = json.load(f)
assert config in dram_config.keys(), "DRAM configuration not found"
self.fab_yield = fab_yield
self.carbon_per_gb = dram_config[config] / self.fab_yield
self.carbon = 0
def get_cpg(self, ):
return self.carbon_per_gb
def set_capacity(self, capacity):
self.capacity = capacity
self.carbon = self.carbon_per_gb * self.capacity
return
def get_carbon(self, ):
return self.carbon
| ACT-main | dram_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
class Fab_SSD():
def __init__(self, config="nand_10nm", fab_yield=0.875):
###############################
# Carbon per capacity
###############################
with open("ssd/ssd_hynix.json", 'r') as f:
ssd_config = json.load(f)
with open("ssd/ssd_seagate.json", 'r') as f:
ssd_config.update(json.load(f))
with open("ssd/ssd_western.json", 'r') as f:
ssd_config.update(json.load(f))
assert config in ssd_config.keys(), "SSD configuration not found"
self.fab_yield = fab_yield
self.carbon_per_gb = ssd_config[config] / self.fab_yield
self.carbon = 0
return
def get_cpg(self, ):
return self.carbon_per_gb
def set_capacity(self, capacity):
self.capacity = capacity
self.carbon = self.carbon_per_gb * self.capacity
return
def get_carbon(self, ):
return self.carbon
| ACT-main | ssd_model.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
from dram_model import Fab_DRAM
from hdd_model import Fab_HDD
from ssd_model import Fab_SSD
from logic_model import Fab_Logic
debug = False
##############################
# Original Dell 740 LCA
##############################
#https://corporate.delltechnologies.com/content/dam/digitalassets/active/en/unauth/data-sheets/products/servers/lca_poweredge_r740.pdf
##############################
# Main Dell R740 integrated circuits
##############################
dellr740_large_ssd = 3840 # GB (3.84 TB x 8 SSD's)
dellr740_ssd = 400 # GB (400GB x 1 SSD)
dellr740_ssd_dram = 68 # GB (64 + 4GB ECC)
dellr740_dram = 36 # GB (32 + 4 ECC GB x 12)
ic_yield = 0.875
cpu_area = 6.98 #cm^2
##############################
# Estimated process technology node to mimic fairphone LCA process node
##############################
CPU_Logic = Fab_Logic(gpa = "95",
carbon_intensity = "src_coal",
process_node = 28,
fab_yield=ic_yield)
SSD_main = Fab_SSD(config = "nand_30nm", fab_yield = ic_yield)
SSD_secondary = Fab_SSD(config = "nand_30nm", fab_yield = ic_yield)
DRAM_SSD_main = Fab_DRAM(config = "ddr3_50nm", fab_yield = ic_yield)
DRAM_SSD_secondary = Fab_DRAM(config = "ddr3_50nm", fab_yield = ic_yield)
DRAM = Fab_DRAM(config = "ddr3_50nm", fab_yield = ic_yield)
##############################
# Computing carbon footprint of IC's
##############################
CPU_Logic.set_area(cpu_area)
DRAM.set_capacity(dellr740_dram)
DRAM_SSD_main.set_capacity(dellr740_ssd_dram)
SSD_main.set_capacity(dellr740_large_ssd)
DRAM_SSD_secondary.set_capacity(dellr740_ssd_dram)
SSD_secondary.set_capacity(dellr740_ssd)
##################################
# Computing the packaging footprint
##################################
# number of packages
ssd_main_nr = 12 + 1
ssd_secondary_nr = 12 + 1
dram_nr = 18 + 1
cpu_nr = 2
packaging_intensity = 150 # gram CO2
SSD_main_packaging = packaging_intensity * ssd_main_nr
SSD_secondary_packaging = packaging_intensity * ssd_secondary_nr
DRAM_packging = packaging_intensity * dram_nr
CPU_packaging = packaging_intensity * cpu_nr
total_packaging = SSD_main_packaging + \
SSD_secondary_packaging + \
DRAM_packging + \
CPU_packaging
total_packaging = total_packaging / 1000.
##################################
# Compute end-to-end carbon footprints
##################################
SSD_main_count = 8 # There are 8x3.84TB SSD's
SSD_main_co2 = (SSD_main.get_carbon() + \
DRAM_SSD_main.get_carbon() + \
SSD_main_packaging) / 1000.
SSD_main_co2 = SSD_main_co2 * SSD_main_count
SSD_secondary_count = 1 # There are 1x400GB SSD's
SSD_secondary_co2 = (SSD_secondary.get_carbon() + \
DRAM_SSD_secondary.get_carbon() + \
SSD_secondary_packaging) / 1000.
SSD_secondary_co2 = SSD_secondary_co2 * SSD_secondary_count
DRAM_count = 12 # There are 12 x (32GB+4GB ECC DRAM modules)
DRAM_co2 = (DRAM.get_carbon() + DRAM_packging) / 1000. * DRAM_count
CPU_count = 2
CPU_co2 = (CPU_Logic.get_carbon() + CPU_packaging) * CPU_count / 1000.
if debug:
print("ACT SSD main", SSD_main_co2, "kg CO2")
print("ACT SSD secondary", SSD_secondary_co2, "kg CO2")
print("ACT DRAM", DRAM_co2, "kg CO2")
print("ACT CPU", CPU_co2, "kg CO2")
print("ACT Packaging", total_packaging, "kg CO2")
print("--------------------------------")
print("ACT SSD main", SSD_main_co2, "kg CO2 vs. LCA 3373 kg CO2")
print("ACT SSD secondary", SSD_secondary_co2, "kg CO2 vs. LCA 64.1 kg CO2")
print("ACT DRAM", DRAM_co2, "kg CO2 vs. LCA 533 kg CO2")
print("ACT CPU", CPU_co2, "kg CO2 vs. LCA 47 kg CO2")
| ACT-main | exps/dellr740/dellr740.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import sys
from dram_model import Fab_DRAM
from hdd_model import Fab_HDD
from ssd_model import Fab_SSD
from logic_model import Fab_Logic
debug = False
# Main Fairphone integrated circuits
fairphone3_ICs = ["IC analog switch",
"LED Flash",
"LED Flash",
"CMOS image sensor",
"Light sensor",
"Light sensor",
"LED Full Color",
"Image sensor",
"I.C WLAN",
"I.C WLAN",
"Audio power amplifier",
"IC analog switch",
"IC power amplifier",
"IC PMU",
"IC PMU",
"IC PMU",
"Sensor",
"NFC Microcontroller",
"IC transceiver",
"IC audio power",
]
# Main Fairphone integrated circuits' areas in mm^2
fairphone3_IC_areas = [0.85,
1.2,
1.2,
35,
0.89,
0.08,
0.25,
18,
11.6,
1.44,
12.96,
1.61,
6.3,
26.88,
0.77,
11.36,
7,
8.69,
11,
9.6]
fairphone_cpu_area = 46.4 #mm^2
fairphone_ram = 4 # GB
fairphone_storage = 64 # GB
ic_yield = 0.875
##################################
# Estimated process technology node to mimic fairphone LCA process node
# This initializes ACT with an older technology node.
##################################
# IC Logic node
IC_Logic = Fab_Logic(gpa = "95",
carbon_intensity = "src_coal",
process_node = 28,
fab_yield=ic_yield)
# CPU Application processor node
CPU_Logic = Fab_Logic(gpa = "95",
carbon_intensity = "src_coal",
process_node = 28,
fab_yield=ic_yield)
# DRAM Logic node
DRAM = Fab_DRAM(config = "ddr3_50nm", fab_yield=ic_yield)
# SSD Logic node
SSD = Fab_SSD(config = "nand_30nm", fab_yield=ic_yield)
##################################
# Computing the IC footprint
##################################
IC_Logic.set_area(sum(fairphone3_IC_areas)/100.)
CPU_Logic.set_area(fairphone_cpu_area/100.)
DRAM.set_capacity(fairphone_ram)
SSD.set_capacity(fairphone_storage)
##################################
# Computing the packaging footprint
##################################
#Number of packages
nr = len(fairphone3_ICs) + 1 + 1 + 1 # Fairphone ICs + CPU + DRAM + SSD
packaging_intensity = 150 # gram CO2
PackagingFootprint = nr * packaging_intensity
if debug:
print("ACT IC", IC_Logic.get_carbon(), "g CO2")
print("ACT CPU", CPU_Logic.get_carbon(), "g CO2")
print("ACT DRAM", DRAM.get_carbon(), "g CO2")
print("ACT SSD", SSD.get_carbon(), "g CO2")
print("ACT Packaging", PackagingFootprint, "g CO2")
print("--------------------------------")
ram_flash = (DRAM.get_carbon() + SSD.get_carbon() + packaging_intensity * 2) / 1000.
fairphone_ram_flash = 11
print("ACT RAM + Flash", ram_flash, "kg CO2 vs. LCA", fairphone_ram_flash, "kg CO2")
cpu = (CPU_Logic.get_carbon() + packaging_intensity) / 1000.
fairphone_cpu = 1.07
print("ACT CPU", cpu, "kg CO2 vs. LCA", fairphone_cpu, "kg CO2")
ics = (IC_Logic.get_carbon() + packaging_intensity * len(fairphone3_ICs)) / 1000.
fairphone_ics = 5.3
print("ACT ICs", ics, "kg CO2 vs. LCA", fairphone_ics, "kg CO2")
| ACT-main | exps/fairphone3/fairphone3.py |
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from setuptools import find_packages, setup
VERSION = '0.1.7'
DESCRIPTION = 'diffwave'
AUTHOR = 'LMNT, Inc.'
AUTHOR_EMAIL = '[email protected]'
URL = 'https://www.lmnt.com'
LICENSE = 'Apache 2.0'
KEYWORDS = ['diffwave machine learning neural vocoder tts speech']
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries',
]
setup(name = 'diffwave',
version = VERSION,
description = DESCRIPTION,
long_description = open('README.md', 'r').read(),
long_description_content_type = 'text/markdown',
author = AUTHOR,
author_email = AUTHOR_EMAIL,
url = URL,
license = LICENSE,
keywords = KEYWORDS,
packages = find_packages('src'),
package_dir = { '': 'src' },
install_requires = [
'numpy',
'torch>=1.6',
#'torchaudio>=0.7.0',
'torchaudio==0.7.0',
'tqdm'
],
classifiers = CLASSIFIERS)
| diffwave-master | setup.py |
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def override(self, attrs):
if isinstance(attrs, dict):
self.__dict__.update(**attrs)
elif isinstance(attrs, (list, tuple, set)):
for attr in attrs:
self.override(attr)
elif attrs is not None:
raise NotImplementedError
return self
params = AttrDict(
# Training params
batch_size=16,
learning_rate=2e-4,
max_grad_norm=None,
# Data params
sample_rate=22050,
n_mels=80,
n_fft=1024,
hop_samples=256,
crop_mel_frames=62, # Probably an error in paper.
# Model params
residual_layers=30,
residual_channels=64,
dilation_cycle_length=10,
unconditional = False,
noise_schedule=np.linspace(1e-4, 0.05, 50).tolist(),
inference_noise_schedule=[0.0001, 0.001, 0.01, 0.05, 0.2, 0.5],
# unconditional sample len
audio_len = 22050*5, # unconditional_synthesis_samples
)
| diffwave-master | src/diffwave/params.py |
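A small sketch of how the AttrDict.override helper above is used (illustration only; the values are arbitrary): override updates the attribute dictionary in place and returns the same object, so per-run hyperparameters can be patched onto the defaults.
# Sketch: override a couple of defaults from diffwave.params.
from diffwave.params import params
run_params = params.override({'batch_size': 8, 'unconditional': True})
print(run_params.batch_size, run_params.unconditional)  # 8 True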
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import torch
import torchaudio as T
import torchaudio.transforms as TT
from argparse import ArgumentParser
from concurrent.futures import ProcessPoolExecutor
from glob import glob
from tqdm import tqdm
from diffwave.params import params
def transform(filename):
if T.__version__ > '0.7.0':
audio, sr = T.load(filename)
audio = torch.clamp(audio[0], -1.0, 1.0)
else:
audio, sr = T.load_wav(filename)
audio = torch.clamp(audio[0] / 32767.5, -1.0, 1.0)
if params.sample_rate != sr:
raise ValueError(f'Invalid sample rate {sr}.')
mel_args = {
'sample_rate': sr,
'win_length': params.hop_samples * 4,
'hop_length': params.hop_samples,
'n_fft': params.n_fft,
'f_min': 20.0,
'f_max': sr / 2.0,
'n_mels': params.n_mels,
'power': 1.0,
'normalized': True,
}
mel_spec_transform = TT.MelSpectrogram(**mel_args)
with torch.no_grad():
spectrogram = mel_spec_transform(audio)
spectrogram = 20 * torch.log10(torch.clamp(spectrogram, min=1e-5)) - 20
spectrogram = torch.clamp((spectrogram + 100) / 100, 0.0, 1.0)
np.save(f'{filename}.spec.npy', spectrogram.cpu().numpy())
def main(args):
filenames = glob(f'{args.dir}/**/*.wav', recursive=True)
with ProcessPoolExecutor() as executor:
list(tqdm(executor.map(transform, filenames), desc='Preprocessing', total=len(filenames)))
if __name__ == '__main__':
parser = ArgumentParser(description='prepares a dataset to train DiffWave')
parser.add_argument('dir',
help='directory containing .wav files for training')
main(parser.parse_args())
| diffwave-master | src/diffwave/preprocess.py |
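The script above is normally pointed at a directory of .wav files via its ArgumentParser; the transform function can also be called on a single clip. A sketch with a placeholder filename:
from diffwave.preprocess import transform
# Writes the conditioning mel spectrogram next to the audio as <name>.wav.spec.npy;
# raises ValueError if the clip's sample rate differs from params.sample_rate (22050 Hz).
transform('LJ001-0001.wav')  # placeholder path to a 22.05 kHz wav file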
| diffwave-master | src/diffwave/__init__.py |
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt
Linear = nn.Linear
ConvTranspose2d = nn.ConvTranspose2d
def Conv1d(*args, **kwargs):
layer = nn.Conv1d(*args, **kwargs)
nn.init.kaiming_normal_(layer.weight)
return layer
@torch.jit.script
def silu(x):
return x * torch.sigmoid(x)
class DiffusionEmbedding(nn.Module):
def __init__(self, max_steps):
super().__init__()
self.register_buffer('embedding', self._build_embedding(max_steps), persistent=False)
self.projection1 = Linear(128, 512)
self.projection2 = Linear(512, 512)
def forward(self, diffusion_step):
if diffusion_step.dtype in [torch.int32, torch.int64]:
x = self.embedding[diffusion_step]
else:
x = self._lerp_embedding(diffusion_step)
x = self.projection1(x)
x = silu(x)
x = self.projection2(x)
x = silu(x)
return x
def _lerp_embedding(self, t):
low_idx = torch.floor(t).long()
high_idx = torch.ceil(t).long()
low = self.embedding[low_idx]
high = self.embedding[high_idx]
return low + (high - low) * (t - low_idx)
def _build_embedding(self, max_steps):
steps = torch.arange(max_steps).unsqueeze(1) # [T,1]
dims = torch.arange(64).unsqueeze(0) # [1,64]
table = steps * 10.0**(dims * 4.0 / 63.0) # [T,64]
table = torch.cat([torch.sin(table), torch.cos(table)], dim=1)
return table
class SpectrogramUpsampler(nn.Module):
def __init__(self, n_mels):
super().__init__()
self.conv1 = ConvTranspose2d(1, 1, [3, 32], stride=[1, 16], padding=[1, 8])
self.conv2 = ConvTranspose2d(1, 1, [3, 32], stride=[1, 16], padding=[1, 8])
def forward(self, x):
x = torch.unsqueeze(x, 1)
x = self.conv1(x)
x = F.leaky_relu(x, 0.4)
x = self.conv2(x)
x = F.leaky_relu(x, 0.4)
x = torch.squeeze(x, 1)
return x
class ResidualBlock(nn.Module):
def __init__(self, n_mels, residual_channels, dilation, uncond=False):
'''
:param n_mels: inplanes of conv1x1 for spectrogram conditional
:param residual_channels: audio conv
:param dilation: audio conv dilation
:param uncond: disable spectrogram conditional
'''
super().__init__()
self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation)
self.diffusion_projection = Linear(512, residual_channels)
if not uncond: # conditional model
self.conditioner_projection = Conv1d(n_mels, 2 * residual_channels, 1)
else: # unconditional model
self.conditioner_projection = None
self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)
def forward(self, x, diffusion_step, conditioner=None):
assert (conditioner is None and self.conditioner_projection is None) or \
(conditioner is not None and self.conditioner_projection is not None)
diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
y = x + diffusion_step
if self.conditioner_projection is None: # using an unconditional model
y = self.dilated_conv(y)
else:
conditioner = self.conditioner_projection(conditioner)
y = self.dilated_conv(y) + conditioner
gate, filter = torch.chunk(y, 2, dim=1)
y = torch.sigmoid(gate) * torch.tanh(filter)
y = self.output_projection(y)
residual, skip = torch.chunk(y, 2, dim=1)
return (x + residual) / sqrt(2.0), skip
class DiffWave(nn.Module):
def __init__(self, params):
super().__init__()
self.params = params
self.input_projection = Conv1d(1, params.residual_channels, 1)
self.diffusion_embedding = DiffusionEmbedding(len(params.noise_schedule))
if self.params.unconditional: # use unconditional model
self.spectrogram_upsampler = None
else:
self.spectrogram_upsampler = SpectrogramUpsampler(params.n_mels)
self.residual_layers = nn.ModuleList([
ResidualBlock(params.n_mels, params.residual_channels, 2**(i % params.dilation_cycle_length), uncond=params.unconditional)
for i in range(params.residual_layers)
])
self.skip_projection = Conv1d(params.residual_channels, params.residual_channels, 1)
self.output_projection = Conv1d(params.residual_channels, 1, 1)
nn.init.zeros_(self.output_projection.weight)
def forward(self, audio, diffusion_step, spectrogram=None):
assert (spectrogram is None and self.spectrogram_upsampler is None) or \
(spectrogram is not None and self.spectrogram_upsampler is not None)
x = audio.unsqueeze(1)
x = self.input_projection(x)
x = F.relu(x)
diffusion_step = self.diffusion_embedding(diffusion_step)
if self.spectrogram_upsampler: # use conditional model
spectrogram = self.spectrogram_upsampler(spectrogram)
skip = None
for layer in self.residual_layers:
x, skip_connection = layer(x, diffusion_step, spectrogram)
skip = skip_connection if skip is None else skip_connection + skip
x = skip / sqrt(len(self.residual_layers))
x = self.skip_projection(x)
x = F.relu(x)
x = self.output_projection(x)
return x
| diffwave-master | src/diffwave/model.py |
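A shape-check sketch for the conditional DiffWave module above (an illustration under the default params, not part of the repo): the audio length must be hop_samples times the number of mel frames, and the forward pass returns the predicted noise with a singleton channel dimension.
# Sketch: one forward pass of the conditional model on random tensors.
import torch
from diffwave.model import DiffWave
from diffwave.params import params
model = DiffWave(params)
frames = params.crop_mel_frames                        # 62 by default
audio = torch.randn(1, frames * params.hop_samples)    # [B, T] with T = 62 * 256
spectrogram = torch.randn(1, params.n_mels, frames)    # [B, n_mels, frames]
t = torch.randint(0, len(params.noise_schedule), [1])  # integer diffusion step
with torch.no_grad():
    predicted_noise = model(audio, t, spectrogram)     # shape [B, 1, T]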
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import random
import torch
import torchaudio
from glob import glob
from torch.utils.data.distributed import DistributedSampler
import torch.nn.functional as F
class ConditionalDataset(torch.utils.data.Dataset):
def __init__(self, paths):
super().__init__()
self.filenames = []
for path in paths:
self.filenames += glob(f'{path}/**/*.wav', recursive=True)
def __len__(self):
return len(self.filenames)
def __getitem__(self, idx):
audio_filename = self.filenames[idx]
spec_filename = f'{audio_filename}.spec.npy'
if torchaudio.__version__ > '0.7.0':
signal, _ = torchaudio.load(audio_filename)
else:
signal, _ = torchaudio.load_wav(audio_filename)
spectrogram = np.load(spec_filename)
# https://github.com/lmnt-com/diffwave/issues/15
out = signal[0] if torchaudio.__version__ > '0.7.0' else signal[0] / 32767.5
return {
'audio': out,
'spectrogram': spectrogram.T
}
class UnconditionalDataset(torch.utils.data.Dataset):
def __init__(self, paths):
super().__init__()
self.filenames = []
for path in paths:
self.filenames += glob(f'{path}/**/*.wav', recursive=True)
def __len__(self):
return len(self.filenames)
def __getitem__(self, idx):
audio_filename = self.filenames[idx]
spec_filename = f'{audio_filename}.spec.npy'
if torchaudio.__version__ > '0.7.0':
signal, _ = torchaudio.load(audio_filename)
else:
signal, _ = torchaudio.load_wav(audio_filename)
out = signal[0] if torchaudio.__version__ > '0.7.0' else signal[0] / 32767.5
return {
'audio': out,
'spectrogram': None
}
class Collator:
def __init__(self, params):
self.params = params
def collate(self, minibatch):
samples_per_frame = self.params.hop_samples
for record in minibatch:
if self.params.unconditional:
# Filter out records that aren't long enough.
if len(record['audio']) < self.params.audio_len:
del record['spectrogram']
del record['audio']
continue
start = random.randint(0, record['audio'].shape[-1] - self.params.audio_len)
end = start + self.params.audio_len
record['audio'] = record['audio'][start:end]
record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant')
else:
# Filter out records that aren't long enough.
if len(record['spectrogram']) < self.params.crop_mel_frames:
del record['spectrogram']
del record['audio']
continue
start = random.randint(0, record['spectrogram'].shape[0] - self.params.crop_mel_frames)
end = start + self.params.crop_mel_frames
record['spectrogram'] = record['spectrogram'][start:end].T
start *= samples_per_frame
end *= samples_per_frame
record['audio'] = record['audio'][start:end]
record['audio'] = np.pad(record['audio'], (0, (end-start) - len(record['audio'])), mode='constant')
audio = np.stack([record['audio'] for record in minibatch if 'audio' in record])
if self.params.unconditional:
return {
'audio': torch.from_numpy(audio),
'spectrogram': None,
}
spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record])
return {
'audio': torch.from_numpy(audio),
'spectrogram': torch.from_numpy(spectrogram),
}
# for gtzan
def collate_gtzan(self, minibatch):
ldata = []
mean_audio_len = self.params.audio_len # change to fit in gpu memory
# audio total generated time = audio_len * sample_rate
# GTZAN statistics
# max len audio 675808; min len audio sample 660000; mean len audio sample 662117
# max audio sample 1; min audio sample -1; mean audio sample -0.0010 (normalized)
# sample rate of all is 22050
for data in minibatch:
if data[0].shape[-1] < mean_audio_len: # pad
data_audio = F.pad(data[0], (0, mean_audio_len - data[0].shape[-1]), mode='constant', value=0)
elif data[0].shape[-1] > mean_audio_len: # crop
start = random.randint(0, data[0].shape[-1] - mean_audio_len)
end = start + mean_audio_len
data_audio = data[0][:, start:end]
else:
data_audio = data[0]
ldata.append(data_audio)
audio = torch.cat(ldata, dim=0)
return {
'audio': audio,
'spectrogram': None,
}
def from_path(data_dirs, params, is_distributed=False):
if params.unconditional:
dataset = UnconditionalDataset(data_dirs)
else:#with condition
dataset = ConditionalDataset(data_dirs)
return torch.utils.data.DataLoader(
dataset,
batch_size=params.batch_size,
collate_fn=Collator(params).collate,
shuffle=not is_distributed,
num_workers=os.cpu_count(),
sampler=DistributedSampler(dataset) if is_distributed else None,
pin_memory=True,
drop_last=True)
def from_gtzan(params, is_distributed=False):
dataset = torchaudio.datasets.GTZAN('./data', download=True)
return torch.utils.data.DataLoader(
dataset,
batch_size=params.batch_size,
collate_fn=Collator(params).collate_gtzan,
shuffle=not is_distributed,
num_workers=os.cpu_count(),
sampler=DistributedSampler(dataset) if is_distributed else None,
pin_memory=True,
drop_last=True)
| diffwave-master | src/diffwave/dataset.py |
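A sketch of the conditional data pipeline above (the path is a placeholder, and it assumes diffwave.preprocess has already written the .spec.npy files): each batch pairs a fixed-length audio crop with the matching mel crop.
# Sketch: build the conditional training loader and inspect one batch.
from diffwave.dataset import from_path
from diffwave.params import params
loader = from_path(['/data/LJSpeech-1.1/wavs'], params)  # placeholder data directory
batch = next(iter(loader))
print(batch['audio'].shape)        # [batch_size, crop_mel_frames * hop_samples]
print(batch['spectrogram'].shape)  # [batch_size, n_mels, crop_mel_frames]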
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import torch
import torchaudio
from argparse import ArgumentParser
from diffwave.params import AttrDict, params as base_params
from diffwave.model import DiffWave
models = {}
def predict(spectrogram=None, model_dir=None, params=None, device=torch.device('cuda'), fast_sampling=False):
# Lazy load model.
if not model_dir in models:
if os.path.exists(f'{model_dir}/weights.pt'):
checkpoint = torch.load(f'{model_dir}/weights.pt')
else:
checkpoint = torch.load(model_dir)
model = DiffWave(AttrDict(base_params)).to(device)
model.load_state_dict(checkpoint['model'])
model.eval()
models[model_dir] = model
model = models[model_dir]
model.params.override(params)
with torch.no_grad():
# Change in notation from the DiffWave paper for fast sampling.
# DiffWave paper -> Implementation below
# --------------------------------------
# alpha -> talpha
# beta -> training_noise_schedule
# gamma -> alpha
# eta -> beta
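# Each entry of T computed below is a (possibly fractional) training-step index
# for one inference step: find the adjacent pair of training steps whose
# cumulative alphas bracket the inference-time alpha_cum, then interpolate
# linearly between them in sqrt(alpha_cum) space.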
training_noise_schedule = np.array(model.params.noise_schedule)
inference_noise_schedule = np.array(model.params.inference_noise_schedule) if fast_sampling else training_noise_schedule
talpha = 1 - training_noise_schedule
talpha_cum = np.cumprod(talpha)
beta = inference_noise_schedule
alpha = 1 - beta
alpha_cum = np.cumprod(alpha)
T = []
for s in range(len(inference_noise_schedule)):
for t in range(len(training_noise_schedule) - 1):
if talpha_cum[t+1] <= alpha_cum[s] <= talpha_cum[t]:
twiddle = (talpha_cum[t]**0.5 - alpha_cum[s]**0.5) / (talpha_cum[t]**0.5 - talpha_cum[t+1]**0.5)
T.append(t + twiddle)
break
T = np.array(T, dtype=np.float32)
if not model.params.unconditional:
if len(spectrogram.shape) == 2:  # Expand rank 2 tensors by adding a batch dimension.
spectrogram = spectrogram.unsqueeze(0)
spectrogram = spectrogram.to(device)
audio = torch.randn(spectrogram.shape[0], model.params.hop_samples * spectrogram.shape[-1], device=device)
else:
audio = torch.randn(1, params.audio_len, device=device)
noise_scale = torch.from_numpy(alpha_cum**0.5).float().unsqueeze(1).to(device)
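# Reverse diffusion: at each step the network predicts the noise component,
# the update removes c2 * predicted_noise and rescales by c1, and fresh
# Gaussian noise scaled by sigma is added at every step except the last.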
for n in range(len(alpha) - 1, -1, -1):
c1 = 1 / alpha[n]**0.5
c2 = beta[n] / (1 - alpha_cum[n])**0.5
audio = c1 * (audio - c2 * model(audio, torch.tensor([T[n]], device=audio.device), spectrogram).squeeze(1))
if n > 0:
noise = torch.randn_like(audio)
sigma = ((1.0 - alpha_cum[n-1]) / (1.0 - alpha_cum[n]) * beta[n])**0.5
audio += sigma * noise
audio = torch.clamp(audio, -1.0, 1.0)
return audio, model.params.sample_rate
def main(args):
if args.spectrogram_path:
spectrogram = torch.from_numpy(np.load(args.spectrogram_path))
else:
spectrogram = None
audio, sr = predict(spectrogram, model_dir=args.model_dir, fast_sampling=args.fast, params=base_params)
torchaudio.save(args.output, audio.cpu(), sample_rate=sr)
if __name__ == '__main__':
parser = ArgumentParser(description='runs inference on a spectrogram file generated by diffwave.preprocess')
parser.add_argument('model_dir',
help='directory containing a trained model (or full path to weights.pt file)')
parser.add_argument('--spectrogram_path', '-s',
help='path to a spectrogram file generated by diffwave.preprocess')
parser.add_argument('--output', '-o', default='output.wav',
help='output file name')
parser.add_argument('--fast', '-f', action='store_true',
help='fast sampling procedure')
main(parser.parse_args())
| diffwave-master | src/diffwave/inference.py |
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from argparse import ArgumentParser
from torch.cuda import device_count
from torch.multiprocessing import spawn
from diffwave.learner import train, train_distributed
from diffwave.params import params
def _get_free_port():
import socketserver
with socketserver.TCPServer(('localhost', 0), None) as s:
return s.server_address[1]
def main(args):
replica_count = device_count()
if replica_count > 1:
if params.batch_size % replica_count != 0:
raise ValueError(f'Batch size {params.batch_size} is not evenly divisible by # GPUs {replica_count}.')
params.batch_size = params.batch_size // replica_count
port = _get_free_port()
spawn(train_distributed, args=(replica_count, port, args, params), nprocs=replica_count, join=True)
else:
train(args, params)
if __name__ == '__main__':
parser = ArgumentParser(description='train (or resume training) a DiffWave model')
parser.add_argument('model_dir',
help='directory in which to store model checkpoints and training logs')
parser.add_argument('data_dirs', nargs='+',
help='space separated list of directories from which to read .wav files for training')
parser.add_argument('--max_steps', default=None, type=int,
help='maximum number of training steps')
parser.add_argument('--fp16', action='store_true', default=False,
help='use 16-bit floating point operations for training')
main(parser.parse_args())
| diffwave-master | src/diffwave/__main__.py |
# Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import torch
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from diffwave.dataset import from_path, from_gtzan
from diffwave.model import DiffWave
from diffwave.params import AttrDict
def _nested_map(struct, map_fn):
if isinstance(struct, tuple):
return tuple(_nested_map(x, map_fn) for x in struct)
if isinstance(struct, list):
return [_nested_map(x, map_fn) for x in struct]
if isinstance(struct, dict):
return { k: _nested_map(v, map_fn) for k, v in struct.items() }
return map_fn(struct)
class DiffWaveLearner:
def __init__(self, model_dir, model, dataset, optimizer, params, *args, **kwargs):
os.makedirs(model_dir, exist_ok=True)
self.model_dir = model_dir
self.model = model
self.dataset = dataset
self.optimizer = optimizer
self.params = params
self.autocast = torch.cuda.amp.autocast(enabled=kwargs.get('fp16', False))
self.scaler = torch.cuda.amp.GradScaler(enabled=kwargs.get('fp16', False))
self.step = 0
self.is_master = True
beta = np.array(self.params.noise_schedule)
noise_level = np.cumprod(1 - beta)
self.noise_level = torch.tensor(noise_level.astype(np.float32))
self.loss_fn = nn.L1Loss()
self.summary_writer = None
def state_dict(self):
if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
model_state = self.model.module.state_dict()
else:
model_state = self.model.state_dict()
return {
'step': self.step,
'model': { k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in model_state.items() },
'optimizer': { k: v.cpu() if isinstance(v, torch.Tensor) else v for k, v in self.optimizer.state_dict().items() },
'params': dict(self.params),
'scaler': self.scaler.state_dict(),
}
def load_state_dict(self, state_dict):
if hasattr(self.model, 'module') and isinstance(self.model.module, nn.Module):
self.model.module.load_state_dict(state_dict['model'])
else:
self.model.load_state_dict(state_dict['model'])
self.optimizer.load_state_dict(state_dict['optimizer'])
self.scaler.load_state_dict(state_dict['scaler'])
self.step = state_dict['step']
def save_to_checkpoint(self, filename='weights'):
save_basename = f'{filename}-{self.step}.pt'
save_name = f'{self.model_dir}/{save_basename}'
link_name = f'{self.model_dir}/{filename}.pt'
torch.save(self.state_dict(), save_name)
if os.name == 'nt':
torch.save(self.state_dict(), link_name)
else:
if os.path.islink(link_name):
os.unlink(link_name)
os.symlink(save_basename, link_name)
def restore_from_checkpoint(self, filename='weights'):
try:
checkpoint = torch.load(f'{self.model_dir}/{filename}.pt')
self.load_state_dict(checkpoint)
return True
except FileNotFoundError:
return False
def train(self, max_steps=None):
device = next(self.model.parameters()).device
while True:
for features in tqdm(self.dataset, desc=f'Epoch {self.step // len(self.dataset)}') if self.is_master else self.dataset:
if max_steps is not None and self.step >= max_steps:
return
features = _nested_map(features, lambda x: x.to(device) if isinstance(x, torch.Tensor) else x)
loss = self.train_step(features)
if torch.isnan(loss).any():
raise RuntimeError(f'Detected NaN loss at step {self.step}.')
if self.is_master:
if self.step % 50 == 0:
self._write_summary(self.step, features, loss)
if self.step % len(self.dataset) == 0:
self.save_to_checkpoint()
self.step += 1
def train_step(self, features):
for param in self.model.parameters():
param.grad = None
audio = features['audio']
spectrogram = features['spectrogram']
N, T = audio.shape
device = audio.device
self.noise_level = self.noise_level.to(device)
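# Diffusion training step: sample a random timestep t, corrupt the clean audio
# with the closed-form forward process
#   noisy_audio = sqrt(noise_level[t]) * audio + sqrt(1 - noise_level[t]) * noise,
# and train the network to recover the injected noise under an L1 loss.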
with self.autocast:
t = torch.randint(0, len(self.params.noise_schedule), [N], device=audio.device)
noise_scale = self.noise_level[t].unsqueeze(1)
noise_scale_sqrt = noise_scale**0.5
noise = torch.randn_like(audio)
noisy_audio = noise_scale_sqrt * audio + (1.0 - noise_scale)**0.5 * noise
predicted = self.model(noisy_audio, t, spectrogram)
loss = self.loss_fn(noise, predicted.squeeze(1))
self.scaler.scale(loss).backward()
self.scaler.unscale_(self.optimizer)
self.grad_norm = nn.utils.clip_grad_norm_(self.model.parameters(), self.params.max_grad_norm or 1e9)
self.scaler.step(self.optimizer)
self.scaler.update()
return loss
def _write_summary(self, step, features, loss):
writer = self.summary_writer or SummaryWriter(self.model_dir, purge_step=step)
writer.add_audio('feature/audio', features['audio'][0], step, sample_rate=self.params.sample_rate)
if not self.params.unconditional:
writer.add_image('feature/spectrogram', torch.flip(features['spectrogram'][:1], [1]), step)
writer.add_scalar('train/loss', loss, step)
writer.add_scalar('train/grad_norm', self.grad_norm, step)
writer.flush()
self.summary_writer = writer
def _train_impl(replica_id, model, dataset, args, params):
torch.backends.cudnn.benchmark = True
opt = torch.optim.Adam(model.parameters(), lr=params.learning_rate)
learner = DiffWaveLearner(args.model_dir, model, dataset, opt, params, fp16=args.fp16)
learner.is_master = (replica_id == 0)
learner.restore_from_checkpoint()
learner.train(max_steps=args.max_steps)
def train(args, params):
if args.data_dirs[0] == 'gtzan':
dataset = from_gtzan(params)
else:
dataset = from_path(args.data_dirs, params)
model = DiffWave(params).cuda()
_train_impl(0, model, dataset, args, params)
def train_distributed(replica_id, replica_count, port, args, params):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = str(port)
torch.distributed.init_process_group('nccl', rank=replica_id, world_size=replica_count)
if args.data_dirs[0] == 'gtzan':
dataset = from_gtzan(params, is_distributed=True)
else:
dataset = from_path(args.data_dirs, params, is_distributed=True)
device = torch.device('cuda', replica_id)
torch.cuda.set_device(device)
model = DiffWave(params).to(device)
model = DistributedDataParallel(model, device_ids=[replica_id])
_train_impl(replica_id, model, dataset, args, params)
| diffwave-master | src/diffwave/learner.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import time
import faiss
import numpy as np
from PIL import Image
from PIL import ImageFile
from scipy.sparse import csr_matrix, find
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
ImageFile.LOAD_TRUNCATED_IMAGES = True
__all__ = ['PIC', 'Kmeans', 'cluster_assign', 'arrange_clustering']
def pil_loader(path):
"""Loads an image.
Args:
path (string): path to image file
Returns:
Image
"""
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
class ReassignedDataset(data.Dataset):
"""A dataset where the new images labels are given in argument.
Args:
image_indexes (list): list of data indexes
pseudolabels (list): list of labels for each data
dataset (list): list of tuples with paths to images
transform (callable, optional): a function/transform that takes in
an PIL image and returns a
transformed version
"""
def __init__(self, image_indexes, pseudolabels, dataset, transform=None):
self.imgs = self.make_dataset(image_indexes, pseudolabels, dataset)
self.transform = transform
def make_dataset(self, image_indexes, pseudolabels, dataset):
label_to_idx = {label: idx for idx, label in enumerate(set(pseudolabels))}
images = []
for j, idx in enumerate(image_indexes):
path = dataset[idx][0]
pseudolabel = label_to_idx[pseudolabels[j]]
images.append((path, pseudolabel))
return images
def __getitem__(self, index):
"""
Args:
index (int): index of data
Returns:
tuple: (image, pseudolabel) where pseudolabel is the cluster assigned to this datapoint
"""
path, pseudolabel = self.imgs[index]
img = pil_loader(path)
if self.transform is not None:
img = self.transform(img)
return img, pseudolabel
def __len__(self):
return len(self.imgs)
def preprocess_features(npdata, pca=256):
"""Preprocess an array of features.
Args:
npdata (np.array N * ndim): features to preprocess
pca (int): dim of output
Returns:
np.array of dim N * pca: data PCA-reduced, whitened and L2-normalized
"""
_, ndim = npdata.shape
npdata = npdata.astype('float32')
# Apply PCA-whitening with Faiss
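# eigen_power=-0.5 rescales each principal direction by 1/sqrt(eigenvalue),
# which turns the plain PCA projection into a whitening transform.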
mat = faiss.PCAMatrix (ndim, pca, eigen_power=-0.5)
mat.train(npdata)
assert mat.is_trained
npdata = mat.apply_py(npdata)
# L2 normalization
row_sums = np.linalg.norm(npdata, axis=1)
npdata = npdata / row_sums[:, np.newaxis]
return npdata
def make_graph(xb, nnn):
"""Builds a graph of nearest neighbors.
Args:
xb (np.array): data
nnn (int): number of nearest neighbors
Returns:
list: for each data the list of ids to its nnn nearest neighbors
list: for each data the list of distances to its nnn NN
"""
N, dim = xb.shape
# we need only a StandardGpuResources per GPU
res = faiss.StandardGpuResources()
# L2
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = int(torch.cuda.device_count()) - 1
index = faiss.GpuIndexFlatL2(res, dim, flat_config)
index.add(xb)
D, I = index.search(xb, nnn + 1)
return I, D
def cluster_assign(images_lists, dataset):
"""Creates a dataset from clustering, with clusters as labels.
Args:
images_lists (list of list): for each cluster, the list of image indexes
belonging to this cluster
dataset (list): initial dataset
Returns:
ReassignedDataset(torch.utils.data.Dataset): a dataset with clusters as
labels
"""
assert images_lists is not None
pseudolabels = []
image_indexes = []
for cluster, images in enumerate(images_lists):
image_indexes.extend(images)
pseudolabels.extend([cluster] * len(images))
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
t = transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize])
return ReassignedDataset(image_indexes, pseudolabels, dataset, t)
def run_kmeans(x, nmb_clusters, verbose=False):
"""Runs kmeans on 1 GPU.
Args:
x: data
nmb_clusters (int): number of clusters
Returns:
list: ids of data in each cluster
"""
n_data, d = x.shape
# faiss implementation of k-means
clus = faiss.Clustering(d, nmb_clusters)
# Change faiss seed at each k-means so that the randomly picked
# initialization centroids do not correspond to the same feature ids
# from one epoch to the next.
clus.seed = np.random.randint(1234)
clus.niter = 20
clus.max_points_per_centroid = 10000000
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.useFloat16 = False
flat_config.device = 0
index = faiss.GpuIndexFlatL2(res, d, flat_config)
# perform the training
clus.train(x, index)
_, I = index.search(x, 1)
losses = faiss.vector_to_array(clus.obj)
if verbose:
print('k-means loss evolution: {0}'.format(losses))
return [int(n[0]) for n in I], losses[-1]
def arrange_clustering(images_lists):
pseudolabels = []
image_indexes = []
for cluster, images in enumerate(images_lists):
image_indexes.extend(images)
pseudolabels.extend([cluster] * len(images))
indexes = np.argsort(image_indexes)
return np.asarray(pseudolabels)[indexes]
class Kmeans(object):
def __init__(self, k):
self.k = k
def cluster(self, data, verbose=False):
"""Performs k-means clustering.
Args:
x_data (np.array N * dim): data to cluster
"""
end = time.time()
# PCA-reducing, whitening and L2-normalization
xb = preprocess_features(data)
# cluster the data
I, loss = run_kmeans(xb, self.k, verbose)
self.images_lists = [[] for i in range(self.k)]
for i in range(len(data)):
self.images_lists[I[i]].append(i)
if verbose:
print('k-means time: {0:.0f} s'.format(time.time() - end))
return loss
def make_adjacencyW(I, D, sigma):
"""Create adjacency matrix with a Gaussian kernel.
Args:
I (numpy array): for each vertex the ids to its nnn linked vertices
+ first column of identity.
D (numpy array): for each data the l2 distances to its nnn linked vertices
+ first column of zeros.
sigma (float): Bandwidth of the Gaussian kernel.
Returns:
csr_matrix: affinity matrix of the graph.
"""
V, k = I.shape
k = k - 1
indices = np.reshape(np.delete(I, 0, 1), (1, -1))
indptr = np.multiply(k, np.arange(V + 1))
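# The affinity matrix is assembled in CSR form: row i holds exactly k entries,
# the Gaussian-kernel weights exp(-d / sigma^2) to the k nearest neighbors of i
# (the leading self-match column of I and D is dropped).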
def exp_ker(d):
return np.exp(-d / sigma**2)
exp_ker = np.vectorize(exp_ker)
res_D = exp_ker(D)
data = np.reshape(np.delete(res_D, 0, 1), (1, -1))
adj_matrix = csr_matrix((data[0], indices[0], indptr), shape=(V, V))
return adj_matrix
def run_pic(I, D, sigma, alpha):
"""Run PIC algorithm"""
a = make_adjacencyW(I, D, sigma)
graph = a + a.transpose()
cgraph = graph
nim = graph.shape[0]
W = graph
t0 = time.time()
v0 = np.ones(nim) / nim
# power iterations
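# Repeatedly apply v <- alpha * W^T v + (1 - alpha)/n followed by L1
# normalization; after the fixed number of iterations each node is assigned to
# the basin of attraction of a local maximum of v (see find_maxima_cluster).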
v = v0.astype('float32')
t0 = time.time()
dt = 0
for i in range(200):
vnext = np.zeros(nim, dtype='float32')
vnext = vnext + W.transpose().dot(v)
vnext = alpha * vnext + (1 - alpha) / nim
# L1 normalize
vnext /= vnext.sum()
v = vnext
if i == 200 - 1:
clust = find_maxima_cluster(W, v)
return [int(i) for i in clust]
def find_maxima_cluster(W, v):
n, m = W.shape
assert (n == m)
assign = np.zeros(n)
# for each node
pointers = list(range(n))
for i in range(n):
best_vi = 0
l0 = W.indptr[i]
l1 = W.indptr[i + 1]
for l in range(l0, l1):
j = W.indices[l]
vi = W.data[l] * (v[j] - v[i])
if vi > best_vi:
best_vi = vi
pointers[i] = j
n_clus = 0
cluster_ids = -1 * np.ones(n)
for i in range(n):
if pointers[i] == i:
cluster_ids[i] = n_clus
n_clus = n_clus + 1
for i in range(n):
# go from pointers to pointers starting from i until reached a local optim
current_node = i
while pointers[current_node] != current_node:
current_node = pointers[current_node]
assign[i] = cluster_ids[current_node]
assert (assign[i] >= 0)
return assign
class PIC(object):
"""Class to perform Power Iteration Clustering on a graph of nearest neighbors.
Args:
args: for consistency with k-means init
sigma (float): bandwidth of the Gaussian kernel (default 0.2)
nnn (int): number of nearest neighbors (default 5)
alpha (float): parameter in PIC (default 0.001)
distribute_singletons (bool): If True, reassign each singleton to
the cluster of its closest non
singleton nearest neighbors (up to nnn
nearest neighbors).
Attributes:
images_lists (list of list): for each cluster, the list of image indexes
belonging to this cluster
"""
def __init__(self, args=None, sigma=0.2, nnn=5, alpha=0.001, distribute_singletons=True):
self.sigma = sigma
self.alpha = alpha
self.nnn = nnn
self.distribute_singletons = distribute_singletons
def cluster(self, data, verbose=False):
end = time.time()
# preprocess the data
xb = preprocess_features(data)
# construct nnn graph
I, D = make_graph(xb, self.nnn)
# run PIC
clust = run_pic(I, D, self.sigma, self.alpha)
images_lists = {}
for h in set(clust):
images_lists[h] = []
for data, c in enumerate(clust):
images_lists[c].append(data)
# allocate singletons to clusters of their closest NN not singleton
if self.distribute_singletons:
clust_NN = {}
for i in images_lists:
# if singleton
if len(images_lists[i]) == 1:
s = images_lists[i][0]
# for NN
for n in I[s, 1:]:
# if NN is not a singleton
if not len(images_lists[clust[n]]) == 1:
clust_NN[s] = n
break
for s in clust_NN:
del images_lists[clust[s]]
clust[s] = clust[clust_NN[s]]
images_lists[clust[s]].append(s)
self.images_lists = []
for c in images_lists:
self.images_lists.append(images_lists[c])
if verbose:
print('pic time: {0:.0f} s'.format(time.time() - end))
return 0
| deepcluster-main | clustering.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import pickle
import numpy as np
import torch
from torch.utils.data.sampler import Sampler
import models
def load_model(path):
"""Loads model and return it without DataParallel table."""
if os.path.isfile(path):
print("=> loading checkpoint '{}'".format(path))
checkpoint = torch.load(path)
# size of the top layer
N = checkpoint['state_dict']['top_layer.bias'].size()
# build skeleton of the model
sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()
model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))
# deal with a dataparallel table
def rename_key(key):
if not 'module' in key:
return key
return ''.join(key.split('.module'))
checkpoint['state_dict'] = {rename_key(key): val
for key, val
in checkpoint['state_dict'].items()}
# load weights
model.load_state_dict(checkpoint['state_dict'])
print("Loaded")
else:
model = None
print("=> no checkpoint found at '{}'".format(path))
return model
class UnifLabelSampler(Sampler):
"""Samples elements uniformely accross pseudolabels.
Args:
N (int): size of returned iterator.
images_lists: dict of key (target), value (list of data with this target)
"""
def __init__(self, N, images_lists):
self.N = N
self.images_lists = images_lists
self.indexes = self.generate_indexes_epoch()
def generate_indexes_epoch(self):
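# Sample roughly N / (number of non-empty clusters) indexes from each non-empty
# cluster (with replacement when a cluster is smaller than that), shuffle, and
# trim or pad the result so that exactly N indexes are returned.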
nmb_non_empty_clusters = 0
for i in range(len(self.images_lists)):
if len(self.images_lists[i]) != 0:
nmb_non_empty_clusters += 1
size_per_pseudolabel = int(self.N / nmb_non_empty_clusters) + 1
res = np.array([])
for i in range(len(self.images_lists)):
# skip empty clusters
if len(self.images_lists[i]) == 0:
continue
indexes = np.random.choice(
self.images_lists[i],
size_per_pseudolabel,
replace=(len(self.images_lists[i]) <= size_per_pseudolabel)
)
res = np.concatenate((res, indexes))
np.random.shuffle(res)
res = list(res.astype('int'))
if len(res) >= self.N:
return res[:self.N]
res += res[: (self.N - len(res))]
return res
def __iter__(self):
return iter(self.indexes)
def __len__(self):
return len(self.indexes)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def learning_rate_decay(optimizer, t, lr_0):
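# Inverse square-root decay: lr_t = lr_0 / sqrt(1 + lr_0 * weight_decay * t).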
for param_group in optimizer.param_groups:
lr = lr_0 / np.sqrt(1 + lr_0 * param_group['weight_decay'] * t)
param_group['lr'] = lr
class Logger(object):
""" Class to update every epoch to keep trace of the results
Methods:
- log() log and save
"""
def __init__(self, path):
self.path = path
self.data = []
def log(self, train_point):
self.data.append(train_point)
with open(os.path.join(self.path), 'wb') as fp:
pickle.dump(self.data, fp, -1)
| deepcluster-main | util.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from collections import OrderedDict
import os
import pickle
import subprocess
import sys
import numpy as np
from PIL import Image
import torch
import torchvision
from torch.autograd import Variable
from util import load_model
class ImageHelper:
def __init__(self, S, L, transforms):
self.S = S
self.L = L
self.transforms = transforms
def load_and_prepare_image(self, fname, roi=None):
# Read image, get aspect ratio, and resize such as the largest side equals S
im = Image.open(fname)
im_size_hw = np.array((im.size[1], im.size[0]))
if self.S == -1:
ratio = 1.0
elif self.S == -2:
if np.max(im_size_hw) > 124:
ratio = 1024.0/np.max(im_size_hw)
else:
ratio = -1
else:
ratio = float(self.S)/np.max(im_size_hw)
new_size = tuple(np.round(im_size_hw * ratio).astype(np.int32))
im_resized = self.transforms(im.resize((new_size[1], new_size[0]), Image.BILINEAR))
# If there is a roi, adapt the roi to the new size and crop. Do not rescale
# the image once again
if roi is not None:
# ROI format is (xmin,ymin,xmax,ymax)
roi = np.round(roi * ratio).astype(np.int32)
im_resized = im_resized[:, roi[1]:roi[3], roi[0]:roi[2]]
return im_resized
def get_rmac_region_coordinates(self, H, W, L):
# Almost verbatim from Tolias et al Matlab implementation.
# Could be heavily pythonized, but really not worth it...
# Desired overlap of neighboring regions
ovr = 0.4
# Possible regions for the long dimension
steps = np.array((2, 3, 4, 5, 6, 7), dtype=np.float32)
w = np.minimum(H, W)
b = (np.maximum(H, W) - w) / (steps - 1)
# steps(idx) regions for long dimension. The +1 comes from Matlab
# 1-indexing...
idx = np.argmin(np.abs(((w**2 - w * b) / w**2) - ovr)) + 1
# Region overplus per dimension
Wd = 0
Hd = 0
if H < W:
Wd = idx
elif H > W:
Hd = idx
regions_xywh = []
for l in range(1, L+1):
wl = np.floor(2 * w / (l + 1))
wl2 = np.floor(wl / 2 - 1)
# Center coordinates
if l + Wd - 1 > 0:
b = (W - wl) / (l + Wd - 1)
else:
b = 0
cenW = np.floor(wl2 + b * np.arange(l - 1 + Wd + 1)) - wl2
# Center coordinates
if l + Hd - 1 > 0:
b = (H - wl) / (l + Hd - 1)
else:
b = 0
cenH = np.floor(wl2 + b * np.arange(l - 1 + Hd + 1)) - wl2
for i_ in cenH:
for j_ in cenW:
regions_xywh.append([j_, i_, wl, wl])
# Round the regions. Careful with the borders!
for i in range(len(regions_xywh)):
for j in range(4):
regions_xywh[i][j] = int(round(regions_xywh[i][j]))
if regions_xywh[i][0] + regions_xywh[i][2] > W:
regions_xywh[i][0] -= ((regions_xywh[i][0] + regions_xywh[i][2]) - W)
if regions_xywh[i][1] + regions_xywh[i][3] > H:
regions_xywh[i][1] -= ((regions_xywh[i][1] + regions_xywh[i][3]) - H)
return np.array(regions_xywh)
class PCA(object):
'''
Fits and applies PCA whitening
'''
def __init__(self, n_components):
self.n_components = n_components
def fit(self, X):
mean = X.mean(axis=0)
X -= mean
self.mean = Variable(torch.from_numpy(mean).view(1, -1))
Xcov = np.dot(X.T, X)
d, V = np.linalg.eigh(Xcov)
eps = d.max() * 1e-5
n_0 = (d < eps).sum()
if n_0 > 0:
print("%d / %d singular values are 0" % (n_0, d.size))
d[d < eps] = eps
totenergy = d.sum()
idx = np.argsort(d)[::-1][:self.n_components]
d = d[idx]
V = V[:, idx]
print("keeping %.2f %% of the energy" % (d.sum() / totenergy * 100.0))
D = np.diag(1. / np.sqrt(d))
self.DVt = Variable(torch.from_numpy(np.dot(D, V.T)))
def to_cuda(self):
self.mean = self.mean.cuda()
self.DVt = self.DVt.cuda()
def apply(self, X):
X = X - self.mean
num = torch.mm(self.DVt, X.transpose(0, 1)).transpose(0, 1)
# L2 normalize on output
return num
class Dataset:
def __init__(self, path, eval_binary_path):
self.path = path
self.eval_binary_path = eval_binary_path
# Some images from the Paris dataset are corrupted. Standard practice is
# to ignore them
self.blacklisted = set(["paris_louvre_000136",
"paris_louvre_000146",
"paris_moulinrouge_000422",
"paris_museedorsay_001059",
"paris_notredame_000188",
"paris_pantheon_000284",
"paris_pantheon_000960",
"paris_pantheon_000974",
"paris_pompidou_000195",
"paris_pompidou_000196",
"paris_pompidou_000201",
"paris_pompidou_000467",
"paris_pompidou_000640",
"paris_sacrecoeur_000299",
"paris_sacrecoeur_000330",
"paris_sacrecoeur_000353",
"paris_triomphe_000662",
"paris_triomphe_000833",
"paris_triomphe_000863",
"paris_triomphe_000867"])
self.load()
def load(self):
# Load the dataset GT
self.lab_root = '{0}/lab/'.format(self.path)
self.img_root = '{0}/jpg/'.format(self.path)
lab_filenames = np.sort(os.listdir(self.lab_root))
# Get the filenames without the extension
self.img_filenames = [e[:-4] for e in np.sort(os.listdir(self.img_root))
if e[:-4] not in self.blacklisted]
# Parse the label files. Some challenges as filenames do not correspond
# exactly to query names. Go through all the labels to:
# i) map names to filenames and vice versa
# ii) get the relevant regions of interest of the queries,
# iii) get the indexes of the dataset images that are queries
# iv) get the relevants / non-relevants list
self.relevants = {}
self.junk = {}
self.non_relevants = {}
self.filename_to_name = {}
self.name_to_filename = OrderedDict()
self.q_roi = {}
for e in lab_filenames:
if e.endswith('_query.txt'):
q_name = e[:-len('_query.txt')]
q_data = open("{0}/{1}".format(self.lab_root, e)).readline().split(" ")
q_filename = q_data[0][5:] if q_data[0].startswith('oxc1_') else q_data[0]
self.filename_to_name[q_filename] = q_name
self.name_to_filename[q_name] = q_filename
good = set([e.strip() for e in open("{0}/{1}_ok.txt".format(self.lab_root, q_name))])
good = good.union(set([e.strip() for e in open("{0}/{1}_good.txt".format(self.lab_root, q_name))]))
junk = set([e.strip() for e in open("{0}/{1}_junk.txt".format(self.lab_root, q_name))])
good_plus_junk = good.union(junk)
self.relevants[q_name] = [i for i in range(len(self.img_filenames))
if self.img_filenames[i] in good]
self.junk[q_name] = [i for i in range(len(self.img_filenames))
if self.img_filenames[i] in junk]
self.non_relevants[q_name] = [i for i in range(len(self.img_filenames))
if self.img_filenames[i] not in good_plus_junk]
self.q_roi[q_name] = np.array([float(q) for q in q_data[1:]], dtype=np.float32)
#np.array(map(float, q_data[1:]), dtype=np.float32)
self.q_names = list(self.name_to_filename.keys())  # materialize, q_names is indexed below
self.q_index = np.array([self.img_filenames.index(self.name_to_filename[qn])
for qn in self.q_names])
self.N_images = len(self.img_filenames)
self.N_queries = len(self.q_index)
def score(self, sim, temp_dir, eval_bin):
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
idx = np.argsort(sim, axis=1)[:, ::-1]
maps = [self.score_rnk_partial(i, idx[i], temp_dir, eval_bin)
for i in range(len(self.q_names))]
for i in range(len(self.q_names)):
print("{0}: {1:.2f}".format(self.q_names[i], 100 * maps[i]))
print(20 * "-")
print("Mean: {0:.2f}".format(100 * np.mean(maps)))
def score_rnk_partial(self, i, idx, temp_dir, eval_bin):
rnk = np.array(self.img_filenames)[idx]
with open("{0}/{1}.rnk".format(temp_dir, self.q_names[i]), 'w') as f:
f.write("\n".join(rnk)+"\n")
cmd = "{0} {1}{2} {3}/{4}.rnk".format(eval_bin, self.lab_root, self.q_names[i], temp_dir, self.q_names[i])
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
map_ = float(p.stdout.readlines()[0])
p.wait()
return map_
def get_filename(self, i):
return os.path.normpath("{0}/{1}.jpg".format(self.img_root,
self.img_filenames[i]))
def get_query_filename(self, i):
return os.path.normpath("{0}/{1}.jpg".format(self.img_root,
self.img_filenames[self.q_index[i]]))
def get_query_roi(self, i):
return self.q_roi[self.q_names[i]]
def ensure_directory_exists(fname):
dirname = fname[:fname.rfind('/')]
if not os.path.exists(dirname):
os.makedirs(dirname)
def normalize_L2(a, dim):
norms = torch.sqrt(torch.sum(a**2, dim=dim, keepdim=True))
return a / norms
def rmac(features, rmac_levels, pca=None):
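# R-MAC: max-pool the activation map over a multi-scale grid of regions,
# L2-normalize each regional descriptor, optionally PCA-whiten, then
# sum-aggregate across regions and L2-normalize the result.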
nim, nc, xd, yd = features.size()
rmac_regions = image_helper.get_rmac_region_coordinates(xd, yd, rmac_levels)
rmac_regions = rmac_regions.astype(np.int)
nr = len(rmac_regions)
rmac_descriptors = []
for x0, y0, w, h in rmac_regions:
desc = features[:, :, y0:y0 + h, x0:x0 + w]
desc = torch.max(desc, 2, keepdim=True)[0]
desc = torch.max(desc, 3, keepdim=True)[0]
# insert an additional dimension for the cat to work
rmac_descriptors.append(desc.view(-1, 1, nc))
rmac_descriptors = torch.cat(rmac_descriptors, 1)
rmac_descriptors = normalize_L2(rmac_descriptors, 2)
if pca is None:
return rmac_descriptors
# PCA + whitening
npca = pca.n_components
rmac_descriptors = pca.apply(rmac_descriptors.view(nr * nim, nc))
rmac_descriptors = normalize_L2(rmac_descriptors, 1)
rmac_descriptors = rmac_descriptors.view(nim, nr, npca)
# Sum aggregation and L2-normalization
rmac_descriptors = torch.sum(rmac_descriptors, 1)
rmac_descriptors = normalize_L2(rmac_descriptors, 1)
return rmac_descriptors
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evaluate Oxford / Paris')
parser.add_argument('--S', type=int, default=1024,
help='Resize larger side of image to S pixels (e.g. 800)')
parser.add_argument('--L', type=int, default=3,
help='Use L spatial levels (e.g. 3)')
parser.add_argument('--n_pca', type=int, default=512,
help='output dimension of PCA')
parser.add_argument('--model', type=str, default='pretrained',
help='Model from which RMAC is computed')
parser.add_argument('--dataset', type=str, required=True,
help='path to dataset')
parser.add_argument('--dataset_name', type=str, default='Oxford',
choices=['Oxford', 'Paris'], help='Dataset name')
parser.add_argument('--stage', type=str, default='extract_train',
choices=['extract_train', 'train_pca', 'db_features',
'q_features', 'eval'], help='what action to perform ')
parser.add_argument('--eval_binary', type=str, required=True,
help='Path to the compute_ap binary to evaluate Oxford / Paris')
parser.add_argument('--temp_dir', type=str, default='',
help='Path to a temporary directory to store features and scores')
parser.add_argument('--multires', dest='multires', action='store_true',
help='Enable multiresolution features')
parser.add_argument('--aqe', type=int, required=False,
help='Average query expansion with k neighbors')
parser.add_argument('--dbe', type=int, required=False,
help='Database expansion with k neighbors')
parser.set_defaults(multires=False)
args = parser.parse_args()
# Load the dataset and the image helper
print "Prepare the dataset from ", args.dataset
dataset = Dataset(args.dataset, args.eval_binary)
ensure_directory_exists(args.temp_dir + '/')
if args.stage in ('extract_train', 'db_features', 'q_features'):
if args.model == 'pretrained':
print("loading supervised pretrained VGG-16")
net = torchvision.models.vgg16_bn(pretrained=True)
else:
net = load_model(args.model)
transforms_comp = []
features_layers = list(net.features.children())[:-1]
net.features = torch.nn.Sequential(*features_layers)
transforms_comp.extend([
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
transforms = torchvision.transforms.Compose(transforms_comp)
print("moving to GPU")
net.cuda()
net.eval()
print(" done")
print("initialize image helper")
image_helper = ImageHelper(args.S, args.L, transforms)
if args.stage == 'extract_train':
print("extract regions for training")
# extract at a single scale
S = args.S
image_helper.S = S
N_dataset = dataset.N_images
def process_image(i):
print(i),
sys.stdout.flush()
fname_out = "{0}/{1}_S{2}_L{3}_regions/{4}.npy".format(args.temp_dir, args.dataset_name, S, args.L, i)
ensure_directory_exists(fname_out)
I = image_helper.load_and_prepare_image(dataset.get_filename(i), roi=None)
v = torch.autograd.Variable(I.unsqueeze(0))
vc = v.cuda()
if hasattr(net, 'sobel') and net.sobel is not None:
vc = net.sobel(vc)
activation_map = net.features(vc).cpu()
rmac_descriptors = rmac(activation_map, args.L)
np.save(fname_out, rmac_descriptors.data.numpy())
# map() is lazy in Python 3, so iterate explicitly to make sure every image is processed
for i in range(dataset.N_images):
process_image(i)
elif args.stage == 'train_pca':
# load training vectors
train_x = []
for i in range(10000):
fname_in = "{0}/{1}_S{2}_L{3}_regions/{4}.npy".format(args.temp_dir, args.dataset_name, args.S, args.L, i)
if not os.path.exists(fname_in):
break
x = np.load(fname_in)
train_x.append(x)
print("loaded %d train vectors" % len(train_x))
train_x = np.vstack([x.reshape(-1, x.shape[-1]) for x in train_x])
print(" size", train_x.shape)
pca = PCA(args.n_pca)
pca.fit(train_x)
pcaname = '%s/%s_S%d_PCA.pickle' % (args.temp_dir, args.dataset_name, args.S)
print("writing", pcaname)
pickle.dump(pca, open(pcaname, 'wb'), -1)
elif args.stage == 'db_features' or args.stage == 'q_features':
# for tests on Paris, use Oxford PCA, and vice-versa
pcaname = '%s/%s_S%d_PCA.pickle' % (
args.temp_dir, 'Paris' if args.dataset_name == 'Oxford' else 'Oxford', args.S)
print("loading PCA from", pcaname)
pca = pickle.load(open(pcaname, 'rb'))
print("Compute features")
# extract at a single scale
S = args.S
image_helper.S = S
N_dataset = dataset.N_images
def process_image(fname_in, roi, fname_out):
softmax = torch.nn.Softmax().cuda()
I = image_helper.load_and_prepare_image(fname_in, roi=roi)
v = torch.autograd.Variable(I.unsqueeze(0))
vc = v.cuda()
if hasattr(net, 'sobel') and net.sobel is not None:
vc = net.sobel(vc)
activation_map = net.features(vc).cpu()
descriptors = rmac(activation_map, args.L, pca=pca)
np.save(fname_out, descriptors.data.numpy())
if args.stage == 'db_features':
for i in range(dataset.N_images):
fname_in = dataset.get_filename(i)
fname_out = "{0}/{1}_S{2}_L{3}_db/{4}.npy".format(args.temp_dir, args.dataset_name, S, args.L, i)
ensure_directory_exists(fname_out)
print(i),
sys.stdout.flush()
process_image(fname_in, None, fname_out)
elif args.stage == 'q_features':
for i in range(dataset.N_queries):
fname_in = dataset.get_query_filename(i)
roi = dataset.get_query_roi(i)
fname_out = "{0}/{1}_S{2}_L{3}_q/{4}.npy".format(args.temp_dir, args.dataset_name, S, args.L, i)
ensure_directory_exists(fname_out)
print(i),
sys.stdout.flush()
process_image(fname_in, roi, fname_out)
elif args.stage == 'eval':
S = args.S
print("load query features")
features_queries = []
for i in range(dataset.N_queries):
fname = "{0}/{1}_S{2}_L{3}_q/{4}.npy".format(args.temp_dir, args.dataset_name, S, args.L, i)
features_queries.append(np.load(fname))
features_queries = np.vstack(features_queries)
print(" size", features_queries.shape)
print("load database features")
features_dataset = []
for i in range(dataset.N_images):
fname = "{0}/{1}_S{2}_L{3}_db/{4}.npy".format(args.temp_dir, args.dataset_name, S, args.L, i)
features_dataset.append(np.load(fname))
features_dataset = np.vstack(features_dataset)
print(" size", features_dataset.shape)
# Compute similarity
sim = features_queries.dot(features_dataset.T)
# Score
dataset.score(sim, args.temp_dir, args.eval_binary)
| deepcluster-main | eval_retrieval.py |
| deepcluster-main | __init__.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import math
import time
import glob
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torchvision
import torchvision.transforms as transforms
import torch.backends.cudnn as cudnn
from sklearn import metrics
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from util import AverageMeter, load_model
from eval_linear import accuracy
parser = argparse.ArgumentParser()
parser.add_argument('--vocdir', type=str, required=False, default='', help='pascal voc 2007 dataset')
parser.add_argument('--split', type=str, required=False, default='train', choices=['train', 'trainval'], help='training split')
parser.add_argument('--model', type=str, required=False, default='',
help='evaluate this model')
parser.add_argument('--nit', type=int, default=80000, help='Number of training iterations')
parser.add_argument('--fc6_8', type=int, default=1, help='If true, train only the final classifier')
parser.add_argument('--train_batchnorm', type=int, default=0, help='If true, train batch-norm layer parameters')
parser.add_argument('--eval_random_crops', type=int, default=1, help='If true, eval on 10 random crops, otherwise eval on 10 fixed crops')
parser.add_argument('--stepsize', type=int, default=5000, help='Decay step')
parser.add_argument('--lr', type=float, required=False, default=0.003, help='learning rate')
parser.add_argument('--wd', type=float, required=False, default=1e-6, help='weight decay')
parser.add_argument('--min_scale', type=float, required=False, default=0.1, help='scale')
parser.add_argument('--max_scale', type=float, required=False, default=0.5, help='scale')
parser.add_argument('--seed', type=int, default=31, help='random seed')
def main():
args = parser.parse_args()
print(args)
# fix random seeds
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
# create model and move it to gpu
model = load_model(args.model)
model.top_layer = nn.Linear(model.top_layer.weight.size(1), 20)
model.cuda()
cudnn.benchmark = True
# what partition of the data to use
if args.split == 'train':
args.test = 'val'
elif args.split == 'trainval':
args.test = 'test'
# data loader
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
dataset = VOC2007_dataset(args.vocdir, split=args.split, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop(224, scale=(args.min_scale, args.max_scale), ratio=(1, 1)),
transforms.ToTensor(),
normalize,
]))
loader = torch.utils.data.DataLoader(dataset,
batch_size=16, shuffle=False,
num_workers=24, pin_memory=True)
print('PASCAL VOC 2007 ' + args.split + ' dataset loaded')
# re initialize classifier
for y, m in enumerate(model.classifier.modules()):
if isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.fill_(0.1)
model.top_layer.bias.data.fill_(0.1)
if args.fc6_8:
# freeze some layers
for param in model.features.parameters():
param.requires_grad = False
# unfreeze batchnorm scaling
if args.train_batchnorm:
for layer in model.modules():
if isinstance(layer, torch.nn.BatchNorm2d):
for param in layer.parameters():
param.requires_grad = True
# set optimizer
optimizer = torch.optim.SGD(
filter(lambda x: x.requires_grad, model.parameters()),
lr=args.lr,
momentum=0.9,
weight_decay=args.wd,
)
criterion = nn.BCEWithLogitsLoss(reduction='none')
print('Start training')
it = 0
losses = AverageMeter()
while it < args.nit:
it = train(
loader,
model,
optimizer,
criterion,
args.fc6_8,
losses,
it=it,
total_iterations=args.nit,
stepsize=args.stepsize,
)
print('Evaluation')
if args.eval_random_crops:
transform_eval = [
transforms.RandomHorizontalFlip(),
transforms.RandomResizedCrop(224, scale=(args.min_scale, args.max_scale), ratio=(1, 1)),
transforms.ToTensor(),
normalize,
]
else:
transform_eval = [
transforms.Resize(256),
transforms.TenCrop(224),
transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops]))
]
print('Train set')
train_dataset = VOC2007_dataset(args.vocdir, split=args.split, transform=transforms.Compose(transform_eval))
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=1,
shuffle=False,
num_workers=24,
pin_memory=True,
)
evaluate(train_loader, model, args.eval_random_crops)
print('Test set')
test_dataset = VOC2007_dataset(args.vocdir, split=args.test, transform=transforms.Compose(transform_eval))
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=1,
shuffle=False,
num_workers=24,
pin_memory=True,
)
evaluate(test_loader, model, args.eval_random_crops)
def evaluate(loader, model, eval_random_crops):
model.eval()
gts = []
scr = []
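# When eval_random_crops is set this makes 10 passes over the loader (1 + 9
# extra random crops) and accumulates the scores per image; otherwise a single
# pass is made and the loader already yields the 10 fixed crops per image,
# which are reshaped into the batch dimension and summed below.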
for crop in range(9 * eval_random_crops + 1):
for i, (input, target) in enumerate(loader):
# move input to gpu and optionally reshape it
if len(input.size()) == 5:
bs, ncrops, c, h, w = input.size()
input = input.view(-1, c, h, w)
input = input.cuda(non_blocking=True)
# forward pass without grad computation
with torch.no_grad():
output = model(input)
if crop < 1 :
scr.append(torch.sum(output, 0, keepdim=True).cpu().numpy())
gts.append(target)
else:
scr[i] += output.cpu().numpy()
gts = np.concatenate(gts, axis=0).T
scr = np.concatenate(scr, axis=0).T
aps = []
for i in range(20):
# Subtract eps from score to make AP work for tied scores
ap = metrics.average_precision_score(gts[i][gts[i]<=1], scr[i][gts[i]<=1]-1e-5*gts[i][gts[i]<=1])
aps.append( ap )
print(np.mean(aps), ' ', ' '.join(['%0.2f'%a for a in aps]))
def train(loader, model, optimizer, criterion, fc6_8, losses, it=0, total_iterations=None, stepsize=None, verbose=True):
# to log
batch_time = AverageMeter()
data_time = AverageMeter()
top1 = AverageMeter()
end = time.time()
current_iteration = it
# use dropout for the MLP
model.train()
# in the batch norms always use global statistics
model.features.eval()
for (input, target) in loader:
# measure data loading time
data_time.update(time.time() - end)
# adjust learning rate
if current_iteration != 0 and current_iteration % stepsize == 0:
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * 0.5
print('iter {0} learning rate is {1}'.format(current_iteration, param_group['lr']))
# move input to gpu
input = input.cuda(non_blocking=True)
# forward pass with or without grad computation
output = model(input)
target = target.float().cuda()
mask = (target == 255)
loss = torch.sum(criterion(output, target).masked_fill_(mask, 0)) / target.size(0)
# backward
optimizer.zero_grad()
loss.backward()
# clip gradients
torch.nn.utils.clip_grad_norm_(model.parameters(), 10)
# and weights update
optimizer.step()
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if verbose is True and current_iteration % 25 == 0:
print('Iteration[{0}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
current_iteration, batch_time=batch_time,
data_time=data_time, loss=losses))
current_iteration = current_iteration + 1
if total_iterations is not None and current_iteration == total_iterations:
break
return current_iteration
class VOC2007_dataset(torch.utils.data.Dataset):
def __init__(self, voc_dir, split='train', transform=None):
# Find the image sets
image_set_dir = os.path.join(voc_dir, 'ImageSets', 'Main')
image_sets = glob.glob(os.path.join(image_set_dir, '*_' + split + '.txt'))
assert len(image_sets) == 20
# Read the labels
self.n_labels = len(image_sets)
images = defaultdict(lambda:-np.ones(self.n_labels, dtype=np.uint8))
for k, s in enumerate(sorted(image_sets)):
for l in open(s, 'r'):
name, lbl = l.strip().split()
lbl = int(lbl)
# Switch the ignore label and 0 label (in VOC -1: not present, 0: ignore)
if lbl < 0:
lbl = 0
elif lbl == 0:
lbl = 255
images[os.path.join(voc_dir, 'JPEGImages', name + '.jpg')][k] = lbl
self.images = [(k, images[k]) for k in images.keys()]
np.random.shuffle(self.images)
self.transform = transform
def __len__(self):
return len(self.images)
def __getitem__(self, i):
img = Image.open(self.images[i][0])
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
return img, self.images[i][1]
if __name__ == '__main__':
main()
| deepcluster-main | eval_voc_classif.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import pickle
import time
import faiss
import numpy as np
from sklearn.metrics.cluster import normalized_mutual_info_score
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import clustering
import models
from util import AverageMeter, Logger, UnifLabelSampler
def parse_args():
parser = argparse.ArgumentParser(description='PyTorch Implementation of DeepCluster')
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--arch', '-a', type=str, metavar='ARCH',
choices=['alexnet', 'vgg16'], default='alexnet',
help='CNN architecture (default: alexnet)')
parser.add_argument('--sobel', action='store_true', help='Sobel filtering')
parser.add_argument('--clustering', type=str, choices=['Kmeans', 'PIC'],
default='Kmeans', help='clustering algorithm (default: Kmeans)')
parser.add_argument('--nmb_cluster', '--k', type=int, default=10000,
help='number of cluster for k-means (default: 10000)')
parser.add_argument('--lr', default=0.05, type=float,
help='learning rate (default: 0.05)')
parser.add_argument('--wd', default=-5, type=float,
help='weight decay pow (default: -5)')
parser.add_argument('--reassign', type=float, default=1.,
help="""how many epochs of training between two consecutive
reassignments of clusters (default: 1)""")
parser.add_argument('--workers', default=4, type=int,
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', type=int, default=200,
help='number of total epochs to run (default: 200)')
parser.add_argument('--start_epoch', default=0, type=int,
help='manual epoch number (useful on restarts) (default: 0)')
parser.add_argument('--batch', default=256, type=int,
help='mini-batch size (default: 256)')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum (default: 0.9)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to checkpoint (default: None)')
parser.add_argument('--checkpoints', type=int, default=25000,
help='how many iterations between two checkpoints (default: 25000)')
parser.add_argument('--seed', type=int, default=31, help='random seed (default: 31)')
parser.add_argument('--exp', type=str, default='', help='path to exp folder')
parser.add_argument('--verbose', action='store_true', help='chatty')
return parser.parse_args()
def main(args):
# fix random seeds
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
# CNN
if args.verbose:
print('Architecture: {}'.format(args.arch))
model = models.__dict__[args.arch](sobel=args.sobel)
fd = int(model.top_layer.weight.size()[1])
model.top_layer = None
model.features = torch.nn.DataParallel(model.features)
model.cuda()
cudnn.benchmark = True
# create optimizer
optimizer = torch.optim.SGD(
filter(lambda x: x.requires_grad, model.parameters()),
lr=args.lr,
momentum=args.momentum,
weight_decay=10**args.wd,
)
# define loss function
criterion = nn.CrossEntropyLoss().cuda()
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
# remove top_layer parameters from checkpoint
for key in list(checkpoint['state_dict']):  # copy keys, since entries are deleted while iterating
if 'top_layer' in key:
del checkpoint['state_dict'][key]
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# creating checkpoint repo
exp_check = os.path.join(args.exp, 'checkpoints')
if not os.path.isdir(exp_check):
os.makedirs(exp_check)
# creating cluster assignments log
cluster_log = Logger(os.path.join(args.exp, 'clusters'))
# preprocessing of data
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
tra = [transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize]
# load the data
end = time.time()
dataset = datasets.ImageFolder(args.data, transform=transforms.Compose(tra))
if args.verbose:
print('Load dataset: {0:.2f} s'.format(time.time() - end))
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=args.batch,
num_workers=args.workers,
pin_memory=True)
# clustering algorithm to use
deepcluster = clustering.__dict__[args.clustering](args.nmb_cluster)
# training convnet with DeepCluster
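# Each epoch alternates: (1) compute features for the full dataset with the
# current (headless) convnet, (2) cluster the features, (3) build a
# pseudo-labelled dataset from the cluster assignments with a cluster-balanced
# sampler, and (4) train the convnet plus a freshly initialized top layer on
# those pseudo-labels.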
for epoch in range(args.start_epoch, args.epochs):
end = time.time()
# remove head
model.top_layer = None
model.classifier = nn.Sequential(*list(model.classifier.children())[:-1])
# get the features for the whole dataset
features = compute_features(dataloader, model, len(dataset))
# cluster the features
if args.verbose:
print('Cluster the features')
clustering_loss = deepcluster.cluster(features, verbose=args.verbose)
# assign pseudo-labels
if args.verbose:
print('Assign pseudo labels')
train_dataset = clustering.cluster_assign(deepcluster.images_lists,
dataset.imgs)
# uniformly sample per target
sampler = UnifLabelSampler(int(args.reassign * len(train_dataset)),
deepcluster.images_lists)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch,
num_workers=args.workers,
sampler=sampler,
pin_memory=True,
)
# set last fully connected layer
mlp = list(model.classifier.children())
mlp.append(nn.ReLU(inplace=True).cuda())
model.classifier = nn.Sequential(*mlp)
model.top_layer = nn.Linear(fd, len(deepcluster.images_lists))
model.top_layer.weight.data.normal_(0, 0.01)
model.top_layer.bias.data.zero_()
model.top_layer.cuda()
# train network with clusters as pseudo-labels
end = time.time()
loss = train(train_dataloader, model, criterion, optimizer, epoch)
# print log
if args.verbose:
print('###### Epoch [{0}] ###### \n'
'Time: {1:.3f} s\n'
'Clustering loss: {2:.3f} \n'
'ConvNet loss: {3:.3f}'
.format(epoch, time.time() - end, clustering_loss, loss))
try:
nmi = normalized_mutual_info_score(
clustering.arrange_clustering(deepcluster.images_lists),
clustering.arrange_clustering(cluster_log.data[-1])
)
print('NMI against previous assignment: {0:.3f}'.format(nmi))
except IndexError:
pass
print('####################### \n')
# save running checkpoint
torch.save({'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict()},
os.path.join(args.exp, 'checkpoint.pth.tar'))
# save cluster assignments
cluster_log.log(deepcluster.images_lists)
def train(loader, model, crit, opt, epoch):
"""Training of the CNN.
Args:
loader (torch.utils.data.DataLoader): Data loader
model (nn.Module): CNN
crit (torch.nn): loss
opt (torch.optim.SGD): optimizer for every parameters with True
requires_grad in model except top layer
epoch (int)
"""
batch_time = AverageMeter()
losses = AverageMeter()
data_time = AverageMeter()
forward_time = AverageMeter()
backward_time = AverageMeter()
# switch to train mode
model.train()
# create an optimizer for the last fc layer
optimizer_tl = torch.optim.SGD(
model.top_layer.parameters(),
lr=args.lr,
weight_decay=10**args.wd,
)
end = time.time()
for i, (input_tensor, target) in enumerate(loader):
data_time.update(time.time() - end)
# save checkpoint
n = len(loader) * epoch + i
if n % args.checkpoints == 0:
path = os.path.join(
args.exp,
'checkpoints',
'checkpoint_' + str(n // args.checkpoints) + '.pth.tar',
)
if args.verbose:
print('Save checkpoint at: {0}'.format(path))
torch.save({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'optimizer' : opt.state_dict()
}, path)
target = target.cuda(non_blocking=True)  # 'async' became a reserved keyword in Python 3.7
input_var = torch.autograd.Variable(input_tensor.cuda())
target_var = torch.autograd.Variable(target)
output = model(input_var)
loss = crit(output, target_var)
# record loss
losses.update(loss.item(), input_tensor.size(0))
# compute gradient and do SGD step
opt.zero_grad()
optimizer_tl.zero_grad()
loss.backward()
opt.step()
optimizer_tl.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.verbose and (i % 200) == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data: {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss: {loss.val:.4f} ({loss.avg:.4f})'
.format(epoch, i, len(loader), batch_time=batch_time,
data_time=data_time, loss=losses))
return losses.avg
def compute_features(dataloader, model, N):
if args.verbose:
print('Compute features')
batch_time = AverageMeter()
end = time.time()
model.eval()
# discard the label information in the dataloader
for i, (input_tensor, _) in enumerate(dataloader):
input_var = torch.autograd.Variable(input_tensor.cuda(), volatile=True)
aux = model(input_var).data.cpu().numpy()
if i == 0:
features = np.zeros((N, aux.shape[1]), dtype='float32')
aux = aux.astype('float32')
if i < len(dataloader) - 1:
features[i * args.batch: (i + 1) * args.batch] = aux
else:
# special treatment for final batch
features[i * args.batch:] = aux
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.verbose and (i % 200) == 0:
print('{0} / {1}\t'
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f})'
.format(i, len(dataloader), batch_time=batch_time))
return features
if __name__ == '__main__':
args = parse_args()
main(args)
| deepcluster-main | main.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import time
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from util import AverageMeter, learning_rate_decay, load_model, Logger
parser = argparse.ArgumentParser(description="""Train linear classifier on top
of frozen convolutional layers of an AlexNet.""")
parser.add_argument('--data', type=str, help='path to dataset')
parser.add_argument('--model', type=str, help='path to model')
parser.add_argument('--conv', type=int, choices=[1, 2, 3, 4, 5],
help='on top of which convolutional layer to train the logistic regression')
parser.add_argument('--tencrops', action='store_true',
help='validation accuracy averaged over 10 crops')
parser.add_argument('--exp', type=str, default='', help='exp folder')
parser.add_argument('--workers', default=4, type=int,
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', type=int, default=90, help='number of total epochs to run (default: 90)')
parser.add_argument('--batch_size', default=256, type=int,
help='mini-batch size (default: 256)')
parser.add_argument('--lr', default=0.01, type=float, help='learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum (default: 0.9)')
parser.add_argument('--weight_decay', '--wd', default=-4, type=float,
help='weight decay pow (default: -4)')
parser.add_argument('--seed', type=int, default=31, help='random seed')
parser.add_argument('--verbose', action='store_true', help='chatty')
def main():
global args
args = parser.parse_args()
#fix random seeds
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
best_prec1 = 0
# load model
model = load_model(args.model)
model.cuda()
cudnn.benchmark = True
# freeze the features layers
for param in model.features.parameters():
param.requires_grad = False
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
# data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
if args.tencrops:
transformations_val = [
transforms.Resize(256),
transforms.TenCrop(224),
transforms.Lambda(lambda crops: torch.stack([normalize(transforms.ToTensor()(crop)) for crop in crops])),
]
else:
transformations_val = [transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize]
transformations_train = [transforms.Resize(256),
transforms.CenterCrop(256),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize]
train_dataset = datasets.ImageFolder(
traindir,
transform=transforms.Compose(transformations_train)
)
val_dataset = datasets.ImageFolder(
valdir,
transform=transforms.Compose(transformations_val)
)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True)
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=int(args.batch_size/2),
shuffle=False,
num_workers=args.workers)
# logistic regression
reglog = RegLog(args.conv, len(train_dataset.classes)).cuda()
optimizer = torch.optim.SGD(
filter(lambda x: x.requires_grad, reglog.parameters()),
args.lr,
momentum=args.momentum,
weight_decay=10**args.weight_decay
)
# create logs
exp_log = os.path.join(args.exp, 'log')
if not os.path.isdir(exp_log):
os.makedirs(exp_log)
loss_log = Logger(os.path.join(exp_log, 'loss_log'))
prec1_log = Logger(os.path.join(exp_log, 'prec1'))
prec5_log = Logger(os.path.join(exp_log, 'prec5'))
for epoch in range(args.epochs):
end = time.time()
# train for one epoch
train(train_loader, model, reglog, criterion, optimizer, epoch)
# evaluate on validation set
prec1, prec5, loss = validate(val_loader, model, reglog, criterion)
loss_log.log(loss)
prec1_log.log(prec1)
prec5_log.log(prec5)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if is_best:
filename = 'model_best.pth.tar'
else:
filename = 'checkpoint.pth.tar'
torch.save({
'epoch': epoch + 1,
'arch': 'alexnet',
'state_dict': model.state_dict(),
'prec5': prec5,
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, os.path.join(args.exp, filename))
class RegLog(nn.Module):
"""Creates logistic regression on top of frozen features"""
def __init__(self, conv, num_labels):
super(RegLog, self).__init__()
self.conv = conv
if conv==1:
self.av_pool = nn.AvgPool2d(6, stride=6, padding=3)
s = 9600
elif conv==2:
self.av_pool = nn.AvgPool2d(4, stride=4, padding=0)
s = 9216
elif conv==3:
self.av_pool = nn.AvgPool2d(3, stride=3, padding=1)
s = 9600
elif conv==4:
self.av_pool = nn.AvgPool2d(3, stride=3, padding=1)
s = 9600
elif conv==5:
self.av_pool = nn.AvgPool2d(2, stride=2, padding=0)
s = 9216
self.linear = nn.Linear(s, num_labels)
def forward(self, x):
x = self.av_pool(x)
x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))
return self.linear(x)
def forward(x, model, conv):
if hasattr(model, 'sobel') and model.sobel is not None:
x = model.sobel(x)
count = 1
for m in model.features.modules():
if not isinstance(m, nn.Sequential):
x = m(x)
if isinstance(m, nn.ReLU):
if count == conv:
return x
count = count + 1
return x
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
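# Illustrative usage of accuracy() above (shapes and values are hypothetical, not from this repo):
#   out = torch.randn(4, 10)            # logits for 4 samples over 10 classes
#   tgt = torch.tensor([1, 0, 3, 9])
#   top1, top5 = accuracy(out, tgt, topk=(1, 5))
# Each returned entry is a 1-element tensor holding the percentage of targets found
# among the k highest-scoring predictions.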
def train(train_loader, model, reglog, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# freeze also batch norm layers
model.eval()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
#adjust learning rate
learning_rate_decay(optimizer, len(train_loader) * epoch + i, args.lr)
target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input.cuda())
target_var = torch.autograd.Variable(target)
# compute output
output = forward(input_var, model, reglog.conv)
output = reglog(output)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.verbose and i % 100 == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'
.format(epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, reglog, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
softmax = nn.Softmax(dim=1).cuda()
end = time.time()
for i, (input_tensor, target) in enumerate(val_loader):
if args.tencrops:
bs, ncrops, c, h, w = input_tensor.size()
input_tensor = input_tensor.view(-1, c, h, w)
target = target.cuda(non_blocking=True)
input_var = torch.autograd.Variable(input_tensor.cuda(), volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
output = reglog(forward(input_var, model, reglog.conv))
if args.tencrops:
output_central = output.view(bs, ncrops, -1)[:, ncrops // 2 - 1, :]
output = softmax(output)
output = torch.squeeze(output.view(bs, ncrops, -1).mean(1))
else:
output_central = output
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
top1.update(prec1[0], input_tensor.size(0))
top5.update(prec5[0], input_tensor.size(0))
loss = criterion(output_central, target_var)
losses.update(loss.item(), input_tensor.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.verbose and i % 100 == 0:
print('Validation: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'
.format(i, len(val_loader), batch_time=batch_time,
loss=losses, top1=top1, top5=top5))
return top1.avg, top5.avg, losses.avg
if __name__ == '__main__':
main()
| deepcluster-main | eval_linear.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
from scipy.ndimage.filters import gaussian_filter
import sys
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
sys.path.insert(0, '..')
from util import load_model
parser = argparse.ArgumentParser(description='Gradient ascent visualisation')
parser.add_argument('--model', type=str, help='Model')
parser.add_argument('--arch', type=str, default='alexnet', choices=['alexnet', 'vgg16'], help='arch')
parser.add_argument('--conv', type=int, default=1, help='convolutional layer')
parser.add_argument('--exp', type=str, default='', help='path to res')
parser.add_argument('--lr', type=float, default=3, help='learning rate (default: 3)')
parser.add_argument('--wd', type=float, default=0.00001, help='weight decay (default: 10^-5)')
parser.add_argument('--sig', type=float, default=0.3, help='gaussian blur (default: 0.3)')
parser.add_argument('--step', type=int, default=5, help='number of iter between gaussian blurs (default: 5)')
parser.add_argument('--niter', type=int, default=1000, help='total number of iterations (default: 1000)')
parser.add_argument('--idim', type=int, default=224, help='size of input image (default: 224)')
CONV = {'alexnet': [96, 256, 384, 384, 256],
'vgg16': [64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512]}
def main():
args = parser.parse_args()
# sanity check
if args.arch == 'alexnet':
assert args.conv < 6
elif args.arch == 'vgg16':
assert args.conv < 14
# create repo
repo = os.path.join(args.exp, 'conv' + str(args.conv))
if not os.path.isdir(repo):
os.makedirs(repo)
# build model
model = load_model(args.model)
model.cuda()
for params in model.parameters():
params.requires_grad = False
model.eval()
def gradient_ascent(f):
print(f, end=' ')
sys.stdout.flush()
fname_out = '{0}/layer{1}-channel{2}.jpeg'.format(repo, args.conv, f)
img_noise = np.random.normal(size=(args.idim, args.idim, 3)) * 20 + 128
img_noise = img_noise.astype('float32')
inp = transforms.ToTensor()(img_noise)
inp = torch.unsqueeze(inp, 0)
for it in range(args.niter):
x = torch.autograd.Variable(inp.cuda(), requires_grad=True)
out = forward(model, args.conv-1, f, x)
criterion = nn.CrossEntropyLoss()
filt_var = torch.autograd.Variable(torch.ones(1).long()*f).cuda()
output = out.mean(3).mean(2)
loss = - criterion(output, filt_var) - args.wd*torch.norm(x)**2
# compute gradient
loss.backward()
# normalize gradient
grads = x.grad.data.cpu()
grads = grads.div(torch.norm(grads)+1e-8)
# apply gradient
inp = inp.add(args.lr*grads)
# gaussian blur
if it%args.step == 0:
inp = gaussian_filter(torch.squeeze(inp).numpy().transpose((2, 1, 0)),
sigma=(args.sig, args.sig, 0))
inp = torch.unsqueeze(torch.from_numpy(inp).float().transpose(2, 0), 0)
# save image at the last iteration
if it == args.niter - 1:
a = deprocess_image(inp.numpy())
Image.fromarray(a).save(fname_out)
for f in range(CONV[args.arch][args.conv - 1]): gradient_ascent(f)  # map() is lazy in Python 3, so call explicitly
def deprocess_image(x):
x = x[0, :, :, :]
# normalize tensor: center on 0., ensure std is 0.1
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
x = x.transpose((1, 2, 0))
x = np.clip(x, 0, 255).astype('uint8')
return x
def forward(model, layer, channel, x):
if model.sobel is not None:
x = model.sobel(x)
count = 0
for y, m in enumerate(model.features.modules()):
if not isinstance(m, nn.Sequential):
x = m(x)
if isinstance(m, nn.Conv2d):
if count == layer:
res = x
if isinstance(m, nn.ReLU):
if count == layer:
# check if channel is not activated
if x[:, channel, :, :].mean().data.cpu().numpy() == 0:
return res
return x
count = count + 1
if __name__ == '__main__':
main()
| deepcluster-main | visu/gradient_ascent.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
from shutil import copyfile
import sys
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
sys.path.insert(0, '..')
from util import load_model
def parse_args():
parser = argparse.ArgumentParser(description='Retrieve images with maximal activations')
parser.add_argument('--data', type=str, help='path to dataset')
parser.add_argument('--model', type=str, help='Model')
parser.add_argument('--conv', type=int, default=1, help='convolutional layer')
parser.add_argument('--exp', type=str, default='', help='path to res')
parser.add_argument('--count', type=int, default=9, help='save this many images')
parser.add_argument('--workers', default=4, type=int,
help='number of data loading workers (default: 4)')
return parser.parse_args()
def main(args):
# create repo
repo = os.path.join(args.exp, 'conv' + str(args.conv))
if not os.path.isdir(repo):
os.makedirs(repo)
# build model
model = load_model(args.model)
model.cuda()
for params in model.parameters():
params.requires_grad = False
model.eval()
#load data
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
tra = [transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize]
# dataset
dataset = datasets.ImageFolder(args.data, transform=transforms.Compose(tra))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=256,
num_workers=args.workers)
# keys are filters and value are arrays with activation scores for the whole dataset
layers_activations = {}
for i, (input_tensor, _) in enumerate(dataloader):
input_var = torch.autograd.Variable(input_tensor.cuda(), volatile=True)
activations = forward(model, args.conv, input_var)
if i == 0:
layers_activations = {filt: np.zeros(len(dataset)) for filt in activations}
if i < len(dataloader) - 1:
e_idx = (i + 1) * 256
else:
e_idx = len(dataset)
s_idx = i * 256
for filt in activations:
layers_activations[filt][s_idx: e_idx] = activations[filt].cpu().data.numpy()
if i % 100 == 0:
print('{0}/{1}'.format(i, len(dataloader)))
# save top N images for each filter
for filt in layers_activations:
repofilter = os.path.join(repo, filt)
if not os.path.isdir(repofilter):
os.mkdir(repofilter)
top = np.argsort(layers_activations[filt])[::-1]
if args.count > 0:
top = top[:args.count]
for pos, img in enumerate(top):
src, _ = dataset.imgs[img]
copyfile(src, os.path.join(repofilter, "{}_{}".format(pos, src.split('/')[-1])))
def forward(model, my_layer, x):
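# Runs the frozen trunk up to the my_layer-th ReLU and returns, for every channel of that
# layer, its spatially averaged activation for each image in the batch (a dict mapping
# 'layerL-channelC' keys to 1-D tensors of length batch_size).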
if model.sobel is not None:
x = model.sobel(x)
layer = 1
res = {}
for m in model.features.modules():
if not isinstance(m, nn.Sequential):
x = m(x)
if isinstance(m, nn.ReLU):
if layer == my_layer:
for channel in range(int(x.size()[1])):
key = 'layer' + str(layer) + '-channel' + str(channel)
res[key] = torch.squeeze(x.mean(3).mean(2))[:, channel]
return res
layer = layer + 1
return res
if __name__ == '__main__':
args = parse_args()
main(args)
| deepcluster-main | visu/activ-retrieval.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from .vgg16 import *
from .alexnet import *
| deepcluster-main | models/__init__.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import math
from random import random as rd
__all__ = [ 'VGG', 'vgg16']
class VGG(nn.Module):
def __init__(self, features, num_classes, sobel):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(0.5),
nn.Linear(4096, 4096),
nn.ReLU(True)
)
self.top_layer = nn.Linear(4096, num_classes)
self._initialize_weights()
if sobel:
grayscale = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0)
grayscale.weight.data.fill_(1.0 / 3.0)
grayscale.bias.data.zero_()
sobel_filter = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1)
sobel_filter.weight.data[0,0].copy_(
torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
)
sobel_filter.weight.data[1,0].copy_(
torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
)
sobel_filter.bias.data.zero_()
self.sobel = nn.Sequential(grayscale, sobel_filter)
for p in self.sobel.parameters():
p.requires_grad = False
else:
self.sobel = None
def forward(self, x):
if self.sobel:
x = self.sobel(x)
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
if self.top_layer:
x = self.top_layer(x)
return x
def _initialize_weights(self):
for y,m in enumerate(self.modules()):
if isinstance(m, nn.Conv2d):
#print(y)
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
for i in range(m.out_channels):
m.weight.data[i].normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers(input_dim, batch_norm):
layers = []
in_channels = input_dim
cfg = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def vgg16(sobel=False, bn=True, out=1000):
dim = 2 + int(not sobel)
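# dim is the number of input channels: 2 when the fixed Sobel filtering is enabled
# (horizontal and vertical gradient maps), 3 (RGB) otherwise.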
model = VGG(make_layers(dim, bn), out, sobel)
return model
| deepcluster-main | models/vgg16.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
import numpy as np
import torch
import torch.nn as nn
__all__ = [ 'AlexNet', 'alexnet']
# (number of filters, kernel size, stride, pad)
CFG = {
'2012': [(96, 11, 4, 2), 'M', (256, 5, 1, 2), 'M', (384, 3, 1, 1), (384, 3, 1, 1), (256, 3, 1, 1), 'M']
}
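# For example, (96, 11, 4, 2) becomes nn.Conv2d(in_channels, 96, kernel_size=11, stride=4,
# padding=2) (followed by BatchNorm2d/ReLU when bn=True) in make_layers_features below,
# and 'M' becomes nn.MaxPool2d(kernel_size=3, stride=2).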
class AlexNet(nn.Module):
def __init__(self, features, num_classes, sobel):
super(AlexNet, self).__init__()
self.features = features
self.classifier = nn.Sequential(nn.Dropout(0.5),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(0.5),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True))
self.top_layer = nn.Linear(4096, num_classes)
self._initialize_weights()
if sobel:
grayscale = nn.Conv2d(3, 1, kernel_size=1, stride=1, padding=0)
grayscale.weight.data.fill_(1.0 / 3.0)
grayscale.bias.data.zero_()
sobel_filter = nn.Conv2d(1, 2, kernel_size=3, stride=1, padding=1)
sobel_filter.weight.data[0, 0].copy_(
torch.FloatTensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]])
)
sobel_filter.weight.data[1, 0].copy_(
torch.FloatTensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
)
sobel_filter.bias.data.zero_()
self.sobel = nn.Sequential(grayscale, sobel_filter)
for p in self.sobel.parameters():
p.requires_grad = False
else:
self.sobel = None
def forward(self, x):
if self.sobel:
x = self.sobel(x)
x = self.features(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
if self.top_layer:
x = self.top_layer(x)
return x
def _initialize_weights(self):
for y, m in enumerate(self.modules()):
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
for i in range(m.out_channels):
m.weight.data[i].normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
def make_layers_features(cfg, input_dim, bn):
layers = []
in_channels = input_dim
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=3, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v[0], kernel_size=v[1], stride=v[2], padding=v[3])
if bn:
layers += [conv2d, nn.BatchNorm2d(v[0]), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v[0]
return nn.Sequential(*layers)
def alexnet(sobel=False, bn=True, out=1000):
dim = 2 + int(not sobel)
model = AlexNet(make_layers_features(CFG['2012'], dim, bn=bn), out, sobel)
return model
| deepcluster-main | models/alexnet.py |
import argparse
import os
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torchaudio
import hydra
from omegaconf import OmegaConf
from torch.distributions import Categorical
from tqdm.auto import tqdm
from src import utils
from src.dataloaders.audio import mu_law_decode
from src.models.baselines.wavenet import WaveNetModel
from train import SequenceLightningModule
def test_step(model):
B, L = 2, 64
x = torch.ones(B, L, dtype=torch.long).to('cuda')
# Forward
batch = (x, None)
y, _, _ = model(batch) # Forward pass expects a batch which has both x and y (inputs and targets)
# Step
model._reset_state(batch, device='cuda')
ys = []
for x_ in torch.unbind(x, dim=-1):
y_ = model.step(x_)
ys.append(y_)
ys = torch.stack(ys, dim=1)
print(torch.norm(y-ys))
breakpoint()
@torch.inference_mode()
def generate(
model,
batch,
tau=1.0,
l_prefix=0,
T=None,
debug=False,
top_p=1.0,
benchmark=False,
return_logprobs=False,
):
x, _, *_ = batch # (B, L)
x = x.to('cuda')
T = x.shape[1] if T is None else T
# Special logic for WaveNet
if isinstance(model.model, WaveNetModel) and not benchmark:
l_prefix += model.model.receptive_field
T += model.model.receptive_field
x = F.pad(x, (model.model.receptive_field, 0), value=128)
# Set up the initial state
model._reset_state(batch, device='cuda')
# First sample
x_t = x[:, 0]
y_all = []
logprobs = np.zeros(x.shape[0])
entropy = np.zeros(x.shape[0])
if debug:
y_raw = []
# Generation loop
for t in tqdm(range(T)):
# Step through the model with the current sample
y_t = model.step(x_t)
# Handle special loss functions such as ProjectedAdaptiveSoftmax
if hasattr(model.loss, "compute_logits"): y_t = model.loss.compute_logits(y_t)
if debug:
y_raw.append(y_t.detach().cpu())
# Output distribution
probs = F.softmax(y_t, dim=-1)
# Optional: nucleus sampling
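# (Keep the smallest set of tokens whose cumulative probability exceeds top_p;
# the logits of all other tokens are pushed to -1e20 so they are never sampled.)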
if top_p < 1.0:
sorted_probs = probs.sort(dim=-1, descending=True)
csum_probs = sorted_probs.values.cumsum(dim=-1) > top_p
csum_probs[..., 1:] = csum_probs[..., :-1].clone()
csum_probs[..., 0] = 0
indices_to_remove = torch.zeros_like(csum_probs)
indices_to_remove[torch.arange(sorted_probs.indices.shape[0])[:, None].repeat(1, sorted_probs.indices.shape[1]).flatten(), sorted_probs.indices.flatten()] = csum_probs.flatten()
y_t = y_t + indices_to_remove.int() * (-1e20)
# Sample from the distribution
y_t = Categorical(logits=y_t/tau).sample()
# Feed back to the model
if t < l_prefix-1:
x_t = x[:, t+1]
else:
x_t = y_t
# Calculate the log-likelihood
if return_logprobs:
probs = probs.squeeze(1)
if len(y_t.shape) > 1:
logprobs += torch.log(probs[torch.arange(probs.shape[0]), y_t.squeeze(1)]).cpu().numpy()
else:
logprobs += torch.log(probs[torch.arange(probs.shape[0]), y_t]).cpu().numpy()
entropy += -(probs * (probs + 1e-6).log()).sum(dim=-1).cpu().numpy()
y_all.append(x_t.cpu())
# y_all.append(y_t.cpu())
y_all = torch.stack(y_all, dim=1) # (batch, length)
if isinstance(model.model, WaveNetModel) and not benchmark:
y_all = y_all[:, model.model.receptive_field:]
if not return_logprobs:
if debug:
y_raw = torch.stack(y_raw)
return y_all, y_raw
return y_all
else:
assert not debug
return y_all, logprobs, entropy
@hydra.main(config_path="configs", config_name="generate.yaml")
def main(config: OmegaConf):
### See configs/generate.yaml for descriptions of generation flags ###
# Load train config from existing Hydra experiment
if config.experiment_path is not None:
config.experiment_path = hydra.utils.to_absolute_path(config.experiment_path)
experiment_config = OmegaConf.load(os.path.join(config.experiment_path, '.hydra', 'config.yaml'))
# config = OmegaConf.merge(config, experiment_config)
config.model = experiment_config.model
config.task = experiment_config.task
config.encoder = experiment_config.encoder
config.decoder = experiment_config.decoder
config.dataset = experiment_config.dataset
config.loader = experiment_config.loader
# Special override flags
if not config.load_data:
OmegaConf.update(config, "train.disable_dataset", True)
if config.n_batch is None:
config.n_batch = config.n_samples
OmegaConf.update(config, "loader.batch_size", config.n_batch)
# Create the Lightning Module - same as train.py
config = utils.train.process_config(config)
utils.train.print_config(config, resolve=True)
print("Loading model...")
assert torch.cuda.is_available(), 'Use a GPU for generation.'
if config.train.seed is not None:
pl.seed_everything(config.train.seed, workers=True)
# Define checkpoint path smartly
if not config.experiment_path:
ckpt_path = hydra.utils.to_absolute_path(config.checkpoint_path)
else:
ckpt_path = os.path.join(config.experiment_path, config.checkpoint_path)
print("Full checkpoint path:", ckpt_path)
# Load model
if ckpt_path.endswith('.ckpt'):
model = SequenceLightningModule.load_from_checkpoint(ckpt_path, config=config)
model.to('cuda')
elif ckpt_path.endswith('.pt'):
model = SequenceLightningModule(config)
model.to('cuda')
# Load checkpoint
state_dict = torch.load(ckpt_path, map_location='cuda')
model.load_state_dict(state_dict)
# Setup: required for S4 modules in SaShiMi
for module in model.modules():
if hasattr(module, '_setup_step'): module._setup_step()
model.eval()
if config.load_data:
# Get the eval dataloaders
eval_dataloaders = model.val_dataloader()
dl = eval_dataloaders[0] if config.split == 'val' else eval_dataloaders[1]
else:
assert config.l_prefix == 0, 'Only unconditional generation when data is not loaded.'
# Handle save directory intelligently
if config.save_dir:
save_dir = hydra.utils.to_absolute_path(config.save_dir)
else:
save_dir = os.path.join(os.getcwd(), "samples/")
os.makedirs(save_dir, exist_ok=True)
# Test
if config.test_model:
test_step(model)
# Generate
assert config.n_samples % config.n_batch == 0, "For convenience, n_samples should be a multiple of n_batch"
y = []
logprobs = []
for _ in range(config.n_samples // config.n_batch):
# Construct a batch
if config.load_data:
x, _, *_ = next(iter(dl))
batch = (x.repeat(config.n_reps, 1), None, None)
else:
batch = (torch.zeros(config.n_batch * config.n_reps, 1).to(torch.long) + 128, None, None)
_y, _logprobs, _ = generate(
model, # lightning module (SequenceLightningModule from `train.py`)
batch, # pass data to condition the generation
l_prefix=config.l_prefix, # length of conditioning prefix
T=config.l_sample, # length of generated sequence
top_p=config.top_p, # nucleus sampling: always set to 1.0 for SaShiMi experiments
tau=config.temp, # temperature: always set to 1.0 for SaShiMi experiments
return_logprobs=True, # calc exact likelihoods
)
y.append(_y)
logprobs.append(_logprobs)
# Sort based on likelihoods and save
y = torch.cat(y, dim=0)
logprobs = np.concatenate(logprobs, axis=0)
y = y[np.argsort(logprobs.flatten())]
# Decode quantization
if config.decode == 'audio':
print("Saving samples into:", save_dir)
y = mu_law_decode(y)
for i, d in enumerate(y):
filename = f'{save_dir}/unconditional_{config.dataset._name_}_{config.model._name_}_len_{config.l_sample/16000.:.2f}s_gen_{i+1}.wav'
torchaudio.save(filename, d.unsqueeze(0), 16000)
np.save(f'{save_dir}/unconditional_{config.dataset._name_}_{config.model._name_}_len_{config.l_sample/16000.:.2f}s_logprobs.npy', logprobs)
elif config.decode == 'text':
y = [model.dataset.vocab.get_symbols(_y) for _y in y]
breakpoint() # Inspect output manually for now
else: pass
if __name__ == "__main__":
main()
| state-spaces-main | generate.py |
'''
Train an S4 model on sequential CIFAR10 / sequential MNIST with PyTorch for demonstration purposes.
This code borrows heavily from https://github.com/kuangliu/pytorch-cifar.
This file only depends on the standalone S4 layer
available in /models/s4/
* Train standard sequential CIFAR:
python -m example
* Train sequential CIFAR grayscale:
python -m example --grayscale
* Train MNIST:
python -m example --dataset mnist --d_model 256 --weight_decay 0.0
The `S4Model` class defined in this file provides a simple backbone to train S4 models.
This backbone is a good starting point for many problems, although some tasks (especially generation)
may require using other backbones.
The default CIFAR10 model trained by this file should get
89+% accuracy on the CIFAR10 test set in 80 epochs.
Each epoch takes approximately 7m20s on a T4 GPU (will be much faster on V100 / A100).
'''
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import os
import argparse
from models.s4.s4 import S4Block as S4 # Can use full version instead of minimal S4D standalone below
from models.s4.s4d import S4D
from tqdm.auto import tqdm
# Dropout broke in PyTorch 1.11
if tuple(map(int, torch.__version__.split('.')[:2])) == (1, 11):
print("WARNING: Dropout is bugged in PyTorch 1.11. Results may be worse.")
dropout_fn = nn.Dropout
elif tuple(map(int, torch.__version__.split('.')[:2])) >= (1, 12):
dropout_fn = nn.Dropout1d
else:
dropout_fn = nn.Dropout2d
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')
# Optimizer
parser.add_argument('--lr', default=0.01, type=float, help='Learning rate')
parser.add_argument('--weight_decay', default=0.01, type=float, help='Weight decay')
# Scheduler
# parser.add_argument('--patience', default=10, type=float, help='Patience for learning rate scheduler')
parser.add_argument('--epochs', default=100, type=int, help='Training epochs')
# Dataset
parser.add_argument('--dataset', default='cifar10', choices=['mnist', 'cifar10'], type=str, help='Dataset')
parser.add_argument('--grayscale', action='store_true', help='Use grayscale CIFAR10')
# Dataloader
parser.add_argument('--num_workers', default=4, type=int, help='Number of workers to use for dataloader')
parser.add_argument('--batch_size', default=64, type=int, help='Batch size')
# Model
parser.add_argument('--n_layers', default=4, type=int, help='Number of layers')
parser.add_argument('--d_model', default=128, type=int, help='Model dimension')
parser.add_argument('--dropout', default=0.1, type=float, help='Dropout')
parser.add_argument('--prenorm', action='store_true', help='Prenorm')
# General
parser.add_argument('--resume', '-r', action='store_true', help='Resume from checkpoint')
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0 # best test accuracy
start_epoch = 0 # start from epoch 0 or last checkpoint epoch
# Data
print(f'==> Preparing {args.dataset} data..')
def split_train_val(train, val_split):
train_len = int(len(train) * (1.0-val_split))
train, val = torch.utils.data.random_split(
train,
(train_len, len(train) - train_len),
generator=torch.Generator().manual_seed(42),
)
return train, val
if args.dataset == 'cifar10':
if args.grayscale:
transform = transforms.Compose([
transforms.Grayscale(),
transforms.ToTensor(),
transforms.Normalize(mean=122.6 / 255.0, std=61.0 / 255.0),
transforms.Lambda(lambda x: x.view(1, 1024).t())
])
else:
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
transforms.Lambda(lambda x: x.view(3, 1024).t())
])
# S4 is trained on sequences with no data augmentation!
transform_train = transform_test = transform
trainset = torchvision.datasets.CIFAR10(
root='./data/cifar/', train=True, download=True, transform=transform_train)
trainset, _ = split_train_val(trainset, val_split=0.1)
valset = torchvision.datasets.CIFAR10(
root='./data/cifar/', train=True, download=True, transform=transform_test)
_, valset = split_train_val(valset, val_split=0.1)
testset = torchvision.datasets.CIFAR10(
root='./data/cifar/', train=False, download=True, transform=transform_test)
d_input = 3 if not args.grayscale else 1
d_output = 10
elif args.dataset == 'mnist':
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.view(1, 784).t())
])
transform_train = transform_test = transform
trainset = torchvision.datasets.MNIST(
root='./data', train=True, download=True, transform=transform_train)
trainset, _ = split_train_val(trainset, val_split=0.1)
valset = torchvision.datasets.MNIST(
root='./data', train=True, download=True, transform=transform_test)
_, valset = split_train_val(valset, val_split=0.1)
testset = torchvision.datasets.MNIST(
root='./data', train=False, download=True, transform=transform_test)
d_input = 1
d_output = 10
else: raise NotImplementedError
# Dataloaders
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.num_workers)
valloader = torch.utils.data.DataLoader(
valset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
testloader = torch.utils.data.DataLoader(
testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
class S4Model(nn.Module):
def __init__(
self,
d_input,
d_output=10,
d_model=256,
n_layers=4,
dropout=0.2,
prenorm=False,
):
super().__init__()
self.prenorm = prenorm
# Linear encoder (d_input = 1 for grayscale and 3 for RGB)
self.encoder = nn.Linear(d_input, d_model)
# Stack S4 layers as residual blocks
self.s4_layers = nn.ModuleList()
self.norms = nn.ModuleList()
self.dropouts = nn.ModuleList()
for _ in range(n_layers):
self.s4_layers.append(
S4D(d_model, dropout=dropout, transposed=True, lr=min(0.001, args.lr))
)
self.norms.append(nn.LayerNorm(d_model))
self.dropouts.append(dropout_fn(dropout))
# Linear decoder
self.decoder = nn.Linear(d_model, d_output)
def forward(self, x):
"""
Input x is shape (B, L, d_input)
"""
x = self.encoder(x) # (B, L, d_input) -> (B, L, d_model)
x = x.transpose(-1, -2) # (B, L, d_model) -> (B, d_model, L)
for layer, norm, dropout in zip(self.s4_layers, self.norms, self.dropouts):
# Each iteration of this loop will map (B, d_model, L) -> (B, d_model, L)
z = x
if self.prenorm:
# Prenorm
z = norm(z.transpose(-1, -2)).transpose(-1, -2)
# Apply S4 block: we ignore the state input and output
z, _ = layer(z)
# Dropout on the output of the S4 block
z = dropout(z)
# Residual connection
x = z + x
if not self.prenorm:
# Postnorm
x = norm(x.transpose(-1, -2)).transpose(-1, -2)
x = x.transpose(-1, -2)
# Pooling: average pooling over the sequence length
x = x.mean(dim=1)
# Decode the outputs
x = self.decoder(x) # (B, d_model) -> (B, d_output)
return x
# Model
print('==> Building model..')
model = S4Model(
d_input=d_input,
d_output=d_output,
d_model=args.d_model,
n_layers=args.n_layers,
dropout=args.dropout,
prenorm=args.prenorm,
)
model = model.to(device)
if device == 'cuda':
cudnn.benchmark = True
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
checkpoint = torch.load('./checkpoint/ckpt.pth')
model.load_state_dict(checkpoint['model'])
best_acc = checkpoint['acc']
start_epoch = checkpoint['epoch']
def setup_optimizer(model, lr, weight_decay, epochs):
"""
S4 requires a specific optimizer setup.
The S4 layer (A, B, C, dt) parameters typically
require a smaller learning rate (e.g. 0.001), with no weight decay.
The rest of the model can be trained with a higher learning rate (e.g. 0.004, 0.01)
and weight decay (if desired).
"""
# All parameters in the model
all_parameters = list(model.parameters())
# General parameters don't contain the special _optim key
params = [p for p in all_parameters if not hasattr(p, "_optim")]
# Create an optimizer with the general parameters
optimizer = optim.AdamW(params, lr=lr, weight_decay=weight_decay)
# Add parameters with special hyperparameters
hps = [getattr(p, "_optim") for p in all_parameters if hasattr(p, "_optim")]
hps = [
dict(s) for s in sorted(list(dict.fromkeys(frozenset(hp.items()) for hp in hps)))
] # Unique dicts
for hp in hps:
params = [p for p in all_parameters if getattr(p, "_optim", None) == hp]
optimizer.add_param_group(
{"params": params, **hp}
)
# Create a lr scheduler
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, factor=0.2)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs)
# Print optimizer info
keys = sorted(set([k for hp in hps for k in hp.keys()]))
for i, g in enumerate(optimizer.param_groups):
group_hps = {k: g.get(k, None) for k in keys}
print(' | '.join([
f"Optimizer group {i}",
f"{len(g['params'])} tensors",
] + [f"{k} {v}" for k, v in group_hps.items()]))
return optimizer, scheduler
criterion = nn.CrossEntropyLoss()
optimizer, scheduler = setup_optimizer(
model, lr=args.lr, weight_decay=args.weight_decay, epochs=args.epochs
)
###############################################################################
# Everything after this point is standard PyTorch training!
###############################################################################
# Training
def train():
model.train()
train_loss = 0
correct = 0
total = 0
pbar = tqdm(enumerate(trainloader))
for batch_idx, (inputs, targets) in pbar:
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
pbar.set_description(
'Batch Idx: (%d/%d) | Loss: %.3f | Acc: %.3f%% (%d/%d)' %
(batch_idx, len(trainloader), train_loss/(batch_idx+1), 100.*correct/total, correct, total)
)
def eval(epoch, dataloader, checkpoint=False):
global best_acc
model.eval()
eval_loss = 0
correct = 0
total = 0
with torch.no_grad():
pbar = tqdm(enumerate(dataloader))
for batch_idx, (inputs, targets) in pbar:
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets)
eval_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
pbar.set_description(
'Batch Idx: (%d/%d) | Loss: %.3f | Acc: %.3f%% (%d/%d)' %
(batch_idx, len(dataloader), eval_loss/(batch_idx+1), 100.*correct/total, correct, total)
)
# Save checkpoint.
if checkpoint:
acc = 100.*correct/total
if acc > best_acc:
state = {
'model': model.state_dict(),
'acc': acc,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
torch.save(state, './checkpoint/ckpt.pth')
best_acc = acc
return acc
pbar = tqdm(range(start_epoch, args.epochs))
for epoch in pbar:
if epoch == 0:
pbar.set_description('Epoch: %d' % (epoch))
else:
pbar.set_description('Epoch: %d | Val acc: %1.3f' % (epoch, val_acc))
train()
val_acc = eval(epoch, valloader, checkpoint=True)
eval(epoch, testloader)
scheduler.step()
# print(f"Epoch {epoch} learning rate: {scheduler.get_last_lr()}")
| state-spaces-main | example.py |
import copy
import os
import random
import time
from functools import partial, wraps
from typing import Callable, List, Optional
import hydra
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import wandb
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.utilities import rank_zero_only, rank_zero_warn
from tqdm.auto import tqdm
import src.models.nn.utils as U
import src.utils as utils
import src.utils.train
from src.dataloaders import SequenceDataset # TODO make registry
from src.tasks import decoders, encoders, tasks
from src.utils import registry
from src.utils.optim.ema import build_ema_optimizer
from src.utils.optim_groups import add_optimizer_hooks
log = src.utils.train.get_logger(__name__)
# Turn on TensorFloat32 (speeds up large model training substantially)
import torch.backends
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
# Lots of annoying hacks to get WandbLogger to continuously retry on failure
class DummyExperiment:
"""Dummy experiment."""
def nop(self, *args, **kw):
pass
def __getattr__(self, _):
return self.nop
def __getitem__(self, idx) -> "DummyExperiment":
# enables self.logger.experiment[0].add_image(...)
return self
def __setitem__(self, *args, **kwargs) -> None:
pass
def rank_zero_experiment(fn: Callable) -> Callable:
"""Returns the real experiment on rank 0 and otherwise the DummyExperiment."""
@wraps(fn)
def experiment(self):
@rank_zero_only
def get_experiment():
return fn(self)
return get_experiment() or DummyExperiment()
return experiment
class CustomWandbLogger(WandbLogger):
def __init__(self, *args, **kwargs):
"""Modified logger that insists on a wandb.init() call and catches wandb's error if thrown."""
super().__init__(*args, **kwargs)
@property
@rank_zero_experiment
def experiment(self):
r"""
Actual wandb object. To use wandb features in your
:class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
Example::
.. code-block:: python
self.logger.experiment.some_wandb_function()
"""
if self._experiment is None:
if self._offline:
os.environ["WANDB_MODE"] = "dryrun"
attach_id = getattr(self, "_attach_id", None)
if wandb.run is not None:
# wandb process already created in this instance
rank_zero_warn(
"There is a wandb run already in progress and newly created instances of `WandbLogger` will reuse"
" this run. If this is not desired, call `wandb.finish()` before instantiating `WandbLogger`."
)
self._experiment = wandb.run
elif attach_id is not None and hasattr(wandb, "_attach"):
# attach to wandb process referenced
self._experiment = wandb._attach(attach_id)
else:
# create new wandb process
while True:
try:
self._experiment = wandb.init(**self._wandb_init)
break
except Exception as e:
print("wandb Exception:\n", e)
t = random.randint(30, 60)
print(f"Sleeping for {t} seconds")
time.sleep(t)
# define default x-axis
if getattr(self._experiment, "define_metric", None):
self._experiment.define_metric("trainer/global_step")
self._experiment.define_metric("*", step_metric="trainer/global_step", step_sync=True)
return self._experiment
class SequenceLightningModule(pl.LightningModule):
def __init__(self, config):
# Disable profiling executor. This reduces memory and increases speed.
try:
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
except AttributeError:
pass
super().__init__()
# Passing in config expands it one level, so can access by self.hparams.train instead of self.hparams.config.train
self.save_hyperparameters(config, logger=False)
# Dataset arguments
self.dataset = SequenceDataset.registry[self.hparams.dataset._name_](
**self.hparams.dataset
)
# Check hparams
self._check_config()
# PL has some bugs, so add hooks and make sure they're only called once
self._has_setup = False
self.setup() ## Added by KS
def setup(self, stage=None):
if not self.hparams.train.disable_dataset:
self.dataset.setup()
# We need to set up the model in setup() because for some reason when training with DDP, one GPU uses much more memory than the others
# In order to not overwrite the model multiple times during different stages, we need this hack
# TODO PL 1.5 seems to have an option to skip hooks to avoid this
# https://github.com/PyTorchLightning/pytorch-lightning/issues/5410#issuecomment-762257024
if self._has_setup:
return
else:
self._has_setup = True
# Convenience feature: if model specifies encoder, combine it with main encoder
encoder_cfg = utils.to_list(self.hparams.encoder) + utils.to_list(
self.hparams.model.pop("encoder", None)
)
decoder_cfg = utils.to_list(
self.hparams.model.pop("decoder", None)
) + utils.to_list(self.hparams.decoder)
# Instantiate model
self.model = utils.instantiate(registry.model, self.hparams.model)
if (name := self.hparams.train.post_init_hook['_name_']) is not None:
kwargs = self.hparams.train.post_init_hook.copy()
del kwargs['_name_']
for module in self.modules():
if hasattr(module, name):
getattr(module, name)(**kwargs)
# Instantiate the task
self.task = utils.instantiate(
tasks.registry, self.hparams.task, dataset=self.dataset, model=self.model
)
# Create encoders and decoders
encoder = encoders.instantiate(
encoder_cfg, dataset=self.dataset, model=self.model
)
decoder = decoders.instantiate(
decoder_cfg, model=self.model, dataset=self.dataset
)
# Extract the modules so they show up in the top level parameter count
self.encoder = U.PassthroughSequential(self.task.encoder, encoder)
self.decoder = U.PassthroughSequential(decoder, self.task.decoder)
self.loss = self.task.loss
self.loss_val = self.task.loss
if hasattr(self.task, 'loss_val'):
self.loss_val = self.task.loss_val
self.metrics = self.task.metrics
# Handle state logic
self._initialize_state()
def load_state_dict(self, state_dict, strict=True):
if self.hparams.train.pretrained_model_state_hook['_name_'] is not None:
model_state_hook = utils.instantiate(
registry.model_state_hook,
self.hparams.train.pretrained_model_state_hook.copy(),
partial=True,
)
# Modify the checkpoint['state_dict'] inside model_state_hook e.g. to inflate 2D convs to 3D convs
state_dict = model_state_hook(self.model, state_dict)
print("Custom load_state_dict function is running.")
# note, it needs to return something from the normal function we overrided
return super().load_state_dict(state_dict, strict=strict)
def _check_config(self):
assert self.hparams.train.state.mode in [None, "none", "null", "reset", "bptt", "tbptt"]
assert (
(n := self.hparams.train.state.n_context) is None
or isinstance(n, int)
and n >= 0
)
assert (
(n := self.hparams.train.state.n_context_eval) is None
or isinstance(n, int)
and n >= 0
)
def _initialize_state(self):
"""Called at model setup and start of epoch to completely reset state"""
self._state = None
self._memory_chunks = []
def _reset_state(self, batch, device=None):
"""Called to construct default_state when necessary, e.g. during BPTT"""
device = device or batch[0].device
self._state = self.model.default_state(*batch[0].shape[:1], device=device)
def _detach_state(self, state):
if isinstance(state, torch.Tensor):
return state.detach()
elif isinstance(state, tuple):
return tuple(self._detach_state(s) for s in state)
elif isinstance(state, list):
return [self._detach_state(s) for s in state]
elif isinstance(state, dict):
return {k: self._detach_state(v) for k, v in state.items()}
elif state is None:
return None
else:
raise NotImplementedError
def _process_state(self, batch, batch_idx, train=True):
"""Handle logic for state context."""
# Number of context steps
key = "n_context" if train else "n_context_eval"
n_context = self.hparams.train.state.get(key)
# Don't need to do anything if 0 context steps. Make sure there is no state
if n_context == 0 and self.hparams.train.state.mode not in ['tbptt']:
self._initialize_state()
return
# Reset state if needed
if self.hparams.train.state.mode == "reset":
if batch_idx % (n_context + 1) == 0:
self._reset_state(batch)
# Pass through memory chunks
elif self.hparams.train.state.mode == "bptt":
self._reset_state(batch)
with torch.no_grad(): # should be unnecessary because individual modules should handle this
for _batch in self._memory_chunks:
self.forward(_batch)
# Prepare for next step
self._memory_chunks.append(batch)
self._memory_chunks = self._memory_chunks[-n_context:]
elif self.hparams.train.state.mode == 'tbptt':
_, _, z = batch
reset = z["reset"]
if reset:
self._reset_state(batch)
else:
self._state = self._detach_state(self._state)
def _on_epoch_start(self):
self._initialize_state()
def forward(self, batch):
"""Passes a batch through the encoder, backbone, and decoder"""
# z holds arguments such as sequence length
x, y, *z = batch # z holds extra dataloader info such as resolution
if len(z) == 0:
z = {}
else:
assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
z = z[0]
x, w = self.encoder(x, **z) # w can model-specific constructions such as key_padding_mask for transformers or state for RNNs
x, state = self.model(x, **w, state=self._state)
self._state = state
x, w = self.decoder(x, state=state, **z)
return x, y, w
def step(self, x_t):
x_t, *_ = self.encoder(x_t) # Potential edge case for encoders that expect (B, L, H)?
x_t, state = self.model.step(x_t, state=self._state)
self._state = state
# x_t = x_t[:, None, ...] # Dummy length
# x_t, *_ = self.decoder(x_t, state=state)
# x_t = x_t[:, 0, ...]
x_t, *_ = self.decoder.step(x_t, state=state)
return x_t
def _shared_step(self, batch, batch_idx, prefix="train"):
self._process_state(batch, batch_idx, train=(prefix == "train"))
x, y, w = self.forward(batch)
# Loss
if prefix == 'train':
loss = self.loss(x, y, **w)
else:
loss = self.loss_val(x, y, **w)
# Metrics
metrics = self.metrics(x, y, **w)
metrics["loss"] = loss
metrics = {f"{prefix}/{k}": v for k, v in metrics.items()}
# Calculate torchmetrics: these are accumulated and logged at the end of epochs
self.task.torchmetrics(x, y, prefix)
self.log_dict(
metrics,
on_step=False,
on_epoch=True,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
return loss
def on_train_epoch_start(self):
self._on_epoch_start()
# Reset training torchmetrics
self.task._reset_torchmetrics("train")
def on_train_epoch_end(self):
# Log training torchmetrics
super().on_train_epoch_end()
self.log_dict(
{f"train/{k}": v for k, v in self.task.get_torchmetrics("train").items()},
on_step=False,
on_epoch=True,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
def on_validation_epoch_start(self):
self._on_epoch_start()
# Reset all validation torchmetrics
for name in self.val_loader_names:
self.task._reset_torchmetrics(name)
def on_validation_epoch_end(self):
# Log all validation torchmetrics
super().on_validation_epoch_end()
for name in self.val_loader_names:
self.log_dict(
{f"{name}/{k}": v for k, v in self.task.get_torchmetrics(name).items()},
on_step=False,
on_epoch=True,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
def on_test_epoch_start(self):
self._on_epoch_start()
# Reset all test torchmetrics
for name in self.test_loader_names:
self.task._reset_torchmetrics(name)
def on_test_epoch_end(self):
# Log all test torchmetrics
super().on_test_epoch_end()
for name in self.test_loader_names:
self.log_dict(
{f"{name}/{k}": v for k, v in self.task.get_torchmetrics(name).items()},
on_step=False,
on_epoch=True,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
def training_step(self, batch, batch_idx):
loss = self._shared_step(batch, batch_idx, prefix="train")
# Log the loss explicitly so it shows up in WandB
# Note that this currently runs into a bug in the progress bar with ddp (as of 1.4.6)
# https://github.com/PyTorchLightning/pytorch-lightning/pull/9142
# We additionally log the epochs under 'trainer' to get a consistent prefix with 'global_step'
loss_epoch = {"trainer/loss": loss, "trainer/epoch": self.current_epoch}
self.log_dict(
loss_epoch,
on_step=True,
on_epoch=False,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
# Log any extra info that the models want to expose (e.g. output norms)
metrics = {}
for module in list(self.modules())[1:]:
if hasattr(module, "metrics"):
metrics.update(module.metrics)
self.log_dict(
metrics,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
return loss
def validation_step(self, batch, batch_idx, dataloader_idx=0):
ema = (
self.val_loader_names[dataloader_idx].endswith("/ema")
and self.optimizers().optimizer.stepped
) # There's a bit of an annoying edge case with the first (0-th) epoch; it has to be excluded due to the initial sanity check
if ema:
self.optimizers().swap_ema()
loss = self._shared_step(
batch, batch_idx, prefix=self.val_loader_names[dataloader_idx]
)
if ema:
self.optimizers().swap_ema()
return loss
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self._shared_step(
batch, batch_idx, prefix=self.test_loader_names[dataloader_idx]
)
def configure_optimizers(self):
# Set zero weight decay for some params
if 'optimizer_param_grouping' in self.hparams.train:
add_optimizer_hooks(self.model, **self.hparams.train.optimizer_param_grouping)
# Normal parameters
all_params = list(self.parameters())
params = [p for p in all_params if not hasattr(p, "_optim")]
# Construct optimizer, add EMA if necessary
if self.hparams.train.ema > 0.0:
optimizer = utils.instantiate(
registry.optimizer,
self.hparams.optimizer,
params,
wrap=build_ema_optimizer,
polyak=self.hparams.train.ema,
)
else:
optimizer = utils.instantiate(registry.optimizer, self.hparams.optimizer, params)
del self.hparams.optimizer._name_
# Add parameters with special hyperparameters
hps = [getattr(p, "_optim") for p in all_params if hasattr(p, "_optim")]
hps = [
# dict(s) for s in set(frozenset(hp.items()) for hp in hps)
dict(s) for s in sorted(list(dict.fromkeys(frozenset(hp.items()) for hp in hps)))
# dict(s) for s in dict.fromkeys(frozenset(hp.items()) for hp in hps)
] # Unique dicts
print("Hyperparameter groups", hps)
for hp in hps:
params = [p for p in all_params if getattr(p, "_optim", None) == hp]
optimizer.add_param_group(
{"params": params, **self.hparams.optimizer, **hp}
)
### Layer Decay ###
if self.hparams.train.layer_decay['_name_'] is not None:
get_num_layer = utils.instantiate(
registry.layer_decay,
self.hparams.train.layer_decay['_name_'],
partial=True,
)
# Go through all parameters and get num layer
layer_wise_groups = {}
num_max_layers = 0
for name, p in self.named_parameters():
# Get layer id for each parameter in the model
layer_id = get_num_layer(name)
# Add to layer wise group
if layer_id not in layer_wise_groups:
layer_wise_groups[layer_id] = {
'params': [],
'lr': None,
'weight_decay': self.hparams.optimizer.weight_decay
}
layer_wise_groups[layer_id]['params'].append(p)
if layer_id > num_max_layers: num_max_layers = layer_id
# Update lr for each layer
for layer_id, group in layer_wise_groups.items():
group['lr'] = self.hparams.optimizer.lr * (self.hparams.train.layer_decay.decay ** (num_max_layers - layer_id))
# Reset the torch optimizer's param groups
optimizer.param_groups = []
for layer_id, group in layer_wise_groups.items():
optimizer.add_param_group(group)
# Print optimizer info for debugging
keys = set([k for hp in hps for k in hp.keys()]) # Special hparams
utils.train.log_optimizer(log, optimizer, keys)
# Configure scheduler
if "scheduler" not in self.hparams:
return optimizer
lr_scheduler = utils.instantiate(
registry.scheduler, self.hparams.scheduler, optimizer
)
scheduler = {
"scheduler": lr_scheduler,
"interval": self.hparams.train.interval, # 'epoch' or 'step'
"monitor": self.hparams.train.monitor,
"name": "trainer/lr", # default is e.g. 'lr-AdamW'
}
# See documentation for how to configure the return
# https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.core.lightning.html#pytorch_lightning.core.lightning.LightningModule.configure_optimizers
return [optimizer], [scheduler]
def train_dataloader(self):
train_loader = self.dataset.train_dataloader(**self.hparams.loader)
# Print stats in a try block since some dataloaders might not have a length?
try:
log.info(
f"Loaded 'train' dataloader:".ljust(30) +
f"{len(train_loader.dataset):7} examples | {len(train_loader):6} steps"
)
except:
pass
return train_loader
def _eval_dataloaders_names(self, loaders, prefix):
"""Process loaders into a list of names and loaders"""
if utils.is_dict(loaders):
return [
f"{prefix}/{k}" if k is not None else prefix for k in loaders.keys()
], list(loaders.values())
elif utils.is_list(loaders):
return [f"{prefix}/{i}" for i in range(len(loaders))], loaders
else:
return [prefix], [loaders]
def _eval_dataloaders(self):
# Return all val + test loaders
val_loaders = self.dataset.val_dataloader(**self.hparams.loader)
test_loaders = self.dataset.test_dataloader(**self.hparams.loader)
val_loader_names, val_loaders = self._eval_dataloaders_names(val_loaders, "val")
test_loader_names, test_loaders = self._eval_dataloaders_names(
test_loaders, "test"
)
# Duplicate datasets for ema
if self.hparams.train.ema > 0.0:
val_loader_names += [name + "/ema" for name in val_loader_names]
val_loaders = val_loaders + val_loaders
test_loader_names += [name + "/ema" for name in test_loader_names]
test_loaders = test_loaders + test_loaders
# adding option to only have val loader at eval (eg if test is duplicate)
if self.hparams.train.get("remove_test_loader_in_eval", None) is not None:
eval_loader_names = val_loader_names
eval_loaders = val_loaders
# default behavior is to add test loaders in eval
else:
eval_loader_names = val_loader_names + test_loader_names
eval_loaders = val_loaders + test_loaders
return eval_loader_names, eval_loaders
def val_dataloader(self):
val_loader_names, val_loaders = self._eval_dataloaders()
self.val_loader_names = val_loader_names
try:
for name, loader in zip(val_loader_names, val_loaders):
log.info(
f"Loaded '{name}' dataloader:".ljust(30) +
f"{len(loader.dataset):7} examples | {len(loader):6} steps"
)
except:
pass
return val_loaders
def test_dataloader(self):
test_loader_names, test_loaders = self._eval_dataloaders()
self.test_loader_names = ["final/" + name for name in test_loader_names]
return test_loaders
### pytorch-lightning utils and entrypoint ###
def create_trainer(config):
callbacks: List[pl.Callback] = []
logger = None
# WandB Logging
if config.get("wandb") is not None:
# Pass in wandb.init(config=) argument to get the nice 'x.y.0.z' hparams logged
# Can pass in config_exclude_keys='wandb' to remove certain groups
import wandb
logger = CustomWandbLogger(
config=utils.to_dict(config, recursive=True),
settings=wandb.Settings(start_method="fork"),
**config.wandb,
)
# Lightning callbacks
if "callbacks" in config:
for _name_, callback in config.callbacks.items():
if callback is None: continue
if config.get("wandb") is None and _name_ in ["learning_rate_monitor"]:
continue
log.info(f"Instantiating callback <{registry.callbacks[_name_]}>")
callback._name_ = _name_
callbacks.append(utils.instantiate(registry.callbacks, callback))
# Profiler
profiler = None
if config.trainer.get("profiler", None) is not None:
profiler = hydra.utils.instantiate(config.trainer.profiler)
config.trainer.pop("profiler")
# Configure ddp automatically
if config.trainer.accelerator == 'gpu' and config.trainer.devices > 1:
print("ddp automatically configured, more than 1 gpu used!")
config.trainer.strategy = "ddp"
# Add ProgressiveResizing callback
if config.callbacks.get("progressive_resizing", None) is not None:
num_stages = len(config.callbacks.progressive_resizing.stage_params)
print(f"Progressive Resizing: {num_stages} stages")
for i, e in enumerate(config.callbacks.progressive_resizing.stage_params):
# Stage params are resolution and epochs, pretty print
print(f"\tStage {i}: {e['resolution']} @ {e['epochs']} epochs")
# Additional ModelCheckpoint callback for preemption
if config.tolerance.id is not None:
pass
# if 'model_checkpoint' in config.callbacks.keys():
# callback_args = config.callbacks['model_checkpoint']
# callback_args._name_ = 'model_checkpoint' # For the registry
# # Save last two checkpoints to be extra fault tolerant
# callback_args.save_top_k = 2
# callback_args.monitor = 'trainer/epoch'
# callback_args.mode = 'max'
# callback_args.save_last = False
# callback_args.filename = 'last'
# # callback_args.save_on_train_epoch_end = True # this is False for the other checkpoint callback
# ckpt_callback = utils.instantiate(registry.callbacks, callback_args)
# # ckpt_callback.CHECKPOINT_NAME_LAST = 'last_' # now we have two last checkpoints, last.ckpt and last_.ckpt
# callbacks.append(ckpt_callback)
trainer = pl.Trainer(
logger=logger,
callbacks=callbacks,
profiler=profiler,
**config.trainer,
)
return trainer
def train(config):
if config.train.seed is not None:
pl.seed_everything(config.train.seed, workers=True)
trainer = create_trainer(config)
model = SequenceLightningModule(config)
# Load pretrained_model if specified
if config.train.get("pretrained_model_path", None) is not None:
# PTL style. Note, method returns a new model object, and need to pass config.
model = SequenceLightningModule.load_from_checkpoint(
config.train.pretrained_model_path,
config=config,
strict=config.train.pretrained_model_strict_load,
)
print("Loaded pretrained model from", config.train.pretrained_model_path)
# Added by KS for pre-training
# [22-07-21 AG] refactored, untested
if config.train.get("ignore_pretrained_layers", False):
            # After the refactor above, `model` already holds the pretrained weights, so build a
            # freshly initialized module to source the weights for layers we want to ignore.
            pretrained_dict = model.state_dict()
            scratch_dict = SequenceLightningModule(config).state_dict()
            for k, v in scratch_dict.items():
                for ignore_layer in config.train.ignore_pretrained_layers:
                    if ignore_layer in k:
                        pretrained_dict[k] = v
model.load_state_dict(pretrained_dict)
if config.train.get("pretrained_freeze_encoder", False):
for name, param in model.named_parameters():
if not("decoder" in name): param.requires_grad = False
# Run initial validation epoch (useful for debugging, finetuning)
if config.train.validate_at_start:
print("Running validation before training")
trainer.validate(model)
if config.train.ckpt is not None:
trainer.fit(model, ckpt_path=config.train.ckpt)
else:
trainer.fit(model)
if config.train.test:
trainer.test(model)
def preemption_setup(config):
if config.tolerance.id is None:
return config
# Create path ./logdir/id/ to store information for resumption
resume_dir = os.path.join(get_original_cwd(), config.tolerance.logdir, str(config.tolerance.id))
if os.path.exists(resume_dir):
print(f"Resuming from {resume_dir}")
# Load path to the last checkpoint
with open(os.path.join(resume_dir, "hydra.txt"), "r") as f:
hydra_paths = list(f.readlines())
# Look at the previous runs in reverse order
checkpoint_path = None
for hydra_path in reversed(hydra_paths):
hydra_path = hydra_path.rstrip('\n')
# Get the paths to the last.ckpt and last_.ckpt files
last_path = os.path.join(hydra_path, "checkpoints", "last.ckpt")
# last__path = os.path.join(hydra_path, "checkpoints", "last_.ckpt")
# last_exists, last__exists = os.path.exists(last_path), os.path.exists(last__path)
# if not last_exists or not last__exists:
# # This run doesn't have both checkpoints, so skip it
# print(f"\tSkipping {hydra_path}, not suitable for resuming (last_exists = {last_exists}, last__exists = {last__exists})")
# continue
# # Read timestamp when checkpoints were modified
# # We want to load the _earlier_ checkpoint, since that is guaranteed to be uncorrupted
# last_timestamp = os.path.getmtime(last_path)
# last__timestamp = os.path.getmtime(last__path)
# print("\t\tlast_timestamp =", last_timestamp)
# print("\t\tlast__timestamp =", last__timestamp)
# if last_timestamp < last__timestamp:
# checkpoint_path = last_path
# else:
# checkpoint_path = last__path
# checkpoint_path = last_path
# config.train.ckpt = checkpoint_path
if os.path.exists(last_path):
print("\tFound checkpoint at", last_path)
config.train.ckpt = last_path
# HACK TODO
config.train.pretrained_model_path = None
config.train.pretrained_model_state_hook._name_ = None
# config.train.pretrained_model_reinit_hook._name_ = None
break
# If we didn't find a checkpoint
if checkpoint_path is None:
print("\tNo suitable checkpoint found, starting from scratch")
# Set wandb run id to resume
if os.path.exists(os.path.join(hydra_path, 'wandb')):
run_info = [e for e in os.listdir(os.path.join(hydra_path, 'wandb')) if e.startswith('run-')][0]
run_id = run_info.split('-')[-1]
try:
config.wandb.id = run_id
except AttributeError:
pass
os.makedirs(resume_dir, exist_ok=True)
# Store path to Hydra output folder
with open(os.path.join(resume_dir, 'hydra.txt'), 'a') as f:
f.write(os.getcwd() + '\n')
return config
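# Example invocation (sketch; the experiment name below is hypothetical, options are Hydra
# key=value overrides of configs/config.yaml):
#   python -m train experiment=s4-cifar wandb=null trainer.devices=1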
@hydra.main(config_path="configs", config_name="config.yaml")
def main(config: OmegaConf):
# Process config:
# - register evaluation resolver
# - filter out keys used only for interpolation
# - optional hooks, including disabling python warnings or debug friendly configuration
config = utils.train.process_config(config)
# Pretty print config using Rich library
utils.train.print_config(config, resolve=True)
config = preemption_setup(config)
train(config)
if __name__ == "__main__":
main()
| state-spaces-main | train.py |
import argparse
import torch
from pathlib import Path
from train import SequenceLightningModule
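# Usage sketch (hypothetical path): strips the PyTorch Lightning wrapper from a checkpoint and
# saves just the bare state_dict next to it with a .pt extension, e.g.
#   python checkpoints/convert_pl_to_pt.py outputs/my-run/checkpoints/last.ckpt
# would write outputs/my-run/checkpoints/last.pt.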
parser = argparse.ArgumentParser()
parser.add_argument("ckpt_path", type=str)
args = parser.parse_args()
ckpt = torch.load(args.ckpt_path, map_location='cuda')
state_dict = ckpt['state_dict']
torch.save(state_dict, Path(args.ckpt_path).with_suffix(".pt"))
| state-spaces-main | checkpoints/convert_pl_to_pt.py |
from tqdm.auto import tqdm
import os
import hydra
import torch
import numpy as np
from pathlib import Path
import pytorch_lightning as pl
import matplotlib.pyplot as plt
from torch.nn.modules import module
import torch.nn.functional as F
from torch.distributions import Categorical
from src import utils
from einops import rearrange, repeat, reduce
from train import SequenceLightningModule
from omegaconf import OmegaConf
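# Usage sketch (hypothetical paths; this script is driven by Hydra, so options are key=value
# overrides of configs/generate.yaml):
#   python checkpoints/evaluate.py experiment_path=outputs/my-run checkpoint_path=checkpoints/last.ckpt
# Either a Lightning .ckpt or a bare .pt state_dict (see convert_pl_to_pt.py) can be used.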
@hydra.main(config_path="../configs", config_name="generate.yaml")
def main(config: OmegaConf):
# Load train config from existing Hydra experiment
if config.experiment_path is not None:
config.experiment_path = hydra.utils.to_absolute_path(config.experiment_path)
experiment_config = OmegaConf.load(os.path.join(config.experiment_path, '.hydra', 'config.yaml'))
config.model = experiment_config.model
config.task = experiment_config.task
config.encoder = experiment_config.encoder
config.decoder = experiment_config.decoder
config.dataset = experiment_config.dataset
config.loader = experiment_config.loader
config = utils.train.process_config(config)
utils.train.print_config(config, resolve=True)
if config.train.seed is not None:
pl.seed_everything(config.train.seed, workers=True)
# Define checkpoint path smartly
if not config.experiment_path:
ckpt_path = hydra.utils.to_absolute_path(config.checkpoint_path)
else:
ckpt_path = os.path.join(config.experiment_path, config.checkpoint_path)
print("Full checkpoint path:", ckpt_path)
# Load model
if ckpt_path.endswith('.ckpt'):
model = SequenceLightningModule.load_from_checkpoint(ckpt_path, config=config)
model.to('cuda')
elif ckpt_path.endswith('.pt'):
model = SequenceLightningModule(config)
model.to('cuda')
# Load checkpoint
state_dict = torch.load(ckpt_path, map_location='cuda')
model.load_state_dict(state_dict)
model.eval()
## Test single batch
debug = False
if debug:
val_dataloaders = model.val_dataloader()
loader = val_dataloaders[0] if isinstance(val_dataloaders, list) else val_dataloaders
model = model.to('cuda')
model.eval()
batch = next(iter(loader))
batch = (batch[0].cuda(), batch[1].cuda(), batch[2])
model.model.layers[0].layer.kernel()
with torch.no_grad():
x, y, *w = model.forward(batch)
loss = model.loss_val(x, y, *w)
print("Single batch loss:", loss)
## Use PL test to calculate final metrics
from train import create_trainer
trainer = create_trainer(config)
trainer.test(model)
if __name__ == '__main__':
main()
| state-spaces-main | checkpoints/evaluate.py |
"""Convert a V3 model to V4. See checkpoints/README.md for usage."""
from tqdm.auto import tqdm
import os
import hydra
import torch
import numpy as np
from pathlib import Path
import pytorch_lightning as pl
import matplotlib.pyplot as plt
from torch.nn.modules import module
import torch.nn.functional as F
from torch.distributions import Categorical
from src import utils
from einops import rearrange, repeat, reduce
from train import SequenceLightningModule
from omegaconf import OmegaConf
def convert_dt(state_dict):
"""Unsqueeze log_dt shape to match new shape."""
new_state_dict = {}
for k, v in state_dict.items():
# Unsqueeze log_dt shape [D] -> [D, 1]
if "log_dt" in k:
v = v.unsqueeze(dim=-1)
new_key = k.replace('log_dt', 'inv_dt')
new_state_dict[new_key] = v
return new_state_dict
def convert_a(state_dict):
"""Convert names of A_real and A_imag inside kernel."""
new_state_dict = {}
for k, v in state_dict.items():
k = k.replace('inv_w_real', 'A_real')
k = k.replace('log_w_real', 'A_real')
k = k.replace('w_imag', 'A_imag')
new_state_dict[k] = v
# Negative A imaginary part
for k, v in new_state_dict.items():
if k.endswith('A_imag'):
new_state_dict[k] = -v
return new_state_dict
def convert_kernel(state_dict):
"""Replace nested kernel with flat kernel and replace L param."""
new_state_dict = {}
for k, v in state_dict.items():
k = k.replace('kernel.kernel.L', 'kernel.kernel.l_kernel')
k = k.replace('kernel.kernel', 'kernel')
new_state_dict[k] = v
return new_state_dict
def convert_conv(state_dict):
"""Move FFTConv parameters a layer deeper."""
new_state_dict = {}
for k, v in state_dict.items():
k = k.replace('layer.kernel', 'layer.layer.kernel')
k = k.replace('layer.D', 'layer.layer.D')
new_state_dict[k] = v
return new_state_dict
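# Taken together (in the order applied by convert_checkpoint below), these passes rename keys
# roughly as follows, using a hypothetical key purely for illustration:
#   'model.layers.0.layer.kernel.kernel.log_dt' -> 'model.layers.0.layer.layer.kernel.inv_dt'
# i.e. log_dt becomes inv_dt (gaining a trailing dim), (inv_/log_)w_real and w_imag become
# A_real and A_imag (with A_imag negated), the nested kernel is flattened, and FFTConv
# parameters move one module level deeper.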
def convert_checkpoint(ckpt_path):
checkpoint = torch.load(ckpt_path, map_location='cuda')
if ckpt_path.endswith('.ckpt'):
state_dict = checkpoint['state_dict']
elif ckpt_path.endswith('.pt'):
state_dict = checkpoint
else:
raise NotImplementedError
new_state_dict = convert_dt(state_dict)
new_state_dict = convert_a(new_state_dict)
new_state_dict = convert_kernel(new_state_dict)
new_state_dict = convert_conv(new_state_dict)
if ckpt_path.endswith('.ckpt'):
checkpoint['state_dict'] = new_state_dict
else:
checkpoint = new_state_dict
return checkpoint
@hydra.main(config_path="../configs", config_name="generate.yaml")
def main(config: OmegaConf):
# Load train config from existing Hydra experiment
if config.experiment_path is not None:
config.experiment_path = hydra.utils.to_absolute_path(config.experiment_path)
experiment_config = OmegaConf.load(os.path.join(config.experiment_path, '.hydra', 'config.yaml'))
config.model = experiment_config.model
config.task = experiment_config.task
config.encoder = experiment_config.encoder
config.decoder = experiment_config.decoder
config.dataset = experiment_config.dataset
config.loader = experiment_config.loader
config = utils.train.process_config(config)
utils.train.print_config(config, resolve=True)
if config.train.seed is not None:
pl.seed_everything(config.train.seed, workers=True)
# Define checkpoint path smartly
if not config.experiment_path:
ckpt_path = hydra.utils.to_absolute_path(config.checkpoint_path)
else:
ckpt_path = os.path.join(config.experiment_path, config.checkpoint_path)
print("Full checkpoint path:", ckpt_path)
# Port checkpoint
checkpoint = convert_checkpoint(ckpt_path)
print("Finished converting checkpoint.")
# Test single batch
if config.test_model:
# Load checkpoint
model = SequenceLightningModule(config)
model.to('cuda')
if ckpt_path.endswith('.ckpt'):
model.load_state_dict(checkpoint['state_dict'])
elif ckpt_path.endswith('.pt'):
model.load_state_dict(checkpoint)
# Dataloader
val_dataloaders = model.val_dataloader()
loader = val_dataloaders[0] if isinstance(val_dataloaders, list) else val_dataloaders
model = model.to('cuda')
model.eval()
batch = next(iter(loader))
batch = (batch[0].cuda(), batch[1].cuda(), batch[2])
with torch.no_grad():
x, y, w = model.forward(batch)
loss = model.loss_val(x, y, **w)
print("Single batch loss:", loss)
## Use PL test to calculate final metrics
from train import create_trainer
trainer = create_trainer(config)
trainer.test(model)
path = Path(ckpt_path).absolute()
filename_new = path.stem + "_v4" + path.suffix
print("Saving to", filename_new)
torch.save(checkpoint, path.parent / filename_new)
if __name__ == '__main__':
main()
| state-spaces-main | checkpoints/convert_v3_to_v4.py |
"""Standalone version of Structured State Space sequence model (S4)."""
from collections import defaultdict
from typing import Optional, Mapping, Tuple, Union
import logging
from functools import partial
import math
import numpy as np
from scipy import special as ss
import torch
import torch.nn as nn
import torch.nn.functional as F
from pytorch_lightning.utilities import rank_zero_only
from einops import rearrange, repeat
# Function aliases
contract = torch.einsum
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
_c2r = torch.view_as_real
_r2c = torch.view_as_complex
if tuple(map(int, torch.__version__.split('.')[:2])) >= (1, 10):
_resolve_conj = lambda x: x.conj().resolve_conj()
else:
_resolve_conj = lambda x: x.conj()
def get_logger(name=__name__, level=logging.INFO) -> logging.Logger:
"""Initializes multi-GPU-friendly python logger."""
logger = logging.getLogger(name)
logger.setLevel(level)
# this ensures all logging levels get marked with the rank zero decorator
# otherwise logs would get multiplied for each GPU process in multi-GPU setup
for level in ("debug", "info", "warning", "error", "exception", "fatal", "critical"):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
log = get_logger(__name__)
"""Structured matrix kernels"""
# Try CUDA extension
try:
from extensions.kernels.cauchy import cauchy_mult as cauchy_cuda
from extensions.kernels.vandermonde import log_vandermonde_cuda
has_cuda_extension = True
log.info("CUDA extension for structured kernels (Cauchy and Vandermonde multiplication) found.")
except:
log.warning(
"CUDA extension for structured kernels (Cauchy and Vandermonde multiplication) not found. Install by going to extensions/kernels/ and running `python setup.py install`, for improved speed and memory efficiency. Note that the kernel changed for state-spaces 4.0 and must be recompiled."
)
has_cuda_extension = False
# Try pykeops
try:
import pykeops
from pykeops.torch import Genred
has_pykeops = True
log.info("Pykeops installation found.")
def _broadcast_dims(*tensors):
max_dim = max([len(tensor.shape) for tensor in tensors])
tensors = [tensor.view((1,)*(max_dim-len(tensor.shape))+tensor.shape) for tensor in tensors]
return tensors
def cauchy_keops(v, z, w):
expr_num = 'z * ComplexReal(v) - Real2Complex(Sum(v * w))'
expr_denom = 'ComplexMult(z-w, z-Conj(w))'
cauchy_mult = Genred(
f'ComplexDivide({expr_num}, {expr_denom})',
[
'v = Vj(2)',
'z = Vi(2)',
'w = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
v, z, w = _broadcast_dims(v, z, w)
v = _c2r(v)
z = _c2r(z)
w = _c2r(w)
r = 2*cauchy_mult(v, z, w, backend='GPU')
return _r2c(r)
def log_vandermonde_keops(v, x, L):
expr = 'ComplexMult(v, ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'v = Vj(2)',
'x = Vj(2)',
'l = Vi(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(v, x, l, backend='GPU')
return 2*_r2c(r).real
def log_vandermonde_transpose_keops(u, v, x, L):
"""
u: ... H L
v: ... H N
x: ... H N
Returns: ... H N
        V = Vandermonde(x, L) : (H N L)
contract_L(V * u * v)
"""
expr = 'ComplexMult(ComplexMult(v, u), ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'u = Vj(2)',
'v = Vi(2)',
'x = Vi(2)',
'l = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
u, v, x, l = _broadcast_dims(u, v, x, l)
u = _c2r(u)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(u, v, x, l, backend='GPU')
return _r2c(r)
except ImportError:
has_pykeops = False
if not has_cuda_extension:
log.warning(
"Falling back on slow Cauchy and Vandermonde kernel. Install at least one of pykeops or the CUDA extension for better speed and memory efficiency."
)
# Fallback versions
def cauchy_naive(v, z, w):
"""
v: (..., N)
z: (..., L)
w: (..., N)
returns: (..., L) \sum v/(z-w)
"""
v = _conj(v)
w = _conj(w)
cauchy_matrix = v.unsqueeze(-1) / (z.unsqueeze(-2) - w.unsqueeze(-1)) # (... N L)
return torch.sum(cauchy_matrix, dim=-2)
def log_vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
returns: (..., L) \sum v x^l
"""
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
vandermonde_prod = contract('... n, ... n l -> ... l', v, vandermonde_matrix) # (... L)
return 2*vandermonde_prod.real
def log_vandermonde_transpose_naive(u, v, x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
vandermonde_prod = contract('... l, ... n, ... n l -> ... n', u.to(x), v.to(x), vandermonde_matrix) # (... L)
return vandermonde_prod
""" Simple nn.Module components """
def Activation(activation=None, dim=-1):
if activation in [ None, 'id', 'identity', 'linear' ]:
return nn.Identity()
elif activation == 'tanh':
return nn.Tanh()
elif activation == 'relu':
return nn.ReLU()
elif activation == 'gelu':
return nn.GELU()
elif activation == 'elu':
return nn.ELU()
elif activation in ['swish', 'silu']:
return nn.SiLU()
elif activation == 'glu':
return nn.GLU(dim=dim)
elif activation == 'sigmoid':
return nn.Sigmoid()
elif activation == 'softplus':
return nn.Softplus()
else:
raise NotImplementedError("hidden activation '{}' is not implemented".format(activation))
def LinearActivation(
d_input, d_output, bias=True,
transposed=False,
activation=None,
activate=False, # Apply activation as part of this module
**kwargs,
):
"""Returns a linear nn.Module with control over axes order, initialization, and activation."""
# Construct core module
linear_cls = partial(nn.Conv1d, kernel_size=1) if transposed else nn.Linear
if activation is not None and activation == 'glu': d_output *= 2
linear = linear_cls(d_input, d_output, bias=bias, **kwargs)
if activate and activation is not None:
activation = Activation(activation, dim=-2 if transposed else -1)
linear = nn.Sequential(linear, activation)
return linear
class DropoutNd(nn.Module):
def __init__(self, p: float = 0.5, tie=True, transposed=True):
"""
tie: tie dropout mask across sequence lengths (Dropout1d/2d/3d)
"""
super().__init__()
if p < 0 or p >= 1:
raise ValueError("dropout probability has to be in [0, 1), " "but got {}".format(p))
self.p = p
self.tie = tie
self.transposed = transposed
self.binomial = torch.distributions.binomial.Binomial(probs=1-self.p)
def forward(self, X):
"""X: (batch, dim, lengths...)."""
if self.training:
if not self.transposed: X = rearrange(X, 'b ... d -> b d ...')
mask_shape = X.shape[:2] + (1,)*(X.ndim-2) if self.tie else X.shape
mask = torch.rand(*mask_shape, device=X.device) < 1.-self.p
X = X * mask * (1.0/(1-self.p))
if not self.transposed: X = rearrange(X, 'b d ... -> b ... d')
return X
return X
"""Misc functional utilities"""
def power(L, A, v=None):
"""Compute A^L and the scan sum_i A^i v_i.
A: (..., N, N)
v: (..., N, L)
"""
I = torch.eye(A.shape[-1]).to(A) # , dtype=A.dtype, device=A.device)
powers = [A]
l = 1
while True:
if L % 2 == 1: I = powers[-1] @ I
L //= 2
if L == 0: break
l *= 2
if v is None:
powers = [powers[-1] @ powers[-1]]
else:
powers.append(powers[-1] @ powers[-1])
if v is None: return I
# Invariants:
# powers[-1] := A^l
# l := largest po2 at most L
# Note that an alternative divide and conquer to compute the reduction is possible and can be embedded into the above loop without caching intermediate powers of A
# We do this reverse divide-and-conquer for efficiency reasons:
# 1) it involves fewer padding steps for non-po2 L
# 2) it involves more contiguous arrays
# Take care of edge case for non-po2 arrays
# Note that this initial step is a no-op for the case of power of 2 (l == L)
k = v.size(-1) - l
v_ = powers.pop() @ v[..., l:]
v = v[..., :l]
v[..., :k] = v[..., :k] + v_
# Handle reduction for power of 2
while v.size(-1) > 1:
v = rearrange(v, '... (z l) -> ... z l', z=2)
v = v[..., 0, :] + powers.pop() @ v[..., 1, :]
return I, v.squeeze(-1)
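# Minimal usage sketch for `power` (an illustrative addition, not called anywhere in this file):
# checks A^L and the scan sum_i A^i v_i against a naive reference.
def _power_example(N=4, L=6):
    A = 0.1 * torch.randn(N, N)
    v = torch.randn(N, L)
    # `power` writes into v in place, so pass a copy and keep the original for the reference
    AL, s = power(L, A, v.clone())
    assert torch.allclose(AL, torch.matrix_power(A, L), atol=1e-5)
    ref = sum(torch.matrix_power(A, i) @ v[:, i] for i in range(L))
    assert torch.allclose(s, ref, atol=1e-5)
    return AL, s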
"""HiPPO utilities"""
def transition(measure, N, **measure_args):
"""A, B transition matrices for different measures.
measure: the type of measure
legt - Legendre (translated)
legs - Legendre (scaled)
glagt - generalized Laguerre (translated)
lagt, tlagt - previous versions of (tilted) Laguerre with slightly different normalization
"""
# Legendre (translated)
if measure == 'legt':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1) ** .5
j, i = np.meshgrid(Q, Q)
A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :]
B = R[:, None]
A = -A
# Halve again for timescale correctness
A *= 0.5
B *= 0.5
# Legendre (scaled)
elif measure == 'legs':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
B = B.copy() # Otherwise "UserWarning: given NumPY array is not writeable..." after torch.as_tensor(B)
elif measure in ['fourier', 'fout']:
freqs = np.arange(N//2)
d = np.stack([np.zeros(N//2), freqs], axis=-1).reshape(-1)[1:]
A = np.pi*(-np.diag(d, 1) + np.diag(d, -1))
B = np.zeros(N)
B[0::2] = 2**.5
B[0] = 1
# Subtract off rank correction - this corresponds to the other endpoint u(t-1) in this case
A = A - B[:, None] * B[None, :]
B = B[:, None]
else:
raise NotImplementedError
return A, B
def rank_correction(measure, N, rank=1, dtype=torch.float):
"""Return low-rank matrix L such that A + L is normal."""
if measure == 'legs':
assert rank >= 1
P = torch.sqrt(.5+torch.arange(N, dtype=dtype)).unsqueeze(0) # (1 N)
elif measure == 'legt':
assert rank >= 2
P = torch.sqrt(1+2*torch.arange(N, dtype=dtype)) # (N)
P0 = P.clone()
P0[0::2] = 0.
P1 = P.clone()
P1[1::2] = 0.
P = torch.stack([P0, P1], dim=0) # (2 N)
P *= 2**(-0.5) # Halve the rank correct just like the original matrix was halved
elif measure in ['fourier', 'fout']:
P = torch.zeros(N)
P[0::2] = 2**.5
P[0] = 1
P = P.unsqueeze(0)
else: raise NotImplementedError
d = P.size(0)
if rank > d:
P = torch.cat([P, torch.zeros(rank-d, N, dtype=dtype)], dim=0) # (rank N)
return P
def nplr(measure, N, rank=1, dtype=torch.float, diagonalize_precision=True, B_clip=2.0):
"""Constructs NPLR form of HiPPO matrices.
Returns w, p, q, V, B such that
(w - p q^*, B) is unitarily equivalent to the original HiPPO A, B by the matrix V
i.e. A = V[w - p q^*]V^*, B = V B
measure: Name of HiPPO method.
N: Size of recurrent A matrix (also known as `d_state` elsewhere).
dtype: Single or double precision.
diagonalize_precision: Calculate diagonalization in double precision.
B_clip: Clip values of B, can help with stability. None for no clipping.
"""
assert dtype == torch.float or dtype == torch.double
cdtype = torch.cfloat if dtype == torch.float else torch.cdouble
A, B = transition(measure, N)
A = torch.as_tensor(A, dtype=dtype) # (N, N)
B = torch.as_tensor(B, dtype=dtype)[:, 0] # (N,)
P = rank_correction(measure, N, rank=rank, dtype=dtype) # (r N)
AP = A + torch.sum(P.unsqueeze(-2)*P.unsqueeze(-1), dim=-3)
# We require AP to be nearly skew-symmetric
_A = AP + AP.transpose(-1, -2)
if (err := torch.sum((_A - _A[0,0]*torch.eye(N))**2) / N) > 1e-5: # if not torch.allclose(_A - _A[0,0]*torch.eye(N), torch.zeros(N, N), atol=1e-5):
print("WARNING: HiPPO matrix not skew symmetric", err)
# Take advantage of identity + skew-symmetric form to calculate real and imaginary parts separately
# Imaginary part can use eigh instead of eig
W_re = torch.mean(torch.diagonal(AP), -1, keepdim=True)
# Diagonalize in double precision
if diagonalize_precision: AP = AP.to(torch.double)
# w, V = torch.linalg.eig(AP) # (..., N) (..., N, N)
W_im, V = torch.linalg.eigh(AP*-1j) # (..., N) (..., N, N)
if diagonalize_precision: W_im, V = W_im.to(cdtype), V.to(cdtype)
W = W_re + 1j * W_im
# Check: V W V^{-1} = A
# print("check", V @ torch.diag_embed(W) @ V.conj().transpose(-1, -2))
# Only keep half of each conjugate pair
_, idx = torch.sort(W.imag)
W_sorted = W[idx]
V_sorted = V[:, idx]
# There is an edge case when eigenvalues can be 0, which requires some machinery to handle
# We use a huge hack here: Assume only one pair is 0, and that it is the first row/column of A (only happens in Fourier case)
V = V_sorted[:, :N//2]
W = W_sorted[:N//2] # Only keep negative imaginary components
assert W[-2].abs() > 1e-4, "Only 1 zero eigenvalue allowed in diagonal part of A"
if W[-1].abs() < 1e-4:
V[:, -1] = 0.
V[0, -1] = 2**-0.5
V[1, -1] = 2**-0.5 * 1j
_AP = V @ torch.diag_embed(W) @ V.conj().transpose(-1, -2)
if ((err := torch.sum((2*_AP.real-AP)**2)/N) > 1e-5):
print("Warning: Diagonalization of A matrix not numerically precise - error", err)
# print("check", V @ torch.diag_embed(W) @ V.conj().transpose(-1, -2))
V_inv = V.conj().transpose(-1, -2)
# C = initial_C(measure, N, dtype=dtype)
B = contract('ij, j -> i', V_inv, B.to(V)) # V^* B
# C = contract('ij, j -> i', V_inv, C.to(V)) # V^* C
P = contract('ij, ...j -> ...i', V_inv, P.to(V)) # V^* P
if B_clip is not None:
B = B.real + 1j*torch.clamp(B.imag, min=-B_clip, max=B_clip)
# W represents the imaginary part of the DPLR form: A = W - PP^*
# Downstream classes just call this A for simplicity,
# which is also more consistent with the diagonal case
return W, P, B, V
def dplr(
init='hippo',
N=64, rank=1, H=1,
dtype=torch.float,
real_random=False,
real_scale=1.0,
imag_random=False,
imag_scale=1.0,
B_random=False,
B_init='constant',
B_scale=1.0,
P_scale=1.0,
normalize=False,
):
"""Directly construct a DPLR matrix.
Args:
- init: (str) ['rand', 'lin', inv', 'real', 'hippo'] Choices for initialization of A.
Most of these affect the imaginary part of A, except for 'real'.
- real_random: (bool) Initialize A.real in -U[0, 1]. Otherwise, initialize to -1/2.
- real_scale: (float) Scaling factor of real part of A.
- imag_random: (bool) Initialize A.imag randomly.
    - imag_scale: (float) Scaling factor of imaginary part of A.
- B_init: (str) ['constant' | 'random' | 'alternating' | 'unit-cw' | 'unit-ccw' | 'hippo']
Choices for initialization of B.
- B_scale: (float) Scaling factor for B
- P_scale: (float) Scaling factor for P
- normalize: (bool) Apply an automatic normalization factor on B
"""
assert dtype == torch.float or dtype == torch.double
dtype = torch.cfloat if dtype == torch.float else torch.cdouble
pi = torch.tensor(math.pi)
# Construct real part of diagonal A (must be non-negative)
if real_random:
real_part = torch.rand(H, N//2)
else:
real_part = .5 * torch.ones(H, N//2)
real_part = real_scale * real_part
# Construct imaginary part of diagonal A (must be non-negative)
if imag_random:
imag_part = N//2 * torch.rand(H, N//2)
else:
imag_part = repeat(torch.arange(N//2), 'n -> h n', h=H)
if init in ['random', 'rand']:
imag_part = torch.exp(torch.randn(H, N//2))
elif init == 'real':
imag_part = 0 * imag_part
if real_random:
real_part = torch.rand(H, N//2) * N//2
else:
# This is the S4D-Real method described in the S4D paper
# The A matrix is diag(-1, -2, ..., -N), which are the eigenvalues of the HiPPO matrix
real_part = 1 + repeat(torch.arange(N//2), 'n -> h n', h=H)
elif init in ['linear', 'lin']:
imag_part = pi * imag_part
elif init in ['inverse', 'inv']: # Based on asymptotics of the default HiPPO matrix
imag_part = 1/pi * N * (N/(1+2*imag_part)-1)
elif init in ['inverse2', 'inv2']:
imag_part = 1/pi * N * (N/(1+imag_part)-1)
elif init in ['quadratic', 'quad']:
imag_part = 1/pi * (1+2*imag_part)**2
elif init in ['legs', 'hippo']:
A, _, _, _ = nplr('legs', N)
imag_part = -A.imag # Positive
else: raise NotImplementedError
imag_part = imag_scale * imag_part
# Construct diagonal A
A = -real_part - 1j * imag_part # Force negative real and imag
assert torch.all(A.real < 1e-4) and torch.all(A.imag <= 0.0) # Allow some tolerance for numerical precision on real part
# Initialize B
if B_random:
log.warning("'B_random' is deprecated in favor of B_init='random' and will be deprecated in a future version.")
if init in ['legs', 'hippo']:
log.info(f'Initializing with S4D-LegS and ignoring argument {B_init=}')
# Special initialization using the HiPPO B matrix
# Note that theory (from S4D paper) says that B should be halved
# to match DPLR but we drop this 0.5 factor for simplicity
_, P, B, _ = nplr('legs', N, B_clip=2.0)
B = repeat(B, 'n -> h n', h=H).clone().contiguous()
elif B_init == 'constant':
B = torch.ones(H, N//2, dtype=dtype)
elif B_init == 'random':
B = torch.randn(H, N//2, dtype=dtype)
elif B_init == 'alternating': # Seems to track 'constant' exactly for some reason
B = torch.ones(H, N//4, 2, dtype=dtype)
B[:, :, 1] *= -1
B = B.view(H, N//2)
elif B_init == 'unit-cw':
z = torch.tensor(torch.exp(-2j * pi / N), dtype=dtype)
B = z ** torch.arange(0, N // 2)
B = repeat(B, 'n -> h n', h=H).clone().contiguous()
elif B_init == 'unit-ccw':
z = torch.tensor(torch.exp(2j * pi / N), dtype=dtype)
B = z ** torch.arange(0, N // 2)
B = repeat(B, 'n -> h n', h=H).clone().contiguous()
else: raise NotImplementedError
B *= B_scale
# Experimental feature that appeared in earlier versions of HTTYH (not extensively tested)
# Seems more principled for normalization theoretically, but seemed to hurt on PathX
if normalize:
norm = -B/A # (H, N) # Result if you integrate the kernel with constant 1 function
zeta = 2*torch.sum(torch.abs(norm)**2, dim=-1, keepdim=True) # Variance with a random C vector
B = B / zeta**.5
# Initialize P
if B_init in ['legs', 'hippo']:
# P constructed earlier
P = repeat(P, 'r n -> r h n', h=H).clone().contiguous()
else:
P = torch.randn(rank, H, N//2, dtype=dtype)
P = P * P_scale
# Initialize V (only used in testing)
V = torch.eye(N, dtype=dtype)[:, :N//2]
V = repeat(V, 'n m -> h n m', h=H)
return A, P, B, V
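# Illustrative sketch of `dplr` output (an added example, not used elsewhere): the S4D-Lin
# initialization stores A_n = -1/2 - i*pi*n, keeping one element of each conjugate pair.
def _dplr_init_example(H=4, N=64):
    A, P, B, V = dplr(init='lin', N=N, H=H)
    assert A.shape == (H, N // 2) and B.shape == (H, N // 2) and P.shape == (1, H, N // 2)
    assert torch.allclose(A.real, torch.full((H, N // 2), -0.5))
    return A, P, B, V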
def ssm(init, N, R, H, **ssm_args):
"""Dispatcher to create single SSM initialization
N: state size
R: rank (for DPLR parameterization)
H: number of independent SSM copies
"""
if init.startswith("diag") or init.startswith("dplr"):
if init.startswith("diag"):
ssm_args["P_scale"] = 0.0
args = init[4:].split("-")
assert args[0] == ""
if len(args) > 1:
ssm_args["init"] = args[1]
A, P, B, V = dplr(N=N, rank=R, H=H, **ssm_args)
else:
A, P, B, V = nplr(init, N, R, **ssm_args)
A = repeat(A, 'n -> s n', s=H)
P = repeat(P, 'r n -> r s n', s=H)
B = repeat(B, 'n -> s n', s=H)
V = repeat(V, 'n m -> s n m', s=H)
return A, P, B, V
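# Dispatch examples (sketch): ssm("diag-lin", N, R, H) zeroes the low-rank part (P_scale=0.0)
# and calls dplr(init='lin', ...), i.e. an S4D-Lin style initialization, while ssm("legs", N, R, H)
# builds the NPLR/DPLR form of the HiPPO-LegS matrix via nplr().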
combinations = {
'hippo': ['legs', 'fourier'],
'diag': ['diag-inv', 'diag-lin'],
'all': ['legs', 'fourier', 'diag-inv', 'diag-lin'],
}
def combination(inits, N, R, S, **ssm_args):
if isinstance(inits, str):
inits = combinations[inits] if inits in combinations else [inits]
assert S % len(inits) == 0, f"{S} independent trainable SSM copies must be multiple of {len(inits)} different inits"
A, P, B, V = zip(
*[ssm(init, N, R, S // len(inits), **ssm_args) for init in inits]
)
A = torch.cat(A, dim=0) # (S N)
P = torch.cat(P, dim=1) # (R S N)
B = torch.cat(B, dim=0) # (S N)
V = torch.cat(V, dim=0) # (S N N)
return A, P, B, V
"""SSM convolution kernels"""
def inv_transform(param, transform='none'):
"""Initialize a (positive) parameter under a transform."""
param = torch.clamp(param, min=1e-4)
if transform == 'none':
return param
elif transform == 'exp':
return torch.log(param) # Some of the HiPPO methods have real part 0
elif transform == 'relu':
return param
elif transform == 'sigmoid':
return torch.logit(param)
elif transform == 'softplus':
return torch.log(torch.exp(param)-1)
else: raise NotImplementedError
def param_transform(param, transform='none'):
"""Get a (positive) parameter under a transform."""
if transform == 'none':
p = param
elif transform == 'exp':
p = torch.exp(param)
elif transform == 'relu':
# JAX version seems to NaN if you allow 0's, although this code was fine without it
p = F.relu(param)+1e-4
elif transform == 'sigmoid':
p = F.sigmoid(param)
elif transform == 'softplus':
p = F.softplus(param)
else: raise NotImplementedError
return p
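# Note (added for clarity): `inv_transform` and `param_transform` are intended to be inverses,
# so for a positive p (clamped to >= 1e-4) and any supported transform t,
#   param_transform(inv_transform(p, t), t) ~= p
# `inv_transform` is applied once at initialization; `param_transform` recovers the constrained
# value on every forward pass.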
class Kernel(nn.Module):
"""Interface for modules that produce convolution kernels.
A main distinction between these and normal Modules is that the forward pass
does not take inputs. It is a mapping from parameters to a tensor that can
be used in other modules, in particular as a convolution kernel.
Because of the unusual parameterization, these kernels may often want special
hyperparameter settings on their parameters. The `register` method provides
an easy interface for controlling this, and is intended to be used with an
optimizer hook that can be found in train.py or example.py.
This class also defines an interface for interacting with kernels *statefully*,
in particular for state space models (SSMs). This interface handles the setting
when a model can be converted from a "CNN" into an "RNN".
_setup_step()
step()
default_state()
forward_state()
See ConvKernel for the simplest instantiation of this interface.
"""
def __init__(
self,
d_model: int = 0,
channels: int = 1,
l_max: Optional[int] = None,
lr: Union[float, Optional[Mapping]] = None,
wd: Union[float, Optional[Mapping]] = 0.0,
verbose: bool = True,
**kwargs,
):
"""General interface.
d_model (H): Model dimension, or number of independent convolution kernels created.
channels (C): Extra dimension in the returned output (see .forward()).
- One interpretation is that it expands the input dimension giving it C separate "heads" per feature.
That is convolving by this kernel maps shape (B L D) -> (B L C D)
- This is also used to implement a particular form of bidirectionality in an efficient way.
- In general for making a more powerful model, instead of increasing C
it is recommended to set channels=1 and adjust H to control parameters instead.
l_max (L): Maximum kernel length (optional). If unspecified, most Kernel instantiations
will return kernels of arbitrary length as passed into .forward().
lr: Optional dictionary specifying special hyperparameters for .register().
Passing in a number (e.g. 0.001) sets attributes of SSM parameters (A, B, dt).
A custom optimizer hook is needed to configure the optimizer to set the learning rates appropriately for these parameters.
wd: Same as lr, but for weight decay.
"""
super().__init__()
assert d_model > 0
self.H = self.d_model = d_model
self.L = self.l_max = l_max
self.channels = channels
self.lr = lr
self.wd = wd
self.verbose = verbose
# Add a catch-all **kwargs to make it easier to change kernels
# without manually moving other options passed in the config.
# Good to log these just so it's explicit.
if self.verbose and len(kwargs) > 0:
log.info(f"{type(self)} extra kwargs: {kwargs}")
# Logic for registering parameters
# Case 1: lr: None | float
# All params should have this lr (None means inherit from global lr)
# Case 2: lr: dict
# Specified params should have that lr, all others should be None
if self.lr is None or isinstance(self.lr, float):
self.lr_dict = defaultdict(lambda: self.lr)
else:
self.lr_dict = defaultdict(lambda: None)
self.lr_dict.update(self.lr)
# Same logic for weight decay
# (but is always just set to 0.0 and hasn't been ablated)
if self.wd is None or isinstance(self.wd, float):
self.wd_dict = defaultdict(lambda: self.wd)
else:
self.wd_dict = defaultdict(lambda: None)
self.wd_dict.update(self.wd)
def forward(self, state=None, rate=1.0, L=None):
"""General interface to generate a global convolution kernel.
state: Initial state for recurrent updates.
E.g. for SSMs, this should have shape (B, H, N) (batch, d_model, d_state).
rate: Relative sampling rate.
L: Target kernel length.
Returns:
- (C, H, L) (channels, d_model, l_kernel) The convolution kernel.
- (B, H, L) (batch, d_model, l_kernel)
Extra information for how the state affects the output of convolving by kernel.
"""
raise NotImplementedError
def register(self, name, tensor, lr=None, wd=0.0):
"""Register a tensor with a configurable learning rate and 0 weight decay"""
if lr == 0.0:
self.register_buffer(name, tensor)
else:
self.register_parameter(name, nn.Parameter(tensor))
optim = {}
if lr is not None: optim["lr"] = lr
if wd is not None: optim["weight_decay"] = wd
setattr(getattr(self, name), "_optim", optim)
def _setup_step(self, **kwargs):
"""Convert a model into a recurrent mode for autoregressive inference."""
raise NotImplementedError
def step(self, x, state, **kwargs):
"""Step the model for one timestep with input x and recurrent state."""
raise NotImplementedError
def default_state(self, *args, **kwargs):
"""Return a default initial state."""
raise NotImplementedError
@torch.no_grad()
def forward_state(self, u, state):
"""Forward the state through a sequence, i.e. computes the state after passing chunk through the kernel."""
raise NotImplementedError
@property
def d_state(self):
"""Implement this for interfaces that want to interact with a stateful layer (i.e. SSMs).
Currently the only codepath that might use this is the StateDecoder, which is not used.
"""
raise NotImplementedError
@property
def state_to_tensor(self):
"""Same as d_state, only needed for niche codepaths involving recurrent state."""
raise NotImplementedError
class SSMKernel(Kernel):
"""Parent class for different SSM parameterizations.
This class is abstract and only defines some initializations and flags that are common to all SSM variants.
It is instantiated by subclasses SSMKernel{Dense,Real,Diag,DPLR}.
Options:
    d_state (N): State size (dimensionality of parameters A, B, C). Generally shouldn't need to be adjusted and doesn't affect speed much for most kernels (e.g. S4, S4D).
deterministic: Use a deterministic initialization for dt, A, B, C.
Useful for debugging as well as constructing a simple exponential decay kernel (e.g. used in S4ND image->video inflation).
dt_min, dt_max: min and max values for the step size dt
    dt_tie: Keep dt tied across the N dimensions of the state. Although this theoretically makes more sense, models such as S5 and Mega have found slight improvements by setting it to False.
    dt_transform: Transform function for parameterization of dt (default 'exp'; 'softplus' is another supported option)
rank: Rank of low-rank correction for DPLR mode. Needs to be increased for init "legt".
n_ssm: Number of independent trainable (A, B) SSMs, e.g.
`n_ssm=1` means all A/B parameters are tied across the H different instantiations of C.
`n_ssm=None` means all H SSMs are completely independent.
Generally, changing this option can save parameters but doesn't affect performance or speed much.
This parameter must divide H.
init: Options for initialization of (A, B). For DPLR mode, recommendations are "legs", "fout", "hippo" (combination of both). For Diag mode, recommendations are "diag-inv", "diag-lin", "diag-legs", and "diag" (combination of diag-inv and diag-lin).
init_args: Extra arguments passed into initialization function (see dplr.py for options).
"""
def init_dt(self):
# Generate dt
if self.deterministic: # Meant for debugging
assert self.dt_tie, "Deterministic dt initialization is tied"
assert self.dt_transform == 'exp', "Deterministic dt transform should be 'exp' for simplicity"
inv_dt = torch.exp(torch.linspace(math.log(self.dt_min), math.log(self.dt_max), self.H)).unsqueeze(-1) # (H 1)
else:
shape = (self.H, 1) if self.dt_tie else (self.H, self.N//2)
# Initialize log dt
inv_dt = torch.rand(*shape, dtype=self.dtype) * (
math.log(self.dt_max) - math.log(self.dt_min)
) + math.log(self.dt_min)
if self.dt_transform != 'exp':
inv_dt = inv_transform(torch.exp(inv_dt), self.dt_transform)
return inv_dt
def init_ssm_real(self):
"""Returns (dense, real) (A, B, C) parameters for init options."""
# Generate A, B
A, B = transition(self.init, self.N)
A = torch.as_tensor(A, dtype=self.dtype)
B = torch.as_tensor(B, dtype=self.dtype)[:, 0]
B = repeat(B, 'n -> v n', v=self.n_ssm).clone().contiguous()
A = repeat(A, 'n m -> v n m', v=self.n_ssm).clone().contiguous()
# Generate C
if self.deterministic:
C = torch.zeros(self.channels, self.H, self.N, dtype=self.dtype)
C[..., :1] = 1.0
else:
C = torch.randn(self.channels, self.H, self.N, dtype=self.dtype)
return A, B, C
def init_ssm_dplr(self):
"""Returns DPLR (A, P, B, C) parameters for init options."""
A, P, B, V = combination(self.init, self.N, self.rank, self.n_ssm, **self.init_args)
# Broadcast C to have H channels
if self.deterministic:
C = torch.zeros(self.channels, self.n_ssm, self.N, dtype=self.cdtype)
C[:, :, :1] = 1.
C = contract('hmn, chn -> chm', V.conj().transpose(-1, -2), C) # V^* C
C = repeat(C, 'c t n -> c (v t) n', v=self.H // C.size(-2)).clone().contiguous()
else:
C = torch.randn(self.channels, self.H, self.N//2, dtype=self.cdtype)
# Broadcast other parameters to have n_ssm copies
assert self.n_ssm % B.size(-2) == 0 \
and self.n_ssm % P.size(-2) == 0 \
and self.n_ssm % A.size(-2) == 0
# Broadcast tensors to n_ssm copies
# These will be the parameters, so make sure tensors are materialized and contiguous
B = repeat(B, 't n -> (v t) n', v=self.n_ssm // B.size(-2)).clone().contiguous()
P = repeat(P, 'r t n -> r (v t) n', v=self.n_ssm // P.size(-2)).clone().contiguous()
A = repeat(A, 't n -> (v t) n', v=self.n_ssm // A.size(-2)).clone().contiguous()
# Because these complex parameterizations assume conjugate symmetry,
# halve the value of self.N for convenience
self.N //= 2
return A, P, B, C
def __init__(
self,
# General Kernel arguments for parent class
d_model: int = 0,
channels: int = 1,
l_max: Optional[int] = None,
lr: Union[float, Optional[Mapping]] = None,
wd: Union[float, Optional[Mapping]] = 0.0,
verbose: bool = True,
# SSM arguments
d_state: int = 64,
deterministic: bool = False,
# dt options
dt_min: float = 0.001,
dt_max: float = 0.1,
dt_tie: bool = True,
dt_transform: str = 'exp',
# (A, B, C) options
rank: int = 1,
n_ssm: Optional[int] = None,
measure: Optional[str] = None,
init: Optional[str] = "legs",
# Extra hyperparameters for initialization
**init_args,
):
super().__init__(d_model=d_model, channels=channels, l_max=l_max, lr=lr, wd=wd, verbose=verbose)
self.N = d_state
self.dtype, self.cdtype = torch.float, torch.cfloat
self.deterministic = deterministic
# dt options
self.dt_min = dt_min
self.dt_max = dt_max
self.dt_tie = dt_tie
self.dt_transform = dt_transform
# SSM options (A, B, C)
self.rank = rank
self.n_ssm = n_ssm if n_ssm is not None else self.H
if measure is not None:
log.warning("Warning: 'measure' option changed to 'init' and will be removed in a future version.")
assert init is None, "'measure' and 'init' cannot both be passed into SSMKernel"
init, measure = measure, init
self.init = init
self.init_args = init_args
@torch.no_grad()
def forward_state(self, u, state):
"""Forward the state through a sequence, i.e. computes the state after passing chunk through SSM
This is a generic version of this functionality that works for SSMs.
It is currently used by SSMKernelDense and SSMKernelDPLR.
This is a suboptimal implementation; it is recommended to use SSMKernelDiag
if this functionality is desired.
state: (B, H, N)
u: (B, H, L)
Returns: (B, H, N)
"""
# Construct dA, dB matrices
dA, dB = self._setup_state() # (H N N) (H N)
conj = state.size(-1) != dA.size(-1)
if conj: state = _conj(state)
v = contract('h n, b h l -> b h n l', dB, u.flip(-1))
AL, v = power(u.size(-1), dA, v)
next_state = contract("h m n, b h n -> b h m", AL, state)
next_state = next_state + v
if conj: next_state = next_state[..., : next_state.size(-1) // 2]
return next_state
def _setup_state(self):
"""Register dA and dB to module."""
raise NotImplementedError
@property
def d_state(self):
"""d_state and state_to_tensor are used by specific decoders.
These were used in earlier versions and should not be needed in general.
"""
return self.H * self.N
@property
def state_to_tensor(self):
        return lambda state: rearrange(state, '... h n -> ... (h n)')
class SSMKernelDiag(SSMKernel):
"""SSM kernel using diagonal state matrix (S4D model).
Options:
disc: ['zoh' | 'bilinear' | 'dss'] Discretization options.
dt_fast: (experimental) Parameterize inv_dt under sinh function.
(Ohno et al. "Fast Saturating Gate for Learning Long Time Scales with RNNs")
real_transform, imag_transform: ['none' | 'exp' | 'relu' | 'sigmoid' | 'softplus']
Parameterize the real/imag parts of the diagonal of A under this function.
bandlimit: Mask high frequencies of the kernel (indices corresponding to
diagonal elements with large imaginary part). Introduced in S4ND paper.
backend: ['cuda' | 'keops' | 'naive'] Options for Vandermonde/Cauchy kernel (in order of efficiency).
is_real : Real-valued SSM; can be interpreted as EMA.
"""
def __init__(
self,
disc: str = 'zoh', # Change to 'bilinear' to match S4, but should make little difference either way
dt_fast: bool = False,
real_transform: str = 'exp',
imag_transform: str = 'none',
bandlimit: Optional[float] = None,
backend: str = 'cuda',
is_real: bool = False,
**kwargs,
):
# Special case: for real-valued, d_state semantics change
if is_real and 'd_state' in kwargs:
kwargs['d_state'] = kwargs['d_state'] * 2
super().__init__(**kwargs)
self.disc = disc
self.dt_fast = dt_fast
self.real_transform = real_transform
self.imag_transform = imag_transform
self.bandlimit = bandlimit
self.backend = backend
self.is_real = is_real
# Initialize dt, A, B, C
inv_dt = self.init_dt()
A, P, B, C = self.init_ssm_dplr()
# Note that in the Diag case, P will be ignored
# The DPLR case subclasses this and uses P
self.register_params(A, B, C, inv_dt, P)
def register_params(self, A, B, C, inv_dt, P):
"""Process the initialization into form of trainable parameters.
A: (S, N) diagonal matrix
B: (S, N)
C: (C, H, N)
dt: (H) timescale per feature
Dimensions:
N (or d_state): state size
H (or d_model): total SSM copies
S (or n_ssm): number of trainable copies of (A, B, dt); must divide H
C (or channels): system is 1-dim to C-dim
The forward pass of this Module returns a tensor of shape (C, H, L)
Note: tensor shape N here denotes half the true state size, because of conjugate symmetry
"""
assert self.backend in ['cuda', 'keops', 'naive']
if self.dt_fast: inv_dt = torch.asinh(inv_dt)
# Rank of low-rank correction
assert self.H == inv_dt.size(0)
assert self.N == A.size(-1) == B.size(-1) == C.size(-1)
assert self.n_ssm == A.size(-2) == B.size(-2) # Number of independent SSMs trained
self.repeat = self.H // A.size(0)
# Check that diagonal part has negative real and imag part
# (allow some tolerance for numerical precision on real part
# since it may be constructed by a diagonalization)
assert torch.all(A.real < 1e-4) and torch.all(A.imag <= 0.0)
# Broadcast everything to correct shapes
C = C.expand(torch.broadcast_shapes(C.shape, (1, self.H, self.N))) # (C, H, N) # TODO originally this was only in DPLR, check safe for Diag
        B = B.unsqueeze(0) # (1, S, N); broadcast to (1, H, N) in _get_params
assert self.channels == C.shape[0]
# Register dt
self.register("inv_dt", inv_dt, self.lr_dict['dt'], self.wd_dict['dt'])
# Register ABC
if self.is_real:
self.register("C", C.real, self.lr_dict['C'], None)
self.register("B", B.real, self.lr_dict['B'], self.wd_dict['B'])
self.register("A_real", inv_transform(-A.real, self.real_transform), self.lr_dict['A'], self.wd_dict['A'])
else:
self.register("C", _c2r(_resolve_conj(C)), self.lr_dict['C'], None)
self.register("B", _c2r(B), self.lr_dict['B'], self.wd_dict['B'])
self.register("A_real", inv_transform(-A.real, self.real_transform), self.lr_dict['A'], self.wd_dict['A'])
self.register("A_imag", inv_transform(-A.imag, self.imag_transform), self.lr_dict['A'], self.wd_dict['A'])
def _get_params(self, rate=1.0):
"""Process the internal parameters."""
# (S N) where S=n_ssm
if self.is_real:
A = -param_transform(self.A_real, self.real_transform)
B = self.B # (1 S N)
C = self.C # (C H N)
else:
A = -param_transform(self.A_real, self.real_transform) - 1j * param_transform(self.A_imag, self.imag_transform)
B = _r2c(self.B) # (1 S N)
C = _r2c(self.C) # (C H N)
if self.dt_fast: inv_dt = torch.sinh(self.inv_dt)
else: inv_dt = self.inv_dt
dt = param_transform(inv_dt, self.dt_transform) * rate # (H N)
if self.bandlimit is not None:
freqs = dt / rate * A.imag.abs() / (2*math.pi) # (H N)
mask = torch.where(freqs < self.bandlimit * .5, 1, 0)
C = C * mask
# Incorporate dt into A and B
A = repeat(A, 't n -> (v t) n', v=self.repeat) # (H N)
B = repeat(B, 'b t n -> b (v t) n', v=self.repeat) # (1 H N)
# TODO: The downstream algorithm should only need to access dt*A
# However the current DPLR kernel still uses dt and A separately
# Once that is fixed, this should return dtA instead of dt and A
dtA = dt * A # (H N)
return dt, A, B, C
def forward(self, L, state=None, rate=1.0):
"""See Kernel.forward() for argument documentation."""
dt, A, B, C = self._get_params(rate)
dtA = dt * A
# Augment B with state
if state is not None:
s = state / dt
if self.disc == 'bilinear':
s = s * (1. + dtA/2)
elif self.disc == 'zoh':
s = s * dtA * dtA.exp() / (dtA.exp() - 1.)
B = torch.cat([s, B], dim=-3) # (1+B H N)
# Combine B and C
C = (B[:, None, :, :] * C).view(-1, self.H, self.N)
# Dispatch which Vandermonde kernel to use
if has_cuda_extension and C.dtype == torch.cfloat and C.device.type == 'cuda' and self.backend == 'cuda':
log_vandermonde = log_vandermonde_cuda
elif has_pykeops and self.backend in ['cuda', 'keops']:
log_vandermonde = log_vandermonde_keops
else:
log_vandermonde = log_vandermonde_naive
# Main kernel
if self.disc == 'zoh':
# Power up
C = C * (torch.exp(dtA)-1.) / A
K = log_vandermonde(C, dtA, L) # (H L)
elif self.disc == 'bilinear':
C = C * (1. - dtA/2).reciprocal() * dt # or * dtA / A
dA = (1. + dtA/2) / (1. - dtA/2)
K = log_vandermonde(C, dA.log(), L)
elif self.disc == 'dss':
# Implementation from DSS meant for case when real eigenvalues can be positive
P = dtA.unsqueeze(-1) * torch.arange(L, device=C.device) # [H N L]
A_gt_0 = A.real > 0 # [N]
if A_gt_0.any():
with torch.no_grad():
P_max = dtA * (A_gt_0 * (L-1)) # [H N]
P = P - P_max.unsqueeze(-1) # [H N L]
S = P.exp() # [H N L]
dtA_neg = dtA * (1 - 2*A_gt_0) # [H N]
num = dtA_neg.exp() - 1 # [H N]
den = (dtA_neg * L).exp() - 1 # [H N]
# Inline reciprocal function for DSS logic
x = den * A
x_conj = _resolve_conj(x)
r = x_conj / (x*x_conj + 1e-7)
C = C * num * r # [C H N]
K = contract('chn,hnl->chl', C, S).float()
else: raise ValueError(f"Discretization {self.disc} not supported")
K = K.view(-1, self.channels, self.H, L) # (1+B C H L)
if state is not None:
K_state = K[:-1, :, :, :] # (B C H L)
else:
K_state = None
K = K[-1, :, :, :] # (C H L)
return K, K_state
def _setup_step(self):
"""Set up dA, dB, dC discretized parameters for stepping."""
        dt, A, B, C = self._get_params()
# Incorporate dt into A
dtA = dt * A # (H N)
if self.disc == 'zoh':
self.dA = torch.exp(dtA) # (H N)
self.dB = B * (torch.exp(dtA)-1.) / A # (C H N)
elif self.disc == 'bilinear':
self.dA = (1. + dtA/2) / (1. - dtA/2)
self.dB = B * (1. - dtA/2).reciprocal() * dt # or * dtA / A
self.dB = rearrange(self.dB, '1 h n -> h n')
self.dC = C
def default_state(self, *batch_shape):
C = _r2c(self.C)
state = torch.zeros(*batch_shape, self.H, self.N, dtype=C.dtype, device=C.device)
return state
def step(self, u, state):
next_state = contract("h n, b h n -> b h n", self.dA, state) \
+ contract("h n, b h -> b h n", self.dB, u)
y = contract("c h n, b h n -> b c h", self.dC, next_state)
return 2*y.real, next_state
def forward_state(self, u, state):
"""Pass the state forward through an entire sequence."""
self._setup_step()
AL = self.dA ** u.size(-1)
u = u.flip(-1).to(self.dA).contiguous() # (B H L)
# Dispatch which Vandermonde kernel to use
if has_pykeops and self.backend in ['cuda', 'keops']:
log_vandermonde_transpose = log_vandermonde_transpose_keops
else:
log_vandermonde_transpose = log_vandermonde_transpose_naive
v = log_vandermonde_transpose(u, self.dB, self.dA.log(), u.size(-1))
next_state = AL * state + v
return next_state
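# Recurrent-mode usage sketch for SSMKernelDiag (illustrative; shapes inferred from step()):
#   kernel._setup_step()                       # materialize discretized dA, dB, dC
#   state = kernel.default_state(batch_size)   # (B, H, N), complex
#   y, state = kernel.step(u_t, state)         # u_t: (B, H)  ->  y: (B, C, H)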
class SSMKernelDPLR(SSMKernelDiag):
"""SSM kernel for diagonal + low rank (DPLR) state matrices, corresponding to the original S4 model."""
@torch.no_grad()
def _setup_C(self, L):
"""Construct C~ from C.
        Two modes are supported: initialize directly to length L if self.l_kernel is 0 (i.e. not yet set up), or double the current length.
"""
if self.l_kernel.item() == 0:
if self.verbose: log.info(f"S4: Initializing kernel to length {L}")
double_length = False
elif L > self.l_kernel.item(): # 2*int(self.l_kernel) == L:
if self.verbose: log.info(f"S4: Doubling length from L = {self.l_kernel.item()} to {2*self.l_kernel.item()}")
double_length = True
L = self.l_kernel.item() # Convenience for the math below
else: return
C = _r2c(self.C)
dA, _ = self._setup_state()
dA_L = power(L, dA)
# Multiply C by I - dA_L
C_ = _conj(C)
prod = contract("h m n, c h n -> c h m", dA_L.transpose(-1, -2), C_)
if double_length: prod = -prod # Multiply by I + dA_L instead
C_ = C_ - prod
C_ = C_[..., :self.N] # Take conjugate pairs again
self.C.copy_(_c2r(C_))
self.l_kernel = 2*self.l_kernel if double_length else self.l_kernel+L # Preserve type/device
def _omega(self, L, dtype, device, cache=True):
"""Calculate (and cache) FFT nodes.
This also caches a version of the nodes "unprocessed" with the bilinear transform.
        This method should be called every time the internal length self.l_kernel changes.
"""
# Use cached if available
if cache and hasattr(self, 'omega') and self.omega.size(-1) == L//2+1:
return self.omega, self.z
omega = torch.tensor(
np.exp(-2j * np.pi / (L)), dtype=dtype, device=device
) # \omega_{2L}
omega = omega ** torch.arange(0, L // 2 + 1, device=device)
z = 2 * (1 - omega) / (1 + omega)
# Cache if necessary
if cache:
self.omega = omega
self.z = z
return omega, z
def register_params(self, A, B, C, inv_dt, P):
"""Process the initialization into form of trainable parameters.
The SSM state matrix is represented by diag_embed(A) - PP^*
Note that the A notation here is slightly overloaded:
normally A refers to the full SSM state matrix (DPLR in this case)
but here we're using it to refer to the diagonal part of the matrix.
This is to make variable names compatible with the SSMKernelDiag class (DSS/S4D)
and is a much simpler variable name (e.g. as opposed to Lambda).
A: (S, N) diagonal part
P: (R, S, N) low-rank part
B: (S, N)
C: (C, H, N)
dt: (H) timescale per feature
Dimensions:
N (or d_state): state size
H (or d_model): total SSM copies
S (or n_ssm): number of trainable copies of (A, B, dt); must divide H
R (or rank): rank of low-rank part
C (or channels): system is 1-dim to C-dim
The forward pass of this Module returns a tensor of shape (C, H, L)
Note: tensor shape N here denotes half the true state size, because of conjugate symmetry
"""
# Print out kernel lengths; it can be tricky to make sure the length logic is correct
if self.verbose:
log.info(f"Constructing S4 (H, N, L) = ({self.H}, {self.N}, {self.l_max})")
# Register the basic params for diagonal SSM (A, B, C, dt)
super().register_params(A, B, C, inv_dt, P)
# Check shapes
assert self.rank == P.shape[-3]
assert self.N == P.size(-1)
assert self.n_ssm == P.size(-2)
self.register('P', _c2r(P), self.lr_dict['A'], self.wd_dict['A'])
# Track the current kernel length this is "attuned" to
self.register_buffer('l_kernel', torch.tensor(0))
def _get_params(self, rate=1.0):
dt, A, B, C = super()._get_params(rate=rate)
P = _r2c(self.P) # (R S N)
P = repeat(P, 'r t n -> r (v t) n', v=self.repeat) # (R H N)
Q = P.conj()
return dt, A, B, C, P, Q
def forward(self, state=None, rate=1.0, L=None):
"""See Kernel.forward() for argument documentation."""
# Initialize C~ if necessary (done in forward pass so it's on the correct device)
if self.l_kernel.item() == 0 and self.l_max is not None and self.l_max > 0:
self._setup_C(self.l_max)
# Handle sampling rate logic
# The idea is that this kernel's length (in continuous units) is self.l_kernel, while we are asked to provide a kernel of length L at (relative) frequency rate
if L is None:
L = round(self.l_kernel.item() / rate)
# Increase the internal length if needed
continuous_L = round(rate*L)
while continuous_L > self.l_kernel.item():
self._setup_C(continuous_L)
discrete_L = round(self.l_kernel.item()/rate)
dt, A, B, C, P, Q = self._get_params(rate)
# Get FFT nodes of right length
omega, z = self._omega(discrete_L, dtype=A.dtype, device=A.device, cache=(rate==1.0))
# Augment B
if state is not None:
# Have to "unbilinear" the state to put it into the same "type" as B
# Compute 1/dt * (I + dt/2 A) @ state
# Can do this without expanding (maybe minor speedup using conj symmetry in theory), but it's easier to read this way
s = _conj(state) if state.size(-1) == self.N else state # (B H N)
sA = (
s * _conj(A) # (B H N)
- contract('bhm, rhm, rhn -> bhn', s, _conj(Q), _conj(P))
)
s = s / dt + sA / 2
s = s[..., :self.N]
B = torch.cat([s, B], dim=-3) # (B+1, H, N)
# Incorporate dt into A
A = A * dt # (H N)
# Stack B and p, C and q for convenient batching
B = torch.cat([B, P], dim=-3) # (B+1+R, H, N)
C = torch.cat([C, Q], dim=-3) # (C+R, H, N)
# Incorporate B and C batch dimensions
v = B.unsqueeze(-3) * C.unsqueeze(-4) # (B+1+R, C+R, H, N)
v = v * dt # Incorporate dt into B
# Dispatch which Cauchy kernel to use
if has_cuda_extension and z.dtype == torch.cfloat and z.device.type == 'cuda' and self.kernel == 'cuda':
cauchy_mult = cauchy_cuda
elif has_pykeops and self.kernel in ['cuda', 'keops']:
cauchy_mult = cauchy_keops
else:
cauchy_mult = cauchy_naive
# Calculate resolvent at omega
r = cauchy_mult(v, z, A)
# Low-rank Woodbury correction
if self.rank == 1:
k_f = r[:-1, :-1, :, :] - r[:-1, -1:, :, :] * r[-1:, :-1, :, :] / (1 + r[-1:, -1:, :, :])
elif self.rank == 2:
r00 = r[: -self.rank, : -self.rank, :, :]
r01 = r[: -self.rank, -self.rank :, :, :]
r10 = r[-self.rank :, : -self.rank, :, :]
r11 = r[-self.rank :, -self.rank :, :, :]
det = (1 + r11[:1, :1, :, :]) * (1 + r11[1:, 1:, :, :]) - r11[:1, 1:, :, :] * r11[1:, :1, :, :]
s = (
r01[:, :1, :, :] * (1 + r11[1:, 1:, :, :]) * r10[:1, :, :, :]
+ r01[:, 1:, :, :] * (1 + r11[:1, :1, :, :]) * r10[1:, :, :, :]
- r01[:, :1, :, :] * (r11[:1, 1:, :, :]) * r10[1:, :, :, :]
- r01[:, 1:, :, :] * (r11[1:, :1, :, :]) * r10[:1, :, :, :]
)
s = s / det
k_f = r00 - s
else:
r00 = r[:-self.rank, :-self.rank, :, :]
r01 = r[:-self.rank, -self.rank:, :, :]
r10 = r[-self.rank:, :-self.rank, :, :]
r11 = r[-self.rank:, -self.rank:, :, :]
r11 = rearrange(r11, "a b h n -> h n a b")
r11 = torch.linalg.inv(torch.eye(self.rank, device=r.device) + r11)
r11 = rearrange(r11, "h n a b -> a b h n")
k_f = r00 - torch.einsum("i j h n, j k h n, k l h n -> i l h n", r01, r11, r10)
# Final correction for the bilinear transform
k_f = k_f * 2 / (1 + omega)
# Move from frequency to coefficients
k = torch.fft.irfft(k_f, n=discrete_L) # (B+1, C, H, L)
        # Truncate to target length
k = k[..., :L]
if state is not None:
k_state = k[:-1, :, :, :] # (B, C, H, L)
else:
k_state = None
k_B = k[-1, :, :, :] # (C H L)
return k_B, k_state
@torch.no_grad()
def double_length(self):
self._setup_C(2*self.l_kernel)
@torch.no_grad()
def _check(self):
"""Check if A, B, C parameters and vanilla SSMKernel construction can be recovered"""
# assert self.l_kernel > 0, "Set up module first"
K = self.forward(L=self.l_max)[0]
self._setup_step()
K_ = krylov(self.l_max, self.dA, self.dB, self.dC)
diff = K - K_
print("checking DPLR Kernel construction", torch.sum(diff ** 2))
@torch.no_grad()
def _setup_linear(self):
"""Preprocessing that allows fast linear-time (in state dimension) stepping."""
dt, A, B, C, P, Q = self._get_params()
# Prepare Linear stepping
D = (2.0 / dt - A).reciprocal() # (H, N)
R = (torch.eye(self.rank, dtype=A.dtype, device=A.device) + 2*contract('r h n, h n, s h n -> h r s', Q, D, P).real) # (H R R)
Q_D = rearrange(Q*D, 'r h n -> h r n')
try:
R = torch.linalg.solve(R, Q_D) # (H R N)
except:
R = torch.tensor(np.linalg.solve(R.to(Q_D).contiguous().detach().cpu(), Q_D.contiguous().detach().cpu())).to(Q_D)
R = rearrange(R, 'h r n -> r h n')
self.step_params = {
"D": D, # (H N)
"R": R, # (R H N)
"P": P, # (R H N)
"Q": Q, # (R H N)
"B": B, # (1 H N)
"E": 2.0 / dt + A, # (H N)
}
def _step_state_linear(self, u=None, state=None):
"""
Version of the step function that has time O(N) instead of O(N^2) per step, which takes advantage of the DPLR form and bilinear discretization.
Unfortunately, as currently implemented it's about 2x slower because it calls several sequential operations.
Perhaps a fused CUDA kernel implementation would be much faster.
u: (H) Input
state: (H, N/2) State with conjugate pairs. Optionally, the state can have last dimension N.
Returns: same shape as state
"""
C = _r2c(self.C) # View used for dtype/device
if u is None: # Special case used to find dA
u = torch.zeros(self.H, dtype=C.dtype, device=C.device)
if state is None: # Special case used to find dB
state = torch.zeros(self.H, self.N, dtype=C.dtype, device=C.device)
step_params = self.step_params.copy()
if state.size(-1) == self.N: # Only store half of the conjugate pairs; should be true by default
# There should be a slightly faster way using conjugate symmetry
contract_fn = lambda p, x, y: contract('r h n, r h m, ... h m -> ... h n', _conj(p), _conj(x), _conj(y))[..., :self.N] # inner outer product
else:
assert state.size(-1) == 2*self.N
step_params = {k: _conj(v) for k, v in step_params.items()}
contract_fn = lambda p, x, y: contract('r h n, r h m, ... h m -> ... h n', p, x, y) # inner outer product
D = step_params["D"] # (H N)
E = step_params["E"] # (H N)
R = step_params["R"] # (R H N)
P = step_params["P"] # (R H N)
Q = step_params["Q"] # (R H N)
B = step_params["B"] # (1 H N)
new_state = E * state - contract_fn(P, Q, state) # (B H N)
new_state = new_state + 2.0 * B * u.unsqueeze(-1) # (B H N)
new_state = D * (new_state - contract_fn(P, R, new_state))
return new_state
def _setup_state(self):
"""Construct dA and dB for discretized state equation."""
# Construct dA and dB by using the stepping
self._setup_linear()
C = _r2c(self.C) # Just returns a view that we use for finding dtype/device
state = torch.eye(2*self.N, dtype=C.dtype, device=C.device).unsqueeze(-2) # (N 1 N)
dA = self._step_state_linear(state=state)
dA = rearrange(dA, "n h m -> h m n")
u = C.new_ones(self.H)
dB = self._step_state_linear(u=u)
dB = _conj(dB)
dB = rearrange(dB, '1 h n -> h n') # (H N)
return dA, dB
def _step_state(self, u, state):
"""Must be called after self.default_state() is used to construct an initial state!"""
next_state = (torch.einsum(self.state_contraction, self.dA, state)
+ torch.einsum(self.input_contraction, self.dB, u))
return next_state
def _setup_step(self, mode='dense'):
"""Set up dA, dB, dC discretized parameters for stepping."""
self.dA, self.dB = self._setup_state()
# Calculate original C
C = _conj(_r2c(self.C)) # (H C N)
if self.l_kernel.item() == 0:
dC = C
else:
# self.C represents C_tilde
dA_L = power(self.l_kernel.item(), self.dA)
I = torch.eye(self.dA.size(-1)).to(dA_L)
dC = torch.linalg.solve(
I - dA_L.transpose(-1, -2),
C.unsqueeze(-1),
).squeeze(-1)
self.dC = dC
# Do special preprocessing for different step modes
self._step_mode = mode
if mode == 'linear':
# Linear case: special step function for the state, we need to handle output
# use conjugate symmetry by default, which affects the output projection
self.dC = 2*self.dC[:, :, :self.N]
elif mode == 'diagonal':
# Eigendecomposition of the A matrix
L, V = torch.linalg.eig(self.dA)
V_inv = torch.linalg.inv(V)
            # Check that the eigendecomposition is correct
if self.verbose:
print("Diagonalization error:", torch.dist(V @ torch.diag_embed(L) @ V_inv, self.dA))
# Change the parameterization to diagonalize
self.dA = L
self.dB = contract('h n m, h m -> h n', V_inv, self.dB)
self.dC = contract('h n m, c h n -> c h m', V, self.dC)
elif mode == 'dense':
pass
else: raise NotImplementedError("DPLR Kernel step mode must be {'dense' | 'linear' | 'diagonal'}")
def default_state(self, *batch_shape):
C = _r2c(self.C)
N = C.size(-1)
H = C.size(-2)
# Cache the tensor contractions we will later do, for efficiency
# These are put in this function because they depend on the batch size
step_mode = getattr(self, "_step_mode", "dense") # Used in default_state, which is called without _setup_step() in forward_state()
if step_mode != 'linear':
N *= 2
if step_mode == 'diagonal':
self.state_contraction = "h n, ... h n -> ... h n"
else:
# Dense (quadratic) case: expand all terms
self.state_contraction = "h m n, ... h n -> ... h m"
self.input_contraction = "h n, ... h -> ... h n"
self.output_contraction = "c h n, ... h n -> ... c h"
state = torch.zeros(*batch_shape, H, N, dtype=C.dtype, device=C.device)
return state
def step(self, u, state):
"""Must have called self._setup_step() and created state with self.default_state() before calling this."""
if self._step_mode == 'linear':
new_state = self._step_state_linear(u, state)
else:
new_state = self._step_state(u, state)
y = torch.einsum(self.output_contraction, self.dC, new_state)
return y.real, new_state
def forward_state(self, *args, **kwargs):
# Dispatch directly to generic state forwarding
# instead of using the Diag version
# TODO design pattern is ugly. Can be fixed with an intermediate
# subclass above Diag/DPLR that has the shared logic (parameter construction)
# but not the state/step logic.
# Fine to keep like this for now since we want Diag to be the standard
# instead of having too many layers of subclassing.
return SSMKernel.forward_state(self, *args, **kwargs)
kernel_registry = {
's4d': SSMKernelDiag,
'diag': SSMKernelDiag,
's4': SSMKernelDPLR,
'nplr': SSMKernelDPLR,
'dplr': SSMKernelDPLR,
}
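# Illustrative sketch (added for exposition, not part of the original file): the registry
# above is how FFTConv resolves its `mode`/`kernel` string into a kernel class. A minimal,
# hypothetical construction mirroring FFTConv's internal call would look roughly like:
#
#     kernel_cls = kernel_registry['diag']                       # S4D-style diagonal kernel
#     k_module = kernel_cls(d_model=64, l_max=1024, channels=1)  # same kwargs FFTConv passes
#     K, _ = k_module(L=1024)                                    # kernel of shape (channels, d_model, L)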
class FFTConv(nn.Module):
"""Implements an FFT Convolution around a convolution kernel.
d_model (H): Model dimension (in CNN terminology, this would be "channels").
l_max (L): The maximum kernel length. Set l_max=None to always use a global kernel.
channels: Can be interpreted as a number of "heads"; the SSM is a map from a 1-dim to C-dim sequence. It's not recommended to change this; instead, increase d_model for larger models.
bidirectional: If True, convolution kernel will be two-sided.
activation: Activation after the full convolution.
transposed, dropout, tie_dropout: More general model options, see SequenceModule.
mode: Which kernel algorithm to use. 'nplr' is the full S4 model; 'diag' is the simpler S4D. Other options can be found in the kernel registry.
kernel_args: See the class .kernel.SSMKernel for the kernel constructor which accepts kernel_args. Relevant options that are worth considering and tuning include "mode", "init", "dt_min", "dt_max", "lr"
"""
def __init__(
self,
d_model,
l_max=None,
channels=1,
swap_channels=False,
bidirectional=False,
activation='gelu', # Activation after layer
transposed=True,
dropout=0.0,
tie_dropout=False,
drop_kernel=0.0,
mode='dplr',
kernel=None,
**kernel_args, # Arguments passed into inner convolution kernel
):
super().__init__()
self.d_model = d_model
self.L = self.l_max = l_max
self.bidirectional = bidirectional
self.channels = channels
self.transposed = transposed
self.swap_channels = swap_channels
if activation is not None and activation.startswith('glu'):
channels *= 2
self.activation = Activation(activation, dim=1 if self.transposed else -1)
self.D = nn.Parameter(torch.randn(channels, self.d_model))
if self.bidirectional:
channels *= 2
# Inner convolution kernel
if mode is not None:
assert kernel is None, "Pass either mode or kernel but not both"
# log.info(
# "Argument 'mode' is deprecated and renamed to 'kernel',"
# "and will be removed in a future version."
# )
kernel, mode = mode, kernel
kernel_cls = kernel_registry[kernel]
self.kernel = kernel_cls(
d_model=self.d_model,
l_max=self.l_max,
channels=channels,
**kernel_args,
)
dropout_fn = DropoutNd if tie_dropout else nn.Dropout
self.drop = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
self.drop_kernel = nn.Dropout(drop_kernel) if drop_kernel > 0.0 else nn.Identity()
def forward(self, x, state=None, rate=1.0, **kwargs): # absorbs return_output and transformer src mask
"""
x: (B D L) if self.transposed else (B L D)
"""
# Always work with (B D L) dimension in this module
if not self.transposed: x = x.transpose(-1, -2)
L = x.size(-1)
# Compute SS Kernel
l_kernel = L if self.L is None else min(L, round(self.L / rate))
k, k_state = self.kernel(L=l_kernel, rate=rate, state=state) # (C H L) (B C H L)
# Convolution
if self.bidirectional:
k0, k1 = rearrange(k, '(s c) h l -> s c h l', s=2)
k = F.pad(k0, (0, L)) \
+ F.pad(k1.flip(-1), (L, 0))
# The above has an off-by-one in the reverse direction
# This is a deliberate choice since the off-by-one should not affect any applications
# This can be amended which may be very slightly slower
# k = F.pad(k0, (0, L)) \
# + F.pad(k1[..., 1:].flip(-1), (L+1, 0)) \
# + F.pad(k1[..., :1], (0, l_kernel+L-1))
# Kernel dropout
k = self.drop_kernel(k)
# In principle, we could pad to l_kernel+L-1 instead of l_kernel+L, but we choose the latter for
# equational simplicity. Additionally, we have not experimented to compare the efficiency of the two.
k_f = torch.fft.rfft(k, n=l_kernel+L) # (C H L)
x_f = torch.fft.rfft(x, n=l_kernel+L) # (B H L)
y_f = contract('bhl,chl->bchl', x_f, k_f)
y = torch.fft.irfft(y_f, n=l_kernel+L)[..., :L] # (B C H L)
# Compute D term in state space equation - essentially a skip connection
y = y + contract('bhl,ch->bchl', x, self.D)
# Compute state update
if state is not None:
assert not self.bidirectional, "Bidirectional not supported with state forwarding"
y = y + k_state #
next_state = self.kernel.forward_state(x, state)
else:
next_state = None
# Reshape to flatten channels
if self.swap_channels:
y = rearrange(y, 'b c h l -> b (h c) l')
else:
y = rearrange(y, 'b c h l -> b (c h) l')
y = self.drop(y) # DropoutNd better with transposed=True
if not self.transposed: y = y.transpose(-1, -2)
y = self.activation(y)
return y, next_state
def setup_step(self, **kwargs):
self.kernel._setup_step(**kwargs)
def step(self, x, state):
""" Step one time step as a recurrent model. Intended to be used during validation.
x: (B H)
state: (B H N)
Returns: output (B H), state (B H N)
"""
y, next_state = self.kernel.step(x, state) # (B C H)
y = y + x.unsqueeze(-2) * self.D
y = rearrange(y, 'b c h -> b (c h)')
y = self.activation(y)
return y, next_state
def default_state(self, *batch_shape, device=None):
# kernel is not a SequenceModule so it doesn't need to adhere to same interface
# the kernel will know the device of its own parameters
return self.kernel.default_state(*batch_shape)
@property
def d_output(self):
return self.d_model * self.channels
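# Illustrative usage sketch for FFTConv (added for exposition; assumes the defaults
# transposed=True and channels=1, so inputs and outputs are (B, H, L)):
#
#     conv = FFTConv(d_model=64, l_max=1024)   # inner kernel defaults to the DPLR ('s4') kernel
#     x = torch.randn(2, 64, 1024)             # (B, H, L)
#     y, next_state = conv(x)                  # y: (B, H, L); next_state is None when no state is passed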
class S4Block(nn.Module):
"""General block design wrapping an inner layer. Currently only layer=FFTConv is supported, but easy to incorporate others.
Arguments:
- bottleneck: Reduce dimension of inner layer (e.g. used in GSS).
- gate: Add multiplicative gating (e.g. used in GSS), which is essentially a multiplicative instead of additive residual branch.
- gate_act: Activation function to apply on the gate residual branch.
- mult_act: Activation function to apply after gate multiplication (e.g. GELU in GSS).
- final_act: Activation function to apply after final linear layer. 'id' for no activation, None for no linear layer at all.
- initializer: Initializer on final linear layer.
- weight_norm: Weight normalization on final linear layer.
- dropout: standard dropout argument. tie_dropout=True ties the dropout mask across the sequence length, emulating nn.Dropout1d
- transposed: Choose backbone axis ordering of (B, L, H) (if False) or (B, H, L) (if True) [B=batch size, L=sequence length, H=model dimension]
Other options are all experimental and should not need to be configured.
"""
def __init__(
self,
d_model,
bottleneck=None,
gate=None,
gate_act=None,
mult_act=None,
final_act='glu',
postact=None,
initializer=None,
weight_norm=False,
dropout=0.0,
tie_dropout=False,
transposed=True,
**layer_args, # Arguments into inner layer (e.g. FFTConv)
):
super().__init__()
self.d_model = d_model
self.transposed = transposed
self.gate = gate
self.bottleneck = bottleneck
if bottleneck is not None:
self.d_model = self.d_model // bottleneck
self.input_linear = LinearActivation(
self.d_model,
self.d_model,
transposed=False,
activation=None,
activate=False,
)
        # Currently this module only uses FFTConv for its inner module
        # But the options here are all agnostic to the inner block
        # If other types of inner layers are desired, it is easy
        # to add an option to swap a different module in
        # Note: the inner layer is constructed before the gate branch below,
        # which needs self.layer.d_output to size the output gate correctly
        self.layer = FFTConv(self.d_model, transposed=False, dropout=dropout, tie_dropout=tie_dropout, **layer_args)
        if gate is not None:
            self.input_gate = LinearActivation(
                self.d_model,
                self.d_model * gate,
                transposed=False,
                activation=gate_act,
                activate=True,
            )
            if self.layer.d_output != self.d_model * gate:
                self.output_gate = LinearActivation(
                    self.layer.d_output,
                    self.d_model * gate,
                    transposed=False,
                    activation=None,
                    activate=False,
                )
# Pointwise operations
# Activation after (optional) multiplication by gate branch
self.mult_activation = Activation(mult_act)
# dropout_fn = nn.Dropout2d if self.transposed else nn.Dropout # Broken in torch==1.11
dropout_fn = partial(DropoutNd, transposed=False) if tie_dropout else nn.Dropout
self.drop = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
# position-wise output transform to mix features
if postact is not None:
assert final_act is None
log.warning("Warning: 'postact' option changed to 'final_act' and will be removed in a future version.")
final_act, postact = postact, final_act
if final_act is None:
self.output_linear = nn.Identity()
else:
self.output_linear = LinearActivation(
self.d_model*gate if gate is not None else self.layer.d_output,
self.d_model,
transposed=False,
activation=final_act,
activate=True,
)
def forward(self, x, lengths=None, **kwargs): # absorbs return_output and transformer src mask
"""
x: (B H L) if self.transposed else (B L H)
state: (H N) never needed unless you know what you're doing
Returns: same shape as x
"""
if self.transposed: x = rearrange(x, 'b d ... -> b ... d')
L = x.size(1)
# Mask out padding tokens
# TODO handle option for mask - instead of lengths, which assumes suffix padding
if isinstance(lengths, int):
if lengths != L:
lengths = torch.tensor(lengths, dtype=torch.long, device=x.device)
else:
lengths = None
if lengths is not None:
assert isinstance(lengths, torch.Tensor) and lengths.ndim == 1 and lengths.size(0) in [1, x.size(0)]
mask = torch.where(torch.arange(L, device=lengths.device)[:, None] < lengths[:, None, None], 1., 0.)
x = x * mask
if self.gate is not None:
v = self.input_gate(x)
if self.bottleneck is not None:
x = self.input_linear(x)
y, state = self.layer(x, **kwargs)
if self.gate is not None:
y = self.output_gate(y)
y = y * v
y = self.mult_activation(y)
y = self.drop(y)
y = self.output_linear(y)
if self.transposed: y = rearrange(y, 'b d ... -> b ... d')
return y, state
def setup_step(self, **kwargs):
self.layer.setup_step(**kwargs)
def step(self, x, state):
"""Step one time step as a recurrent model. Intended to be used during validation.
x: (B H)
state: (B H N)
Returns: output (B H), state (B H N)
"""
if self.gate is not None:
v = self.input_gate(x)
if self.bottleneck is not None:
x = self.input_linear(x)
y, next_state = self.layer.step(x, state) # (B C H)
if self.gate is not None:
y = self.output_gate(y)
y = y * v
y = self.mult_activation(y)
y = self.drop(y)
y = self.output_linear(y)
return y, next_state
def default_state(self, *batch_shape, device=None):
# kernel is not a SequenceModule so it doesn't need to adhere to same interface
# the kernel will know the device of its own parameters
return self.layer.default_state(*batch_shape)
@property
def d_output(self):
return self.d_model
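if __name__ == '__main__':
    # Minimal smoke test added for exposition (not part of the original file):
    # run S4Block in convolutional mode on random data with default settings.
    block = S4Block(d_model=64, l_max=1024)
    x = torch.randn(2, 64, 1024)   # (B, H, L) since transposed=True by default
    y, _ = block(x)
    print(y.shape)                 # expected: torch.Size([2, 64, 1024])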
| state-spaces-main | models/s4/s4.py |
"""Minimal version of S4D with extra options and features stripped out, for pedagogical purposes."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from src.models.nn import DropoutNd
class S4DKernel(nn.Module):
"""Generate convolution kernel from diagonal SSM parameters."""
def __init__(self, d_model, N=64, dt_min=0.001, dt_max=0.1, lr=None):
super().__init__()
# Generate dt
H = d_model
log_dt = torch.rand(H) * (
math.log(dt_max) - math.log(dt_min)
) + math.log(dt_min)
C = torch.randn(H, N // 2, dtype=torch.cfloat)
self.C = nn.Parameter(torch.view_as_real(C))
self.register("log_dt", log_dt, lr)
log_A_real = torch.log(0.5 * torch.ones(H, N//2))
A_imag = math.pi * repeat(torch.arange(N//2), 'n -> h n', h=H)
self.register("log_A_real", log_A_real, lr)
self.register("A_imag", A_imag, lr)
def forward(self, L):
"""
returns: (..., c, L) where c is number of channels (default 1)
"""
# Materialize parameters
dt = torch.exp(self.log_dt) # (H)
C = torch.view_as_complex(self.C) # (H N)
A = -torch.exp(self.log_A_real) + 1j * self.A_imag # (H N)
# Vandermonde multiplication
dtA = A * dt.unsqueeze(-1) # (H N)
K = dtA.unsqueeze(-1) * torch.arange(L, device=A.device) # (H N L)
C = C * (torch.exp(dtA)-1.) / A
K = 2 * torch.einsum('hn, hnl -> hl', C, torch.exp(K)).real
return K
def register(self, name, tensor, lr=None):
"""Register a tensor with a configurable learning rate and 0 weight decay"""
if lr == 0.0:
self.register_buffer(name, tensor)
else:
self.register_parameter(name, nn.Parameter(tensor))
optim = {"weight_decay": 0.0}
if lr is not None: optim["lr"] = lr
setattr(getattr(self, name), "_optim", optim)
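# Note (exposition, not part of the original file): parameters registered above carry an
# `_optim` attribute; the training code in this repo is assumed to read it and place such
# parameters in their own optimizer parameter group, roughly along the lines of:
#
#     groups = [{'params': [p], **p._optim} if hasattr(p, '_optim') else {'params': [p]}
#               for p in model.parameters()]
#     optimizer = torch.optim.AdamW(groups, lr=1e-3, weight_decay=0.01)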
class S4D(nn.Module):
def __init__(self, d_model, d_state=64, dropout=0.0, transposed=True, **kernel_args):
super().__init__()
self.h = d_model
self.n = d_state
self.d_output = self.h
self.transposed = transposed
self.D = nn.Parameter(torch.randn(self.h))
# SSM Kernel
self.kernel = S4DKernel(self.h, N=self.n, **kernel_args)
# Pointwise
self.activation = nn.GELU()
# dropout_fn = nn.Dropout2d # NOTE: bugged in PyTorch 1.11
dropout_fn = DropoutNd
self.dropout = dropout_fn(dropout) if dropout > 0.0 else nn.Identity()
# position-wise output transform to mix features
self.output_linear = nn.Sequential(
nn.Conv1d(self.h, 2*self.h, kernel_size=1),
nn.GLU(dim=-2),
)
def forward(self, u, **kwargs): # absorbs return_output and transformer src mask
""" Input and output shape (B, H, L) """
if not self.transposed: u = u.transpose(-1, -2)
L = u.size(-1)
# Compute SSM Kernel
k = self.kernel(L=L) # (H L)
# Convolution
k_f = torch.fft.rfft(k, n=2*L) # (H L)
u_f = torch.fft.rfft(u, n=2*L) # (B H L)
y = torch.fft.irfft(u_f*k_f, n=2*L)[..., :L] # (B H L)
# Compute D term in state space equation - essentially a skip connection
y = y + u * self.D.unsqueeze(-1)
y = self.dropout(self.activation(y))
y = self.output_linear(y)
if not self.transposed: y = y.transpose(-1, -2)
return y, None # Return a dummy state to satisfy this repo's interface, but this can be modified
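if __name__ == '__main__':
    # Minimal smoke test added for exposition (not part of the original file):
    # run S4D in convolutional mode on random input.
    model = S4D(d_model=64, d_state=64)
    u = torch.randn(2, 64, 1024)   # (B, H, L) since transposed=True by default
    y, _ = model(u)
    print(y.shape)                 # expected: torch.Size([2, 64, 1024])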
| state-spaces-main | models/s4/s4d.py |
import numpy as np
from scipy import linalg
from scipy.stats import norm, entropy
from sklearn.cluster import KMeans
def fid(feat_data, feat_gen):
"""
Calculate Frechet Inception Distance
"""
# Means
mu_data = np.mean(feat_data, axis=0)
mu_gen = np.mean(feat_gen, axis=0)
# Covariances
try:
sigma_data = np.cov(feat_data, rowvar=False)
sigma_gen = np.cov(feat_gen, rowvar=False)
covmean, _ = linalg.sqrtm(sigma_data.dot(sigma_gen), disp=False)
if not np.isfinite(covmean).all():
print("fid calculation produces singular product; adding perturbation to diagonal of cov estimates")
offset = np.eye(sigma_data.shape[0]) * 1e-4
covmean, _ = linalg.sqrtm((sigma_data + offset).dot(sigma_gen + offset))
# Now calculate the FID
fid_value = np.sum(np.square(mu_gen - mu_data)) + np.trace(sigma_gen + sigma_data - 2*covmean)
return fid_value
except ValueError:
return np.inf
def inception_score(probs_gen):
"""
Calculate Inception Score
"""
# Set seed
np.random.seed(0)
# Shuffle probs_gen
probs_gen = probs_gen[np.random.permutation(len(probs_gen))]
# Split probs_gen into two halves
probs_gen_1 = probs_gen[:len(probs_gen)//2]
probs_gen_2 = probs_gen[len(probs_gen)//2:]
# Calculate average label distribution for split 2
mean_2 = np.mean(probs_gen_2, axis=0)
# Compute the mean kl-divergence between the probability distributions
# of the generated and average label distributions
kl = entropy(probs_gen_1, np.repeat(mean_2[None, :], len(probs_gen_1), axis=0)).mean()
# Compute the expected score
is_score = np.exp(kl)
return is_score
def modified_inception_score(probs_gen, n=10000):
"""
Calculate Modified Inception Score
"""
# Set seed
np.random.seed(0)
n_samples = len(probs_gen)
all_kls = []
for i in range(n):
# Sample two prob vectors
indices = np.random.choice(np.arange(n_samples), size=2, replace=True)
probs_gen_1 = probs_gen[indices[0]]
probs_gen_2 = probs_gen[indices[1]]
# Calculate their KL
kl = entropy(probs_gen_1, probs_gen_2)
all_kls.append(kl)
# Compute the score
mis_score = np.exp(np.mean(all_kls))
return mis_score
def am_score(probs_data, probs_gen):
"""
Calculate AM Score
"""
mean_data = np.mean(probs_data, axis=0)
mean_gen = np.mean(probs_gen, axis=0)
entropy_gen = np.mean(entropy(probs_gen, axis=1))
am_score = entropy(mean_data, mean_gen) + entropy_gen
return am_score
def two_proportions_z_test(p1, n1, p2, n2, significance_level, z_threshold=None):
# Taken from https://github.com/eitanrich/gans-n-gmms/blob/master/utils/ndb.py
# Per http://stattrek.com/hypothesis-test/difference-in-proportions.aspx
# See also http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/binotest.htm
p = (p1 * n1 + p2 * n2) / (n1 + n2)
se = np.sqrt(p * (1 - p) * (1/n1 + 1/n2))
z = (p1 - p2) / se
# Allow defining a threshold in terms as Z (difference relative to the SE) rather than in p-values.
if z_threshold is not None:
return abs(z) > z_threshold
p_values = 2.0 * norm.cdf(-1.0 * np.abs(z)) # Two-tailed test
return p_values < significance_level
def ndb_score(feat_data, feat_gen):
# Run K-Means cluster on feat_data with K=50
kmeans = KMeans(n_clusters=50, random_state=0).fit(feat_data)
# Get cluster labels for feat_data and feat_gen
labels_data = kmeans.predict(feat_data)
labels_gen = kmeans.predict(feat_gen)
# Calculate number of data points in each cluster using np.unique
counts_data = np.unique(labels_data, return_counts=True)[1]
counts_gen = np.zeros_like(counts_data)
values, counts = np.unique(labels_gen, return_counts=True)
counts_gen[values] = counts
# Calculate proportion of data points in each cluster
prop_data = counts_data / len(labels_data)
prop_gen = counts_gen / len(labels_gen)
# Calculate number of bins with statistically different proportions
different_bins = two_proportions_z_test(prop_data, len(labels_data), prop_gen, len(labels_gen), 0.05)
ndb = np.count_nonzero(different_bins)
return ndb/50.
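if __name__ == '__main__':
    # Illustrative sanity check added for exposition (not part of the original file).
    # The features/probabilities below are random placeholders, not real classifier outputs.
    rng = np.random.default_rng(0)
    feat_data = rng.normal(size=(1000, 128))
    feat_gen = rng.normal(size=(1000, 128))
    probs_data = rng.dirichlet(np.ones(10), size=1000)
    probs_gen = rng.dirichlet(np.ones(10), size=1000)
    print("FID:", fid(feat_data, feat_gen))
    print("IS:", inception_score(probs_gen))
    print("mIS:", modified_inception_score(probs_gen, n=1000))
    print("AM:", am_score(probs_data, probs_gen))
    print("NDB:", ndb_score(feat_data, feat_gen))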
| state-spaces-main | models/sashimi/metrics.py |
"""
SaShiMi backbone.
Use this backbone in your own models. You'll also need to copy over the
standalone S4 layer, which can be found at `state-spaces/models/s4/`
It's Raw! Audio Generation with State-Space Models
Karan Goel, Albert Gu, Chris Donahue, Christopher Re.
"""
import sys
sys.path.append('../')
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from models.s4.s4 import LinearActivation, S4Block as S4
class DownPool(nn.Module):
def __init__(self, d_input, expand, pool):
super().__init__()
self.d_output = d_input * expand
self.pool = pool
self.linear = LinearActivation(
d_input * pool,
self.d_output,
transposed=True,
)
def forward(self, x):
x = rearrange(x, '... h (l s) -> ... (h s) l', s=self.pool)
x = self.linear(x)
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
if x is None: return None, state
state.append(x)
if len(state) == self.pool:
x = rearrange(torch.stack(state, dim=-1), '... h s -> ... (h s)')
x = x.unsqueeze(-1)
x = self.linear(x)
x = x.squeeze(-1)
return x, []
else:
return None, state
def default_state(self, *args, **kwargs):
return []
class UpPool(nn.Module):
def __init__(self, d_input, expand, pool):
super().__init__()
self.d_output = d_input // expand
self.pool = pool
self.linear = LinearActivation(
d_input,
self.d_output * pool,
transposed=True,
)
def forward(self, x, skip=None):
x = self.linear(x)
x = F.pad(x[..., :-1], (1, 0)) # Shift to ensure causality
x = rearrange(x, '... (h s) l -> ... h (l s)', s=self.pool)
if skip is not None:
x = x + skip
return x, None
def step(self, x, state, **kwargs):
"""
x: (..., H)
"""
assert len(state) > 0
y, state = state[0], state[1:]
if len(state) == 0:
assert x is not None
x = x.unsqueeze(-1)
x = self.linear(x)
x = x.squeeze(-1)
x = rearrange(x, '... (h s) -> ... h s', s=self.pool)
state = list(torch.unbind(x, dim=-1))
else: assert x is None
return y, state
def default_state(self, *batch_shape, device=None):
state = torch.zeros(batch_shape + (self.d_output, self.pool), device=device) # (batch, h, s)
state = list(torch.unbind(state, dim=-1)) # List of (..., H)
return state
class FFBlock(nn.Module):
def __init__(self, d_model, expand=2, dropout=0.0):
"""
Feed-forward block.
Args:
d_model: dimension of input
expand: expansion factor for inverted bottleneck
dropout: dropout rate
"""
super().__init__()
input_linear = LinearActivation(
d_model,
d_model * expand,
transposed=True,
activation='gelu',
activate=True,
)
dropout = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
output_linear = LinearActivation(
d_model * expand,
d_model,
transposed=True,
activation=None,
activate=False,
)
self.ff = nn.Sequential(
input_linear,
dropout,
output_linear,
)
def forward(self, x):
return self.ff(x), None
def default_state(self, *args, **kwargs):
return None
def step(self, x, state, **kwargs):
# expects: (B, D, L)
return self.ff(x.unsqueeze(-1)).squeeze(-1), state
class ResidualBlock(nn.Module):
def __init__(
self,
d_model,
layer,
dropout=0.0,
):
"""
Residual S4 block.
Args:
d_model: dimension of the model
layer: a layer config
dropout: dropout rate
"""
super().__init__()
self.layer = layer
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
def forward(self, x):
"""
Input x is shape (B, d_input, L)
"""
z = x
# Prenorm
z = self.norm(z.transpose(-1, -2)).transpose(-1, -2)
# Apply layer: we ignore the state input and output for training
z, _ = self.layer(z)
# Dropout on the output of the layer
z = self.dropout(z)
# Residual connection
x = z + x
return x, None
def default_state(self, *args, **kwargs):
return self.layer.default_state(*args, **kwargs)
def step(self, x, state, **kwargs):
z = x
# Prenorm
z = self.norm(z)
# Apply layer
z, state = self.layer.step(z, state, **kwargs)
# Residual connection
x = z + x
return x, state
class Sashimi(nn.Module):
def __init__(
self,
d_model=64,
n_layers=8,
pool=[4, 4],
expand=2,
ff=2,
bidirectional=False,
unet=False,
dropout=0.0,
**s4_args,
):
"""
SaShiMi model backbone.
Args:
d_model: dimension of the model. We generally use 64 for all our experiments.
n_layers: number of (Residual (S4) --> Residual (FF)) blocks at each pooling level.
We use 8 layers for our experiments, although we found that increasing layers even further generally
improves performance at the expense of training / inference speed.
pool: pooling factor at each level. Pooling shrinks the sequence length at lower levels.
We experimented with a pooling factor of 4 with 1 to 4 tiers of pooling and found 2 tiers to be best.
It's possible that a different combination of pooling factors and number of tiers may perform better.
expand: expansion factor when pooling. Features are expanded (i.e. the model becomes wider) at lower levels of the architecture.
We generally found 2 to perform best (among 2, 4).
ff: expansion factor for the FF inverted bottleneck. We generally found 2 to perform best (among 2, 4).
bidirectional: use bidirectional S4 layers. Bidirectional layers are suitable for use with non-causal models
such as diffusion models like DiffWave.
unet: use a unet-like architecture, adding (Residual (S4) --> Residual (FF)) layers before downpooling.
All else fixed, this slows down inference (and slightly slows training), but generally improves performance.
We use this variant when dropping in SaShiMi into diffusion models, and this should generally be preferred
for non-autoregressive models.
dropout: dropout rate. Default to 0.0, since we haven't found settings where SaShiMi overfits.
"""
super().__init__()
self.d_model = H = d_model
self.d_output = H
self.unet = unet
def s4_block(dim):
layer = S4(
d_model=dim,
d_state=64,
bidirectional=bidirectional,
dropout=dropout,
transposed=True,
**s4_args,
)
return ResidualBlock(
d_model=dim,
layer=layer,
dropout=dropout,
)
def ff_block(dim):
layer = FFBlock(
d_model=dim,
expand=ff,
dropout=dropout,
)
return ResidualBlock(
d_model=dim,
layer=layer,
dropout=dropout,
)
# Down blocks
d_layers = []
for p in pool:
if unet:
# Add blocks in the down layers
for _ in range(n_layers):
d_layers.append(s4_block(H))
if ff > 0: d_layers.append(ff_block(H))
# Add sequence downsampling and feature expanding
d_layers.append(DownPool(H, expand, p))
H *= expand
# Center block
c_layers = []
for _ in range(n_layers):
c_layers.append(s4_block(H))
if ff > 0: c_layers.append(ff_block(H))
# Up blocks
u_layers = []
for p in pool[::-1]:
block = []
H //= expand
block.append(UpPool(H * expand, expand, p))
for _ in range(n_layers):
block.append(s4_block(H))
if ff > 0: block.append(ff_block(H))
u_layers.append(nn.ModuleList(block))
self.d_layers = nn.ModuleList(d_layers)
self.c_layers = nn.ModuleList(c_layers)
self.u_layers = nn.ModuleList(u_layers)
self.norm = nn.LayerNorm(H)
assert H == d_model
def forward(self, x, state=None):
"""
input: (batch, length, d_input)
output: (batch, length, d_output)
"""
x = x.transpose(1, 2)
# Down blocks
outputs = []
outputs.append(x)
for layer in self.d_layers:
x, _ = layer(x)
outputs.append(x)
# Center block
for layer in self.c_layers:
x, _ = layer(x)
x = x + outputs.pop() # add a skip connection to the last output of the down block
# Up blocks
for block in self.u_layers:
if self.unet:
for layer in block:
x, _ = layer(x)
x = x + outputs.pop() # skip connection
else:
for layer in block:
x, _ = layer(x)
if isinstance(layer, UpPool):
# Before modeling layer in the block
x = x + outputs.pop()
outputs.append(x)
x = x + outputs.pop() # add a skip connection from the input of the modeling part of this up block
# feature projection
x = x.transpose(1, 2) # (batch, length, expand)
x = self.norm(x)
return x, None # required to return a state
def default_state(self, *args, **kwargs):
layers = list(self.d_layers) + list(self.c_layers) + [layer for block in self.u_layers for layer in block]
return [layer.default_state(*args, **kwargs) for layer in layers]
def step(self, x, state, **kwargs):
"""
input: (batch, d_input)
output: (batch, d_output)
"""
# States will be popped in reverse order for convenience
state = state[::-1]
# Down blocks
outputs = [] # Store all layers for SaShiMi
next_state = []
for layer in self.d_layers:
outputs.append(x)
x, _next_state = layer.step(x, state=state.pop(), **kwargs)
next_state.append(_next_state)
if x is None: break
# Center block
if x is None:
# Skip computations since we've downsized
skipped = len(self.d_layers) - len(outputs)
for _ in range(skipped + len(self.c_layers)):
next_state.append(state.pop())
if self.unet:
for i in range(skipped):
next_state.append(state.pop())
u_layers = list(self.u_layers)[skipped//3:]
else:
for i in range(skipped):
for _ in range(len(self.u_layers[i])):
next_state.append(state.pop())
u_layers = list(self.u_layers)[skipped:]
else:
outputs.append(x)
for layer in self.c_layers:
x, _next_state = layer.step(x, state=state.pop(), **kwargs)
next_state.append(_next_state)
x = x + outputs.pop()
u_layers = self.u_layers
for block in u_layers:
if self.unet:
for layer in block:
x, _next_state = layer.step(x, state=state.pop(), **kwargs)
next_state.append(_next_state)
x = x + outputs.pop()
else:
for layer in block:
x, _next_state = layer.step(x, state=state.pop(), **kwargs)
next_state.append(_next_state)
if isinstance(layer, UpPool):
# Before modeling layer in the block
x = x + outputs.pop()
outputs.append(x)
x = x + outputs.pop()
# feature projection
x = self.norm(x)
return x, next_state
def setup_rnn(self, mode='dense'):
"""
Convert the SaShiMi model to a RNN for autoregressive generation.
Args:
mode: S4 recurrence mode. Using `diagonal` can speed up generation by 10-20%.
`linear` should be faster theoretically but is slow in practice since it
dispatches more operations (could benefit from fused operations).
Note that `diagonal` could potentially be unstable if the diagonalization is numerically unstable
(although we haven't encountered this case in practice), while `dense` should always be stable.
"""
assert mode in ['dense', 'diagonal', 'linear']
for module in self.modules():
if hasattr(module, '_setup_step'): module._setup_step(mode=mode)
if __name__ == '__main__':
from tqdm.auto import tqdm
model = Sashimi(n_layers=2).cuda()
# Print parameter count
print(sum(p.numel() for p in model.parameters()))
model.eval()
with torch.no_grad():
# Forward in convolutional mode: used for training SaShiMi
x = torch.randn(3, 10240, 64).cuda()
y, _ = model(x)
# Setup the SaShiMi RNN
model.setup_rnn('diagonal')
# Forward in recurrent mode: used for autoregressive generation at inference time
ys = []
state = model.default_state(*x.shape[:1], device='cuda')
for i in tqdm(range(10240)):
y_, state = model.step(x[:, i], state)
ys.append(y_.detach().cpu())
ys = torch.stack(ys, dim=1)
breakpoint()
print(y.shape, ys.shape)
| state-spaces-main | models/sashimi/sashimi.py |
#!/usr/bin/env python
"""Train a CNN for Google speech commands."""
__author__ = 'Yuan Xu, Erdene-Ochir Tuguldur'
"""With modifications from Karan Goel."""
import argparse
import time
import torch
import torchvision
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data.sampler import WeightedRandomSampler
from tensorboardX import SummaryWriter
from torchvision.transforms import Compose
from tqdm.auto import tqdm
from models.resnext import CifarResNeXt
from speech_commands_dataset import BackgroundNoiseDataset, CLASSES, SpeechCommandsDataset
from transforms import (
AddBackgroundNoiseOnSTFT,
ChangeAmplitude,
ChangeSpeedAndPitchAudio,
DeleteSTFT,
FixAudioLength,
FixSTFTDimension,
LoadAudio,
StretchAudioOnSTFT,
TimeshiftAudioOnSTFT,
ToMelSpectrogramFromSTFT,
ToMelSpectrogram,
ToSTFT,
ToTensor,
)
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--train-dataset", type=str, default='datasets/speech_commands/train', help='path of train dataset')
parser.add_argument("--valid-dataset", type=str, default='datasets/speech_commands/valid', help='path of validation dataset')
parser.add_argument("--background-noise", type=str, default='datasets/speech_commands/train/_background_noise_', help='path of background noise')
parser.add_argument("--comment", type=str, default='', help='comment in tensorboard title')
parser.add_argument("--batch-size", type=int, default=128, help='batch size')
parser.add_argument("--dataload-workers-nums", type=int, default=6, help='number of workers for dataloader')
parser.add_argument("--weight-decay", type=float, default=1e-2, help='weight decay')
parser.add_argument("--optim", choices=['sgd', 'adam'], default='sgd', help='choices of optimization algorithms')
parser.add_argument("--learning-rate", type=float, default=1e-4, help='learning rate for optimization')
parser.add_argument("--lr-scheduler", choices=['plateau', 'step'], default='plateau', help='method to adjust learning rate')
parser.add_argument("--lr-scheduler-patience", type=int, default=5, help='lr scheduler plateau: Number of epochs with no improvement after which learning rate will be reduced')
parser.add_argument("--lr-scheduler-step-size", type=int, default=50, help='lr scheduler step: number of epochs of learning rate decay.')
parser.add_argument("--lr-scheduler-gamma", type=float, default=0.1, help='learning rate is multiplied by the gamma to decrease it')
parser.add_argument("--max-epochs", type=int, default=70, help='max number of epochs')
parser.add_argument("--resume", type=str, help='checkpoint file to resume')
parser.add_argument("--input", choices=['mel32'], default='mel32', help='input of NN')
args = parser.parse_args()
use_gpu = torch.cuda.is_available()
print('use_gpu', use_gpu)
if use_gpu:
torch.backends.cudnn.benchmark = True
n_mels = 32
if args.input == 'mel40':
n_mels = 40
data_aug_transform = Compose([ChangeAmplitude(), ChangeSpeedAndPitchAudio(), FixAudioLength(), ToSTFT(), StretchAudioOnSTFT(), TimeshiftAudioOnSTFT(), FixSTFTDimension()])
bg_dataset = BackgroundNoiseDataset(args.background_noise, data_aug_transform)
add_bg_noise = AddBackgroundNoiseOnSTFT(bg_dataset)
train_feature_transform = Compose([ToMelSpectrogramFromSTFT(n_mels=n_mels), DeleteSTFT(), ToTensor('mel_spectrogram', 'input')])
train_dataset = SpeechCommandsDataset(args.train_dataset,
Compose([LoadAudio(),
data_aug_transform,
add_bg_noise,
train_feature_transform]))
valid_feature_transform = Compose([ToMelSpectrogram(n_mels=n_mels), ToTensor('mel_spectrogram', 'input')])
valid_dataset = SpeechCommandsDataset(args.valid_dataset,
Compose([LoadAudio(),
FixAudioLength(),
valid_feature_transform]))
weights = train_dataset.make_weights_for_balanced_classes()
sampler = WeightedRandomSampler(weights, len(weights))
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, sampler=sampler,
pin_memory=use_gpu, num_workers=args.dataload_workers_nums)
valid_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False,
pin_memory=use_gpu, num_workers=args.dataload_workers_nums)
# a name used to save checkpoints etc.
full_name = '%s_%s_%s_bs%d_lr%.1e_wd%.1e' % ('resnext', args.optim, args.lr_scheduler, args.batch_size, args.learning_rate, args.weight_decay)
if args.comment:
full_name = '%s_%s' % (full_name, args.comment)
model = CifarResNeXt(nlabels=len(CLASSES), in_channels=1)
if use_gpu:
model = torch.nn.DataParallel(model).cuda()
criterion = torch.nn.CrossEntropyLoss()
if args.optim == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=args.weight_decay)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)
start_timestamp = int(time.time()*1000)
start_epoch = 0
best_accuracy = 0
best_loss = 1e100
global_step = 0
if args.resume:
print("resuming a checkpoint '%s'" % args.resume)
checkpoint = torch.load(args.resume)
model.load_state_dict(checkpoint['state_dict'])
model.float()
optimizer.load_state_dict(checkpoint['optimizer'])
best_accuracy = checkpoint.get('accuracy', best_accuracy)
best_loss = checkpoint.get('loss', best_loss)
start_epoch = checkpoint.get('epoch', start_epoch)
global_step = checkpoint.get('step', global_step)
del checkpoint # reduce memory
if args.lr_scheduler == 'plateau':
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=args.lr_scheduler_patience, factor=args.lr_scheduler_gamma)
else:
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_scheduler_step_size, gamma=args.lr_scheduler_gamma, last_epoch=start_epoch-1)
def get_lr():
return optimizer.param_groups[0]['lr']
writer = SummaryWriter(comment=('_speech_commands_' + full_name))
def train(epoch):
global global_step
print("epoch %3d with lr=%.02e" % (epoch, get_lr()))
phase = 'train'
writer.add_scalar('%s/learning_rate' % phase, get_lr(), epoch)
model.train() # Set model to training mode
running_loss = 0.0
it = 0
correct = 0
total = 0
pbar = tqdm(train_dataloader, unit="audios", unit_scale=train_dataloader.batch_size)
for batch in pbar:
inputs = batch['input']
inputs = torch.unsqueeze(inputs, 1)
targets = batch['target']
inputs = Variable(inputs, requires_grad=True)
targets = Variable(targets, requires_grad=False)
if use_gpu:
inputs = inputs.cuda()
targets = targets.cuda()
# forward/backward
outputs = model(inputs)
loss = criterion(outputs, targets)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# statistics
it += 1
global_step += 1
running_loss += loss.item() # loss.data[0]
pred = outputs.data.max(1, keepdim=True)[1]
correct += pred.eq(targets.data.view_as(pred)).sum()
total += targets.size(0)
writer.add_scalar('%s/loss' % phase, loss.item(), global_step)
# update the progress bar
pbar.set_postfix({
'loss': "%.05f" % (running_loss / it),
'acc': "%.02f%%" % (100*correct/total)
})
accuracy = correct/total
epoch_loss = running_loss / it
writer.add_scalar('%s/accuracy' % phase, 100*accuracy, epoch)
writer.add_scalar('%s/epoch_loss' % phase, epoch_loss, epoch)
def valid(epoch):
global best_accuracy, best_loss, global_step
phase = 'valid'
model.eval() # Set model to evaluate mode
running_loss = 0.0
it = 0
correct = 0
total = 0
pbar = tqdm(valid_dataloader, unit="audios", unit_scale=valid_dataloader.batch_size)
for batch in pbar:
inputs = batch['input']
inputs = torch.unsqueeze(inputs, 1)
targets = batch['target']
        inputs = Variable(inputs, volatile=True)
targets = Variable(targets, requires_grad=False)
if use_gpu:
inputs = inputs.cuda()
targets = targets.cuda()
# forward
outputs = model(inputs)
loss = criterion(outputs, targets)
# statistics
it += 1
global_step += 1
running_loss += loss.item()
pred = outputs.data.max(1, keepdim=True)[1]
correct += pred.eq(targets.data.view_as(pred)).sum()
total += targets.size(0)
writer.add_scalar('%s/loss' % phase, loss.item(), global_step)
# update the progress bar
pbar.set_postfix({
'loss': "%.05f" % (running_loss / it),
'acc': "%.02f%%" % (100*correct/total)
})
accuracy = correct/total
epoch_loss = running_loss / it
writer.add_scalar('%s/accuracy' % phase, 100*accuracy, epoch)
writer.add_scalar('%s/epoch_loss' % phase, epoch_loss, epoch)
checkpoint = {
'epoch': epoch,
'step': global_step,
'state_dict': model.state_dict(),
'loss': epoch_loss,
'accuracy': accuracy,
'optimizer' : optimizer.state_dict(),
}
    # Save checkpoints named after the metric that actually improved
    if accuracy > best_accuracy:
        best_accuracy = accuracy
        torch.save(checkpoint, 'checkpoints/best-acc-speech-commands-checkpoint-%s.pth' % full_name)
        torch.save(model, '%d-%s-best-acc.pth' % (start_timestamp, full_name))
    if epoch_loss < best_loss:
        best_loss = epoch_loss
        torch.save(checkpoint, 'checkpoints/best-loss-speech-commands-checkpoint-%s.pth' % full_name)
        torch.save(model, '%d-%s-best-loss.pth' % (start_timestamp, full_name))
torch.save(checkpoint, 'checkpoints/last-speech-commands-checkpoint.pth')
del checkpoint # reduce memory
return epoch_loss
print("training %s for Google speech commands..." % 'resnext')
since = time.time()
for epoch in range(start_epoch, args.max_epochs):
if args.lr_scheduler == 'step':
lr_scheduler.step()
train(epoch)
epoch_loss = valid(epoch)
if args.lr_scheduler == 'plateau':
lr_scheduler.step(metrics=epoch_loss)
time_elapsed = time.time() - since
time_str = 'total time elapsed: {:.0f}h {:.0f}m {:.0f}s '.format(time_elapsed // 3600, time_elapsed % 3600 // 60, time_elapsed % 60)
print("%s, best accuracy: %.02f%%, best loss %f" % (time_str, 100*best_accuracy, best_loss))
print("finished")
| state-spaces-main | models/sashimi/sc09_classifier/train_speech_commands.py |
"""Google speech commands dataset."""
__author__ = 'Yuan Xu'
"""With modifications by Karan Goel to support training an SC09 classifier."""
import os
import numpy as np
import librosa
from torch.utils.data import Dataset
__all__ = [ 'CLASSES', 'SpeechCommandsDataset', 'BackgroundNoiseDataset' ]
CLASSES = 'zero, one, two, three, four, five, six, seven, eight, nine'.split(', ')
class SpeechCommandsDataset(Dataset):
"""Google speech commands dataset. Only 'yes', 'no', 'up', 'down', 'left',
'right', 'on', 'off', 'stop' and 'go' are treated as known classes.
All other classes are used as 'unknown' samples.
See for more information: https://www.kaggle.com/c/tensorflow-speech-recognition-challenge
"""
def __init__(self, folder, transform=None, classes=CLASSES):
all_classes = classes
class_to_idx = {classes[i]: i for i in range(len(classes))}
data = []
for c in all_classes:
d = os.path.join(folder, c)
target = class_to_idx[c]
for f in os.listdir(d):
path = os.path.join(d, f)
data.append((path, target))
self.classes = classes
self.data = data
self.transform = transform
def __len__(self):
return len(self.data)
def __getitem__(self, index):
path, target = self.data[index]
data = {'path': path, 'target': target}
if self.transform is not None:
data = self.transform(data)
return data
def make_weights_for_balanced_classes(self):
"""adopted from https://discuss.pytorch.org/t/balanced-sampling-between-classes-with-torchvision-dataloader/2703/3"""
nclasses = len(self.classes)
count = np.zeros(nclasses)
for item in self.data:
count[item[1]] += 1
N = float(sum(count))
weight_per_class = N / count
weight = np.zeros(len(self))
for idx, item in enumerate(self.data):
weight[idx] = weight_per_class[item[1]]
return weight
class BackgroundNoiseDataset(Dataset):
"""Dataset for silence / background noise."""
def __init__(self, folder, transform=None, sample_rate=16000, sample_length=1):
audio_files = [d for d in os.listdir(folder) if os.path.isfile(os.path.join(folder, d)) and d.endswith('.wav')]
samples = []
for f in audio_files:
path = os.path.join(folder, f)
s, sr = librosa.load(path, sample_rate)
samples.append(s)
samples = np.hstack(samples)
c = int(sample_rate * sample_length)
r = len(samples) // c
self.samples = samples[:r*c].reshape(-1, c)
self.sample_rate = sample_rate
self.classes = CLASSES
self.transform = transform
self.path = folder
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
data = {'samples': self.samples[index], 'sample_rate': self.sample_rate, 'target': 1, 'path': self.path}
if self.transform is not None:
data = self.transform(data)
return data
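if __name__ == '__main__':
    # Illustrative usage sketch added for exposition (not part of the original file).
    # The dataset path below is a placeholder; it must contain one subdirectory per digit class.
    from torch.utils.data import DataLoader, WeightedRandomSampler
    dataset = SpeechCommandsDataset('datasets/speech_commands/train')
    weights = dataset.make_weights_for_balanced_classes()
    sampler = WeightedRandomSampler(weights, len(weights))
    loader = DataLoader(dataset, batch_size=32, sampler=sampler)
    print(len(dataset), 'examples across', len(dataset.classes), 'classes')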
| state-spaces-main | models/sashimi/sc09_classifier/speech_commands_dataset.py |
"""
Taken from https://github.com/tugstugi/pytorch-speech-commands and modified
by Karan Goel.
"""
import argparse
import os
import torch
import numpy as np
from functools import reduce
from natsort import natsorted
from scipy import linalg
from scipy.stats import norm, entropy
from sklearn.cluster import KMeans
from torch.utils.data import Dataset, DataLoader
from torchvision.transforms import Compose
from tqdm.auto import tqdm
from transforms import FixAudioLength, LoadAudio, ToMelSpectrogram, ToTensor
def fid(feat_data, feat_gen):
"""
Calculate Frechet Inception Distance
"""
# Means
mu_data = np.mean(feat_data, axis=0)
mu_gen = np.mean(feat_gen, axis=0)
# Covariances
try:
sigma_data = np.cov(feat_data, rowvar=False)
sigma_gen = np.cov(feat_gen, rowvar=False)
covmean, _ = linalg.sqrtm(sigma_data.dot(sigma_gen), disp=False)
if not np.isfinite(covmean).all():
print("fid calculation produces singular product; adding perturbation to diagonal of cov estimates")
offset = np.eye(sigma_data.shape[0]) * 1e-4
covmean, _ = linalg.sqrtm((sigma_data + offset).dot(sigma_gen + offset))
# Now calculate the FID
fid_value = np.sum(np.square(mu_gen - mu_data)) + np.trace(sigma_gen + sigma_data - 2*covmean)
return fid_value
except ValueError:
return np.inf
def inception_score(probs_gen):
"""
Calculate Inception Score
"""
# Set seed
np.random.seed(0)
# Shuffle probs_gen
probs_gen = probs_gen[np.random.permutation(len(probs_gen))]
# Split probs_gen into two halves
probs_gen_1 = probs_gen[:len(probs_gen)//2]
probs_gen_2 = probs_gen[len(probs_gen)//2:]
# Calculate average label distribution for split 2
mean_2 = np.mean(probs_gen_2, axis=0)
# Compute the mean kl-divergence between the probability distributions
# of the generated and average label distributions
kl = entropy(probs_gen_1, np.repeat(mean_2[None, :], len(probs_gen_1), axis=0)).mean()
# Compute the expected score
is_score = np.exp(kl)
return is_score
def modified_inception_score(probs_gen, n=10000):
"""
Calculate Modified Inception Score
"""
# Set seed
np.random.seed(0)
n_samples = len(probs_gen)
all_kls = []
for i in range(n):
# Sample two prob vectors
indices = np.random.choice(np.arange(n_samples), size=2, replace=True)
probs_gen_1 = probs_gen[indices[0]]
probs_gen_2 = probs_gen[indices[1]]
# Calculate their KL
kl = entropy(probs_gen_1, probs_gen_2)
all_kls.append(kl)
# Compute the score
mis_score = np.exp(np.mean(all_kls))
return mis_score
def am_score(probs_data, probs_gen):
"""
Calculate AM Score
"""
mean_data = np.mean(probs_data, axis=0)
mean_gen = np.mean(probs_gen, axis=0)
entropy_gen = np.mean(entropy(probs_gen, axis=1))
am_score = entropy(mean_data, mean_gen) + entropy_gen
return am_score
def two_proportions_z_test(p1, n1, p2, n2, significance_level, z_threshold=None):
# Taken from https://github.com/eitanrich/gans-n-gmms/blob/master/utils/ndb.py
# Per http://stattrek.com/hypothesis-test/difference-in-proportions.aspx
# See also http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/binotest.htm
p = (p1 * n1 + p2 * n2) / (n1 + n2)
se = np.sqrt(p * (1 - p) * (1/n1 + 1/n2))
z = (p1 - p2) / se
# Allow defining a threshold in terms as Z (difference relative to the SE) rather than in p-values.
if z_threshold is not None:
return abs(z) > z_threshold
p_values = 2.0 * norm.cdf(-1.0 * np.abs(z)) # Two-tailed test
return p_values < significance_level
def ndb_score(feat_data, feat_gen):
# Run K-Means cluster on feat_data with K=50
kmeans = KMeans(n_clusters=50, random_state=0).fit(feat_data)
# Get cluster labels for feat_data and feat_gen
labels_data = kmeans.predict(feat_data)
labels_gen = kmeans.predict(feat_gen)
# Calculate number of data points in each cluster using np.unique
counts_data = np.unique(labels_data, return_counts=True)[1]
counts_gen = np.zeros_like(counts_data)
values, counts = np.unique(labels_gen, return_counts=True)
counts_gen[values] = counts
# Calculate proportion of data points in each cluster
prop_data = counts_data / len(labels_data)
prop_gen = counts_gen / len(labels_gen)
# Calculate number of bins with statistically different proportions
different_bins = two_proportions_z_test(prop_data, len(labels_data), prop_gen, len(labels_gen), 0.05)
ndb = np.count_nonzero(different_bins)
return ndb/50.
def _nested_getattr(obj, attr, *args):
"""Get a nested property from an object.
Example:
```
model = ...
weights = _nested_getattr(model, "layer4.weights")
```
"""
return reduce(lambda o, a: getattr(o, a, *args), [obj] + attr.split("."))
class ActivationExtractor:
"""Class for extracting activations of a targeted intermediate layer."""
def __init__(self):
self.input = None
self.output = None
def add_hook(self, module, input, output):
self.input = input
self.output = output
class ActivationOp:
def __init__(
self,
model: torch.nn.Module,
target_module: str,
):
self.model = model
self.target_module = target_module
try:
target_module = _nested_getattr(model, target_module)
except torch.nn.modules.module.ModuleAttributeError:
raise ValueError(f"`model` does not have a submodule {target_module}")
self.extractor = ActivationExtractor()
target_module.register_forward_hook(self.extractor.add_hook)
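def _activation_op_example():
    # Usage sketch with a hypothetical toy model (not part of the evaluation
    # pipeline): hook a named submodule and read back the input it received.
    class _Toy(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.backbone = torch.nn.Linear(8, 4)
            self.classifier = torch.nn.Linear(4, 2)
        def forward(self, x):
            return self.classifier(self.backbone(x))
    op = ActivationOp(_Toy(), 'classifier')
    op.model(torch.randn(3, 8))
    return op.extractor.input[0].shape  # torch.Size([3, 4])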
CLASSES = 'zero, one, two, three, four, five, six, seven, eight, nine'.split(', ')
class SpeechCommandsDataset(Dataset):
def __init__(self, folder, transform=None, classes=CLASSES, samples=False):
self.classes = classes
self.transform = transform
if not samples:
class_to_idx = {classes[i]: i for i in range(len(classes))}
data = []
for c in classes:
d = os.path.join(folder, c)
target = class_to_idx[c]
for f in natsorted(os.listdir(d)):
if f.endswith(".wav"):
path = os.path.join(d, f)
data.append((path, target))
else:
data = []
for f in natsorted(os.listdir(folder)):
if f.endswith(".wav"):
path = os.path.join(folder, f)
data.append((path, -1))
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
path, target = self.data[index]
data = {'path': path, 'target': target}
if self.transform is not None:
data = self.transform(data)
return data
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--train-dataset-dir", type=str, default='datasets/speech_commands/train', help='path of test dataset')
parser.add_argument("--test-dataset-dir", type=str, default='datasets/speech_commands/test', help='path of test dataset')
parser.add_argument("--sample-dir", type=str, default='datasets/speech_commands/test', help='path of test dataset')
parser.add_argument("--batch-size", type=int, default=128, help='batch size')
parser.add_argument("--dataload-workers-nums", type=int, default=4, help='number of workers for dataloader')
parser.add_argument("--input", choices=['mel32'], default='mel32', help='input of NN')
parser.add_argument("--threshold", action='store_true', help='tune thresholds to reject samples')
parser.add_argument("--save-probs", action='store_true', help='save classifier probs on samples')
parser.add_argument("model", help='a pretrained neural network model')
args = parser.parse_args()
model = torch.load(args.model)
model.float()
use_gpu = torch.cuda.is_available()
if use_gpu:
torch.backends.cudnn.benchmark = True
model.cuda()
n_mels = 32
if args.input == 'mel40':
n_mels = 40
feature_transform = Compose([ToMelSpectrogram(n_mels=n_mels), ToTensor('mel_spectrogram', 'input')])
transform = Compose([LoadAudio(), FixAudioLength(), feature_transform])
train_dataset = SpeechCommandsDataset(args.train_dataset_dir, transform)
train_dataloader = DataLoader(
train_dataset,
batch_size=args.batch_size,
sampler=None,
pin_memory=use_gpu,
num_workers=args.dataload_workers_nums,
drop_last=False,
shuffle=False,
)
test_dataset = SpeechCommandsDataset(args.test_dataset_dir, transform)
test_dataloader = DataLoader(
test_dataset,
batch_size=args.batch_size,
sampler=None,
pin_memory=use_gpu,
num_workers=args.dataload_workers_nums,
drop_last=False,
shuffle=False,
)
samples_dataset = SpeechCommandsDataset(
args.sample_dir,
transform,
samples=False if args.sample_dir.rstrip("/").endswith('test') or args.sample_dir.rstrip("/").endswith('train') else True,
)
samples_dataloader = DataLoader(
samples_dataset,
batch_size=args.batch_size,
sampler=None,
pin_memory=use_gpu,
num_workers=args.dataload_workers_nums,
drop_last=False,
shuffle=False,
)
@torch.no_grad()
def test(dataloader):
model.eval() # Set model to evaluate mode
extractor = ActivationExtractor()
module = model.module.classifier
module.register_forward_hook(extractor.add_hook)
correct = 0
total = 0
probs = []
activations = []
pbar = tqdm(dataloader, unit="audios", unit_scale=dataloader.batch_size)
for batch in pbar:
inputs = batch['input']
inputs = inputs.unsqueeze(1)
targets = batch['target']
if use_gpu:
inputs = inputs.cuda()
targets = targets.cuda()
# forward
outputs = model(inputs)
outputs = torch.nn.functional.softmax(outputs, dim=1)
pred = outputs.data.max(1, keepdim=True)[1]
correct += pred.eq(targets.data.view_as(pred)).sum()
total += targets.size(0)
probs.append(outputs.cpu().numpy())
activations.append(extractor.input[0].cpu().numpy())
probs = np.concatenate(probs)
activations = np.concatenate(activations)
accuracy = correct/total
print("accuracy: %f%%" % (100*accuracy))
return probs, activations
# Run test if train_probs and train_activations are not on disk
if not os.path.exists('cache/train_probs.npy') or not os.path.exists('cache/train_activations.npy'):
    train_probs, train_activations = test(train_dataloader)
    os.makedirs('cache', exist_ok=True)
    np.save('cache/train_probs.npy', train_probs)
    np.save('cache/train_activations.npy', train_activations)
else:
train_probs = np.load('cache/train_probs.npy')
train_activations = np.load('cache/train_activations.npy')
# Same for test
if not os.path.exists('cache/test_probs.npy') or not os.path.exists('cache/test_activations.npy'):
test_probs, test_activations = test(test_dataloader)
os.makedirs('cache', exist_ok=True)
np.save('cache/test_probs.npy', test_probs)
np.save('cache/test_activations.npy', test_activations)
else:
test_probs = np.load('cache/test_probs.npy')
test_activations = np.load('cache/test_activations.npy')
###############################################################################
# Calculate all scores
###############################################################################
print("------------------")
print("Train Set Scores")
print("------------------")
print('\tFID:', fid(train_activations, train_activations))
print('\tInception:', inception_score(train_probs))
print('\tM Inception:', modified_inception_score(train_probs))
print('\tAM:', am_score(train_probs, train_probs))
# print('\tNDB:', 0.)
print("------------------")
print("Test Set Scores")
print("------------------")
print('\tFID:', fid(train_activations, test_activations))
print('\tInception:', inception_score(test_probs))
print('\tM Inception:', modified_inception_score(test_probs))
print('\tAM:', am_score(train_probs, test_probs))
# print('\tNDB:', ndb_score(train_activations, test_activations))
# Train -> Samples
samples_probs, samples_activations = test(samples_dataloader)
if args.threshold:
n_val = len(samples_probs) // 2
n_test = len(samples_probs) // 2
print("Tuning thresholds using IS: using %d samples for tuning and %d for calculating metrics" % (n_val, n_test))
# Split into two parts, one for tuning thresholds and one for calculating metrics
val_indices = sorted(np.random.choice(len(samples_probs), size=n_val, replace=False))
test_indices = sorted(np.array(list(set(range(len(samples_probs))) - set(val_indices))))
samples_probs_val = samples_probs[val_indices]
samples_probs_test = samples_probs[test_indices]
samples_activations_val = samples_activations[val_indices]
samples_activations_test = samples_activations[test_indices]
# Iterate over all thresholds
all_scores = {'fid': {}, 'is': {}}
for lower_threshold in tqdm(np.arange(0., 0.5, 0.1)):
for upper_threshold in tqdm(np.arange(0.6, 1.0, 0.05)):
all_scores['is'][(lower_threshold, upper_threshold)] = inception_score(samples_probs_val[int(lower_threshold * n_val):int(upper_threshold * n_val)])
# Find the best score and calculate all metrics on the test set
best_value = 0.
best_thresholds_is = None
for threshold, value in all_scores['is'].items():
        if value > best_value:
            best_value = value
            best_thresholds_is = threshold
print("------------------")
print("Tuned Thresholds")
print("------------------")
print("\tBest thresholds (by IS tuning):", best_thresholds_is)
print("\tBest IS score (on dev set):", all_scores['is'][best_thresholds_is])
sample_activations_test_inception = samples_activations_test[int(best_thresholds_is[0] * n_test):int(best_thresholds_is[1] * n_test)]
sample_probs_test_inception = samples_probs_test[int(best_thresholds_is[0] * n_test):int(best_thresholds_is[1] * n_test)]
print("------------------")
print("Sample Scores (with Tuned Thresholds)")
print("------------------")
print('\tFID:', fid(train_activations, sample_activations_test_inception))
print('\tInception:', inception_score(sample_probs_test_inception))
print('\tM Inception:', modified_inception_score(sample_probs_test_inception))
print('\tAM:', am_score(train_probs, sample_probs_test_inception))
# print('\tNDB:', ndb_score(train_activations, sample_activations_test_inception))
else:
print("------------------")
print("Sample Scores (no Threshold Tuning)")
print("------------------")
print('\tFID:', fid(train_activations, samples_activations))
print('\tInception:', inception_score(samples_probs))
print('\tM Inception:', modified_inception_score(samples_probs))
print('\tAM:', am_score(train_probs, samples_probs))
# print('\tNDB:', ndb_score(train_activations, samples_activations))
if args.save_probs:
filename = args.sample_dir.rstrip("/").split("/")[-1]
np.save(f'cache/{filename}-resnext-probs.npy', samples_probs)
# Info about probs
# print(np.unique(np.argmax(samples_probs, axis=1), return_counts=True))
# print(samples_probs[np.arange(samples_probs.shape[0]), np.argmax(samples_probs, axis=1)])
# print(samples_probs)
| state-spaces-main | models/sashimi/sc09_classifier/test_speech_commands.py |
"""Splits the google speech commands into train, validation and test sets.
"""
import os
import shutil
import argparse
def move_files(src_folder, to_folder, list_file):
with open(list_file) as f:
for line in f.readlines():
line = line.rstrip()
dirname = os.path.dirname(line)
dest = os.path.join(to_folder, dirname)
if not os.path.exists(dest):
os.mkdir(dest)
shutil.move(os.path.join(src_folder, line), dest)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Split google commands train dataset.')
    parser.add_argument('root', type=str, help='the path to the root folder of the google commands train dataset.')
args = parser.parse_args()
audio_folder = os.path.join(args.root, 'audio')
validation_path = os.path.join(audio_folder, 'validation_list.txt')
test_path = os.path.join(audio_folder, 'testing_list.txt')
valid_folder = os.path.join(args.root, 'valid')
test_folder = os.path.join(args.root, 'test')
train_folder = os.path.join(args.root, 'train')
os.mkdir(valid_folder)
os.mkdir(test_folder)
move_files(audio_folder, test_folder, test_path)
move_files(audio_folder, valid_folder, validation_path)
os.rename(audio_folder, train_folder)
| state-spaces-main | models/sashimi/sc09_classifier/datasets/speech_commands/split_dataset.py |
# -*- coding: utf-8 -*-
"""Imported from https://github.com/prlz77/ResNeXt.pytorch/blob/master/models/model.py
and added support for the 1x32x32 mel spectrogram for the speech recognition.
Creates a ResNeXt Model as defined in:
Xie, S., Girshick, R., Dollár, P., Tu, Z., & He, K. (2016).
Aggregated residual transformations for deep neural networks.
arXiv preprint arXiv:1611.05431.
"""
__author__ = "Pau Rodríguez López, ISELAB, CVC-UAB"
__email__ = "[email protected]"
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
__all__ = [ 'CifarResNeXt' ]
class ResNeXtBottleneck(nn.Module):
"""
RexNeXt bottleneck type C (https://github.com/facebookresearch/ResNeXt/blob/master/models/resnext.lua)
"""
def __init__(self, in_channels, out_channels, stride, cardinality, base_width, widen_factor):
""" Constructor
Args:
in_channels: input channel dimensionality
out_channels: output channel dimensionality
stride: conv stride. Replaces pooling layer.
cardinality: num of convolution groups.
base_width: base number of channels in each group.
widen_factor: factor to reduce the input dimensionality before convolution.
"""
super(ResNeXtBottleneck, self).__init__()
width_ratio = out_channels / (widen_factor * 64.)
D = cardinality * int(base_width * width_ratio)
self.conv_reduce = nn.Conv2d(in_channels, D, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_reduce = nn.BatchNorm2d(D)
self.conv_conv = nn.Conv2d(D, D, kernel_size=3, stride=stride, padding=1, groups=cardinality, bias=False)
self.bn = nn.BatchNorm2d(D)
self.conv_expand = nn.Conv2d(D, out_channels, kernel_size=1, stride=1, padding=0, bias=False)
self.bn_expand = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('shortcut_conv',
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0,
bias=False))
self.shortcut.add_module('shortcut_bn', nn.BatchNorm2d(out_channels))
def forward(self, x):
bottleneck = self.conv_reduce.forward(x)
bottleneck = F.relu(self.bn_reduce.forward(bottleneck), inplace=True)
bottleneck = self.conv_conv.forward(bottleneck)
bottleneck = F.relu(self.bn.forward(bottleneck), inplace=True)
bottleneck = self.conv_expand.forward(bottleneck)
bottleneck = self.bn_expand.forward(bottleneck)
residual = self.shortcut.forward(x)
return F.relu(residual + bottleneck, inplace=True)
class CifarResNeXt(nn.Module):
"""
ResNext optimized for the Cifar dataset, as specified in
https://arxiv.org/pdf/1611.05431.pdf
"""
def __init__(self, nlabels, cardinality=8, depth=29, base_width=64, widen_factor=4, in_channels=3):
""" Constructor
Args:
cardinality: number of convolution groups.
depth: number of layers.
nlabels: number of classes
base_width: base number of channels in each group.
widen_factor: factor to adjust the channel dimensionality
"""
super(CifarResNeXt, self).__init__()
self.cardinality = cardinality
self.depth = depth
self.block_depth = (self.depth - 2) // 9
self.base_width = base_width
self.widen_factor = widen_factor
self.nlabels = nlabels
self.output_size = 64
self.stages = [64, 64 * self.widen_factor, 128 * self.widen_factor, 256 * self.widen_factor]
self.conv_1_3x3 = nn.Conv2d(in_channels, 64, 3, 1, 1, bias=False)
self.bn_1 = nn.BatchNorm2d(64)
self.stage_1 = self.block('stage_1', self.stages[0], self.stages[1], 1)
self.stage_2 = self.block('stage_2', self.stages[1], self.stages[2], 2)
self.stage_3 = self.block('stage_3', self.stages[2], self.stages[3], 2)
self.classifier = nn.Linear(self.stages[3], nlabels)
init.kaiming_normal(self.classifier.weight)
for key in self.state_dict():
if key.split('.')[-1] == 'weight':
if 'conv' in key:
init.kaiming_normal(self.state_dict()[key], mode='fan_out')
if 'bn' in key:
self.state_dict()[key][...] = 1
elif key.split('.')[-1] == 'bias':
self.state_dict()[key][...] = 0
def block(self, name, in_channels, out_channels, pool_stride=2):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
name: string name of the current block.
in_channels: number of input channels
out_channels: number of output channels
pool_stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
block = nn.Sequential()
for bottleneck in range(self.block_depth):
name_ = '%s_bottleneck_%d' % (name, bottleneck)
if bottleneck == 0:
block.add_module(name_, ResNeXtBottleneck(in_channels, out_channels, pool_stride, self.cardinality,
self.base_width, self.widen_factor))
else:
block.add_module(name_,
ResNeXtBottleneck(out_channels, out_channels, 1, self.cardinality, self.base_width,
self.widen_factor))
return block
def forward(self, x):
x = self.conv_1_3x3.forward(x)
x = F.relu(self.bn_1.forward(x), inplace=True)
x = self.stage_1.forward(x)
x = self.stage_2.forward(x)
x = self.stage_3.forward(x)
x = F.avg_pool2d(x, 8, 1)
x = x.view(-1, self.stages[3])
return self.classifier(x)
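def _mel_input_example():
    # Usage sketch (not executed on import): a small ResNeXt for the 1x32x32
    # mel-spectrogram inputs used by the speech-commands classifier; depth=11
    # keeps the example light, the constructor default is depth=29.
    import torch
    net = CifarResNeXt(nlabels=10, cardinality=4, depth=11, base_width=16,
                       widen_factor=4, in_channels=1)
    logits = net(torch.randn(2, 1, 32, 32))
    return logits.shape  # torch.Size([2, 10])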
| state-spaces-main | models/sashimi/sc09_classifier/models/resnext.py |
"""Transforms on raw wav samples."""
__author__ = 'Yuan Xu'
import random
import numpy as np
import librosa
import torch
from torch.utils.data import Dataset
def should_apply_transform(prob=0.5):
"""Transforms are only randomly applied with the given probability."""
return random.random() < prob
class LoadAudio(object):
"""Loads an audio into a numpy array."""
def __init__(self, sample_rate=16000):
self.sample_rate = sample_rate
def __call__(self, data):
path = data['path']
if path:
samples, sample_rate = librosa.load(path, sr=self.sample_rate)
else:
# silence
sample_rate = self.sample_rate
samples = np.zeros(sample_rate, dtype=np.float32)
data['samples'] = samples
data['sample_rate'] = sample_rate
return data
class FixAudioLength(object):
"""Either pads or truncates an audio into a fixed length."""
def __init__(self, time=1):
self.time = time
def __call__(self, data):
samples = data['samples']
sample_rate = data['sample_rate']
length = int(self.time * sample_rate)
if length < len(samples):
data['samples'] = samples[:length]
elif length > len(samples):
data['samples'] = np.pad(samples, (0, length - len(samples)), "constant")
return data
class ChangeAmplitude(object):
"""Changes amplitude of an audio randomly."""
def __init__(self, amplitude_range=(0.7, 1.1)):
self.amplitude_range = amplitude_range
def __call__(self, data):
if not should_apply_transform():
return data
data['samples'] = data['samples'] * random.uniform(*self.amplitude_range)
return data
class ChangeSpeedAndPitchAudio(object):
"""Change the speed of an audio. This transform also changes the pitch of the audio."""
def __init__(self, max_scale=0.2):
self.max_scale = max_scale
def __call__(self, data):
if not should_apply_transform():
return data
samples = data['samples']
sample_rate = data['sample_rate']
scale = random.uniform(-self.max_scale, self.max_scale)
speed_fac = 1.0 / (1 + scale)
data['samples'] = np.interp(np.arange(0, len(samples), speed_fac), np.arange(0,len(samples)), samples).astype(np.float32)
return data
class StretchAudio(object):
"""Stretches an audio randomly."""
def __init__(self, max_scale=0.2):
self.max_scale = max_scale
def __call__(self, data):
if not should_apply_transform():
return data
scale = random.uniform(-self.max_scale, self.max_scale)
data['samples'] = librosa.effects.time_stretch(data['samples'], 1+scale)
return data
class TimeshiftAudio(object):
"""Shifts an audio randomly."""
def __init__(self, max_shift_seconds=0.2):
self.max_shift_seconds = max_shift_seconds
def __call__(self, data):
if not should_apply_transform():
return data
samples = data['samples']
sample_rate = data['sample_rate']
        max_shift = int(sample_rate * self.max_shift_seconds)
shift = random.randint(-max_shift, max_shift)
a = -min(0, shift)
b = max(0, shift)
samples = np.pad(samples, (a, b), "constant")
data['samples'] = samples[:len(samples) - a] if a else samples[b:]
return data
class AddBackgroundNoise(Dataset):
"""Adds a random background noise."""
def __init__(self, bg_dataset, max_percentage=0.45):
self.bg_dataset = bg_dataset
self.max_percentage = max_percentage
def __call__(self, data):
if not should_apply_transform():
return data
samples = data['samples']
noise = random.choice(self.bg_dataset)['samples']
percentage = random.uniform(0, self.max_percentage)
data['samples'] = samples * (1 - percentage) + noise * percentage
return data
class ToMelSpectrogram(object):
"""Creates the mel spectrogram from an audio. The result is a 32x32 matrix."""
def __init__(self, n_mels=32):
self.n_mels = n_mels
def __call__(self, data):
samples = data['samples']
sample_rate = data['sample_rate']
s = librosa.feature.melspectrogram(y=samples, sr=sample_rate, n_mels=self.n_mels)
data['mel_spectrogram'] = librosa.power_to_db(s, ref=np.max)
return data
class ToTensor(object):
"""Converts into a tensor."""
def __init__(self, np_name, tensor_name, normalize=None):
self.np_name = np_name
self.tensor_name = tensor_name
self.normalize = normalize
def __call__(self, data):
tensor = torch.FloatTensor(data[self.np_name])
if self.normalize is not None:
mean, std = self.normalize
tensor -= mean
tensor /= std
data[self.tensor_name] = tensor
return data
| state-spaces-main | models/sashimi/sc09_classifier/transforms/transforms_wav.py |
from .transforms_wav import *
from .transforms_stft import *
| state-spaces-main | models/sashimi/sc09_classifier/transforms/__init__.py |
"""Transforms on the short time fourier transforms of wav samples."""
__author__ = 'Erdene-Ochir Tuguldur'
import random
import numpy as np
import librosa
from torch.utils.data import Dataset
from .transforms_wav import should_apply_transform
class ToSTFT(object):
"""Applies on an audio the short time fourier transform."""
def __init__(self, n_fft=2048, hop_length=512):
self.n_fft = n_fft
self.hop_length = hop_length
def __call__(self, data):
samples = data['samples']
sample_rate = data['sample_rate']
data['n_fft'] = self.n_fft
data['hop_length'] = self.hop_length
data['stft'] = librosa.stft(samples, n_fft=self.n_fft, hop_length=self.hop_length)
data['stft_shape'] = data['stft'].shape
return data
class StretchAudioOnSTFT(object):
"""Stretches an audio on the frequency domain."""
def __init__(self, max_scale=0.2):
self.max_scale = max_scale
def __call__(self, data):
if not should_apply_transform():
return data
stft = data['stft']
sample_rate = data['sample_rate']
hop_length = data['hop_length']
scale = random.uniform(-self.max_scale, self.max_scale)
stft_stretch = librosa.core.phase_vocoder(stft, 1+scale, hop_length=hop_length)
data['stft'] = stft_stretch
return data
class TimeshiftAudioOnSTFT(object):
"""A simple timeshift on the frequency domain without multiplying with exp."""
def __init__(self, max_shift=8):
self.max_shift = max_shift
def __call__(self, data):
if not should_apply_transform():
return data
stft = data['stft']
shift = random.randint(-self.max_shift, self.max_shift)
a = -min(0, shift)
b = max(0, shift)
stft = np.pad(stft, ((0, 0), (a, b)), "constant")
if a == 0:
stft = stft[:,b:]
else:
stft = stft[:,0:-a]
data['stft'] = stft
return data
class AddBackgroundNoiseOnSTFT(Dataset):
"""Adds a random background noise on the frequency domain."""
def __init__(self, bg_dataset, max_percentage=0.45):
self.bg_dataset = bg_dataset
self.max_percentage = max_percentage
def __call__(self, data):
if not should_apply_transform():
return data
noise = random.choice(self.bg_dataset)['stft']
percentage = random.uniform(0, self.max_percentage)
data['stft'] = data['stft'] * (1 - percentage) + noise * percentage
return data
class FixSTFTDimension(object):
"""Either pads or truncates in the time axis on the frequency domain, applied after stretching, time shifting etc."""
def __call__(self, data):
stft = data['stft']
t_len = stft.shape[1]
orig_t_len = data['stft_shape'][1]
if t_len > orig_t_len:
stft = stft[:,0:orig_t_len]
elif t_len < orig_t_len:
stft = np.pad(stft, ((0, 0), (0, orig_t_len-t_len)), "constant")
data['stft'] = stft
return data
class ToMelSpectrogramFromSTFT(object):
"""Creates the mel spectrogram from the short time fourier transform of a file. The result is a 32x32 matrix."""
def __init__(self, n_mels=32):
self.n_mels = n_mels
def __call__(self, data):
stft = data['stft']
sample_rate = data['sample_rate']
n_fft = data['n_fft']
mel_basis = librosa.filters.mel(sr=sample_rate, n_fft=n_fft, n_mels=self.n_mels)
s = np.dot(mel_basis, np.abs(stft)**2.0)
data['mel_spectrogram'] = librosa.power_to_db(s, ref=np.max)
return data
class DeleteSTFT(object):
"""Pytorch doesn't like complex numbers, use this transform to remove STFT after computing the mel spectrogram."""
def __call__(self, data):
del data['stft']
return data
class AudioFromSTFT(object):
"""Inverse short time fourier transform."""
def __call__(self, data):
stft = data['stft']
data['istft_samples'] = librosa.core.istft(stft, dtype=data['samples'].dtype)
return data
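def _stft_pipeline_example():
    # Illustrative sketch (not part of the transform module): chain the STFT-domain
    # transforms on one second of synthetic audio; shapes and values are arbitrary.
    data = {'samples': np.random.randn(16000).astype(np.float32), 'sample_rate': 16000}
    for t in (ToSTFT(), TimeshiftAudioOnSTFT(), FixSTFTDimension(),
              ToMelSpectrogramFromSTFT(n_mels=32), DeleteSTFT()):
        data = t(data)
    return data['mel_spectrogram'].shape  # (32, number of STFT frames)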
| state-spaces-main | models/sashimi/sc09_classifier/transforms/transforms_stft.py |
TEMPL = """
<style>
th, td {{
border: 2px solid black;
padding: 8px;
}}
th {{
width: 100px;
vertical-align: text-top;
font-weight: normal;
}}
.noborder {{
border: 0px solid black;
}}
.thlab {{
margin-bottom: 1em;
}}
.td {{
text-align: center;
vertical-align: middle;
}}
input[type=radio] {{
border: 0px;
width: 100%;
height: 4em;
}}
audio {{
width: 300px;
}}
input[type=submit] {{
margin-top: 20px;
width: 20em;
height: 2em;
}}
</style>
<html>
<h2>Rate and annotate audio files containing spoken digits.</h2>
<p><b>Please use headphones in a quiet environment if possible. Read the instructions below carefully before starting the task.</b></p>
<p>You are presented a batch of recordings and asked to classify what digit you hear in each of them. If you are unsure which digit it is, select the one that sounds most like the recording to you.</p>
<p>You are also asked to rate the intelligibility of each recording.</p>
<p><b>Intelligibility:</b> How easily could you identify the recorded digits? Are they impossible to classify (not at all intelligible) or very easy to understand (extremely intelligible)?</p>
<p>At the bottom, you'll be asked to provide your opinion of the recordings.</p>
<p><b>Think about the recordings you heard when answering these questions.</b></p>
<p><b>Quality:</b> How clear is the audio on average? Does it sound like it's coming from a walkie-talkie (bad quality) or a studio-quality sound system (excellent quality)?</p>
<p><b>Diversity:</b> How diverse are the speakers in the recordings on average? Do they mostly sound similar (not at all diverse) or are there many speakers represented (extremely diverse)?</p>
<div class="form-group">
<table>
<tbody>
<tr>
<th class="noborder"></th>
<th colspan=10><div class="thlab"><b>Digit Classification</b></div></th>
<th class="noborder"></th>
<th colspan=5><div class="thlab"><b>Digit Intelligibility</b></div></th>
</tr>
<tr>
<th class="noborder"></th>
<th><div class="thlab">0</div><div>Zero</div></th>
<th><div class="thlab">1</div><div>One</div></th>
<th><div class="thlab">2</div><div>Two</div></th>
<th><div class="thlab">3</div><div>Three</div></th>
<th><div class="thlab">4</div><div>Four</div></th>
<th><div class="thlab">5</div><div>Five</div></th>
<th><div class="thlab">6</div><div>Six</div></th>
<th><div class="thlab">7</div><div>Seven</div></th>
<th><div class="thlab">8</div><div>Eight</div></th>
<th><div class="thlab">9</div><div>Nine</div></th>
<th class="noborder"></th>
<th><div class="thlab"><b>1: Not at all</b></div><div>Not at all intelligible</div></th>
<th><div class="thlab"><b>2: Slightly</b></div><div>Slightly intelligible</div></th>
<th><div class="thlab"><b>3: Moderately</b></div><div>Moderately intelligible</div></th>
<th><div class="thlab"><b>4: Very</b></div><div>Very intelligible</div></th>
<th><div class="thlab"><b>5: Extremely</b></div><div>Extremely intelligible</div></th>
</tr>
{rows}
</tbody>
</table>
<table>
<tbody>
<tr>
<th class="noborder"></th>
<th colspan=5><div class="thlab"><b>Audio Quality</b></div></th>
<th class="noborder"></th>
<th colspan=5><div class="thlab"><b>Speaker Diversity</b></div></th>
</tr>
<tr>
<th class="noborder"></th>
<th><div class="thlab"><b>1: Bad</b></div><div>Very noisy audio</div></th>
<th><div class="thlab"><b>2: Poor</b></div><div>Mostly noisy audio</div></th>
<th><div class="thlab"><b>3: Fair</b></div><div>Somewhat clear audio</div></th>
<th><div class="thlab"><b>4: Good</b></div><div>Mostly clear audio</div></th>
<th><div class="thlab"><b>5: Excellent</b></div><div>Clear audio</div></th>
<th class="noborder"></th>
<th><div class="thlab"><b>1: Not at all</b></div><div>Not at all diverse (none or almost no distinct speakers)</div></th>
<th><div class="thlab"><b>2: Slightly</b></div><div>Slightly diverse (few distinct speakers)</div></th>
<th><div class="thlab"><b>3: Moderately</b></div><div>Moderately diverse (many distinct speakers) </div></th>
<th><div class="thlab"><b>4: Very</b></div><div>Very diverse (almost all distinct speakers)</div></th>
<th><div class="thlab"><b>5: Extremely</b></div><div>Extremely diverse (all distinct speakers)</div></th>
</tr>
<tr>
<th class="noborder"></th>
<td><input class="form-control" type="radio" required="" name="quality" value="1"></td>
<td><input class="form-control" type="radio" required="" name="quality" value="2"></td>
<td><input class="form-control" type="radio" required="" name="quality" value="3"></td>
<td><input class="form-control" type="radio" required="" name="quality" value="4"></td>
<td><input class="form-control" type="radio" required="" name="quality" value="5"></td>
<th class="noborder"></th>
<td><input class="form-control" type="radio" required="" name="diversity" value="1"></td>
<td><input class="form-control" type="radio" required="" name="diversity" value="2"></td>
<td><input class="form-control" type="radio" required="" name="diversity" value="3"></td>
<td><input class="form-control" type="radio" required="" name="diversity" value="4"></td>
<td><input class="form-control" type="radio" required="" name="diversity" value="5"></td>
</tr>
</tbody>
</table>
<input type="submit">
</div>
</html>
"""
ROW_TEMPL = """
<tr>
<td><audio controls=""><source src="${{recording_{i}_url}}" type="audio/mpeg"/></audio></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_digit" value="0"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_digit" value="1"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_digit" value="2"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_digit" value="3"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_digit" value="4"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_digit" value="5"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_digit" value="6"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_digit" value="7"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_digit" value="8"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_digit" value="9"></td>
<th class="noborder"></th>
<td><input class="form-control" type="radio" required="" name="recording_{i}_intelligibility" value="1"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_intelligibility" value="2"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_intelligibility" value="3"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_intelligibility" value="4"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_intelligibility" value="5"></td>
</tr>
"""
import sys
n = int(sys.argv[1])
rows = []
for i in range(n):
rows.append(ROW_TEMPL.format(i=i))
rows = '\n'.join(rows)
print(TEMPL.format(rows=rows))
| state-spaces-main | models/sashimi/mturk/template_speech.py |
import argparse
import numpy as np
import os
import shutil
from natsort import natsorted
digits = 'zero one two three four five six seven eight nine'.split()
def move_files(src_dir, src_files, target_dir, indices, discard=False):
os.makedirs(target_dir, exist_ok=True)
for i, digit in enumerate(digits):
try:
os.mkdir(target_dir + f'{digit}')
except FileExistsError:
raise FileExistsError(f'{target_dir}/{digit} already exists, please delete it before running this script.')
for index in indices[i]:
if not discard:
shutil.copy(f'{src_dir}/{src_files[index]}', f'{target_dir}/{digit}/{src_files[index]}')
else:
shutil.copy(f'{src_dir}/{src_files[index]}', f'{target_dir}/{digit}/{src_files[index].split("/")[-1]}')
def standardize_filenames(target_dir):
i = 0
for digit in digits:
if not os.path.exists(f'{target_dir}/{digit}/'): continue
for f in natsorted(os.listdir(f'{target_dir}/{digit}/')):
if f.endswith('.wav'):
shutil.move(f'{target_dir}/{digit}/{f}', f'{target_dir}/{i}.wav')
i += 1
shutil.rmtree(f'{target_dir}/{digit}/')
def grab_indices(probs, samples_per_class=50):
confident_indices = {}
random_indices = {}
for digit in range(10):
# Rows with prediction = digit
rows = np.zeros_like(probs)
rows[probs.argmax(1) == digit] = probs[probs.argmax(1) == digit]
# Sort rows by confidence and take the last 50 indices
confident_indices[digit] = np.argsort(rows[:, digit])[-samples_per_class:]
# Take a random sample of 50 digits
random_indices[digit] = np.random.choice(np.where(probs.argmax(1) == digit)[0], samples_per_class, replace=False)
return confident_indices, random_indices
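def _grab_indices_example():
    # Illustrative sketch (not run by this script): with near-one-hot probabilities
    # and 10 samples per digit, both index sets contain samples_per_class entries
    # for every digit.
    probs = np.eye(10)[np.arange(100) % 10] * 0.9 + 0.01
    confident, rand_idx = grab_indices(probs, samples_per_class=2)
    return {digit: len(confident[digit]) for digit in range(10)}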
def prepare(
method,
cache_dir = '../sc09_classifier/cache/',
sample_dir='../samples/sc09/',
target_dir='sc09/sc09-unconditional-exp-confident/',
n_samples=2048,
samples_per_class=50,
):
# Load outputs of SC09 classifier (ResNeXt)
# Example: `2048-sashimi-diffwave-small-500k-resnext-probs.npy` (where method is `sashimi-diffwave-small-500k`)
probs = np.load(f'{cache_dir}/{n_samples}-{method}-resnext-probs.npy')
# List all .wav sample files in the method directory
# Example: all .wav files in `../samples/sc09/sashimi-diffwave-small-500k/`
files = list(natsorted([e for e in os.listdir(f'{sample_dir}/{n_samples}-{method}') if e.endswith('.wav')]))
# Grab indices of the top 50 most-confident samples for each digit
indices, _ = grab_indices(probs, samples_per_class)
# Move the top 50 confident samples for each digit to the method directory
move_files(
f'{sample_dir}/{n_samples}-{method}',
files,
f'{target_dir}/{method}/',
indices,
)
# Rename the files to `0.wav, 1.wav, ...` and flatten the target directory structure
standardize_filenames(f'{target_dir}/{method}')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--methods', type=str, nargs='+', help='methods to prepare', required=True)
parser.add_argument('--cache_dir', type=str, default='../sc09_classifier/cache/')
parser.add_argument('--sample_dir', type=str, default='../samples/sc09/')
parser.add_argument('--target_dir', type=str, default='sc09/sc09-unconditional-exp-confident-repro/')
parser.add_argument('--n_samples', type=int, default=2048)
parser.add_argument('--samples_per_class', type=int, default=50)
args = parser.parse_args()
for method in args.methods:
print(f'Preparing {method}...')
prepare(
method,
args.cache_dir,
args.sample_dir,
args.target_dir,
args.n_samples,
args.samples_per_class,
)
    print('Done!')
| state-spaces-main | models/sashimi/mturk/prepare_sc09.py |
TEMPL = """
<style>
th, td {{
border: 2px solid black;
padding: 8px;
}}
th {{
width: 100px;
vertical-align: text-top;
font-weight: normal;
}}
.noborder {{
border: 0px solid black;
}}
.thlab {{
margin-bottom: 1em;
}}
.td {{
text-align: center;
vertical-align: middle;
}}
input[type=radio] {{
border: 0px;
width: 100%;
height: 4em;
}}
audio {{
width: 300px;
}}
input[type=submit] {{
margin-top: 20px;
width: 20em;
height: 2em;
}}
</style>
<html>
<h2>Rate the audio fidelity and musicality of piano music.</h2>
<p><b>Please use headphones in a quiet environment if possible.</b></p>
<p><b>Some files may be loud, so we recommend keeping volumes at a moderate level.</b></p>
<p>You will be presented a batch of recordings and asked to rate each of them on audio fidelity and musicality.</p>
<p>Some are computer generated, while others are performed by a human.</p>
<p><b>Fidelity:</b> How clear is the audio? Does it sound like it's coming from a walkie-talkie (bad fidelity) or a studio-quality sound system (excellent fidelity)?</p>
<p><b>Musicality:</b> To what extent does the recording sound like real piano music? Does it change in unusual ways (bad musicality) or is it musically consistent (excellent musicality)?</p>
<div class="form-group">
<table>
<tbody>
<tr>
<th class="noborder"></th>
<th colspan=5><div class="thlab"><b>Fidelity</b></div></th>
<th class="noborder"></th>
<th colspan=5><div class="thlab"><b>Musicality</b></div></th>
</tr>
<tr>
<th class="noborder"></th>
<th><div class="thlab"><b>1: Bad</b></div><div>Very noisy audio</div></th>
<th><div class="thlab"><b>2: Poor</b></div><div>Mostly noisy audio</div></th>
<th><div class="thlab"><b>3: Fair</b></div><div>Somewhat clear audio</div></th>
<th><div class="thlab"><b>4: Good</b></div><div>Mostly clear audio</div></th>
<th><div class="thlab"><b>5: Excellent</b></div><div>Clear audio</div></th>
<th class="noborder"></th>
<th><div class="thlab"><b>1: Not at all</b></div><div>Not musical at all</div></th>
<th><div class="thlab"><b>2: Slightly</b></div><div>Somewhat musical</div></th>
<th><div class="thlab"><b>3: Moderately</b></div><div>Moderately musical</div></th>
<th><div class="thlab"><b>4: Very</b></div><div>Very musical</div></th>
<th><div class="thlab"><b>5: Extremely</b></div><div>Extremely musical</div></th>
</tr>
{rows}
</tbody>
</table>
<input type="submit">
</div>
</html>
"""
ROW_TEMPL = """
<tr>
<td><audio controls=""><source src="${{recording_{i}_url}}" type="audio/mpeg"/></audio></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_quality" value="1"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_quality" value="2"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_quality" value="3"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_quality" value="4"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_quality" value="5"></td>
<th class="noborder"></th>
<td><input class="form-control" type="radio" required="" name="recording_{i}_musicality" value="1"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_musicality" value="2"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_musicality" value="3"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_musicality" value="4"></td>
<td><input class="form-control" type="radio" required="" name="recording_{i}_musicality" value="5"></td>
</tr>
"""
import sys
n = int(sys.argv[1])
rows = []
for i in range(n):
rows.append(ROW_TEMPL.format(i=i))
rows = '\n'.join(rows)
print(TEMPL.format(rows=rows))
| state-spaces-main | models/sashimi/mturk/template_music.py |
import argparse
import random
import shutil
import uuid
from natsort import natsorted
from pathlib import Path
from types import SimpleNamespace
rd = random.Random()
rd.seed(0)
uuid.uuid4 = lambda: uuid.UUID(int=rd.getrandbits(128))
class Experiment:
def __init__(
self,
condition,
input_dir,
output_dir,
url_templ,
methods,
):
self.condition = condition
self.input_dir = Path(input_dir)
self.output_dir = Path(output_dir)
self.url_templ = url_templ
self.methods = methods
self._verify_input_dir()
self._verify_input_filenames()
self._shuffle_files()
self.uids = self.create_uids()
def _verify_input_dir(self):
# Check that input_dir exists and contains folders for each method
for method in self.methods:
assert self.input_dir.joinpath(method).exists()
def _verify_input_filenames(self):
# Check that each method has the same number of files and identical filenames
filenames = set([file.name for file in self.input_dir.joinpath(self.methods[0]).glob('*.wav')])
for method in self.methods[1:]:
files = set(self.input_dir.joinpath(method).glob('*.wav'))
assert len(files) == len(filenames)
for file in files:
assert file.name in filenames, f'{file.name} is not in the set of filenames'
self.filenames = list(natsorted(filenames))
print("Found {} files".format(len(self.filenames)))
def _shuffle_files(self):
# Shuffle the filenames
random.seed(42)
random.shuffle(self.filenames)
def create_uids(self):
# Construct a table mapping each (method, filename) to a unique ID
random.seed(42)
uids = {}
_uuids = set()
for method in self.methods:
for filename in self.filenames:
# Generate a unique ID
uid = uuid.uuid4().hex
while uid in _uuids:
uid = uuid.uuid4().hex
_uuids.add(uid)
uids[(method, filename)] = uid
return uids
def construct_batches(self, batch_size: int):
assert batch_size > 0
assert len(self.filenames) % batch_size == 0, 'batch_size must evenly divide the number of files'
# Split the files into batches
batches = [self.filenames[i:i+batch_size] for i in range(0, len(self.filenames), batch_size)]
return batches
def create_output_dir(self):
# Create the output directory
self.output_dir.mkdir(parents=True, exist_ok=True)
# Create a subdirectory for the condition
self.output_dir.joinpath(self.condition).mkdir(parents=True, exist_ok=False)
def process_data(self, batch_size: int):
# Set random seed
random.seed(42)
# Create output directory
self.create_output_dir()
# Construct batches
# Each batch contains a fixed set of filenames, and includes those filenames for all methods
batches = self.construct_batches(batch_size)
# 1. Create a subdirectory for each batch
# 2. For each method, copy the waveforms into the subdirectory after renaming them using self.uids
# 3. Create a list of URLs for each batch
urls_by_batch = {}
for i, batch in enumerate(batches):
urls = []
# Create output directory
batch_dir = self.output_dir.joinpath(self.condition).joinpath(str(i))
batch_dir.mkdir(parents=True, exist_ok=False)
# Copy files into the batch directory
for method in self.methods:
for filename in batch:
src = self.input_dir.joinpath(method).joinpath(filename)
dst = batch_dir.joinpath(f'{self.uids[(method, filename)]}.wav')
shutil.copy(src, dst)
urls.append(self.get_url(self.condition, str(i), self.uids[(method, filename)]))
# Shuffle the URLs to randomize the order of the waveforms
random.shuffle(urls)
urls_by_batch[str(i)] = urls
# Store `batches` to disk
with open(self.output_dir.joinpath(self.condition).joinpath('batches.txt'), 'w') as f:
for batch in batches:
f.write(' '.join(batch) + '\n')
# Store `urls_by_batch` to disk
with open(self.output_dir.joinpath(self.condition).joinpath('urls.csv'), 'w') as f:
urls = [','.join([f'recording_{i}_url' for i in range(len(self.methods) * batch_size)])] + [",".join(urls) for _, urls in urls_by_batch.items()]
f.write('\n'.join(urls))
# Store `urls_by_batch` by batch
for batch, urls in urls_by_batch.items():
with open(self.output_dir.joinpath(self.condition).joinpath(f'urls_{batch}.csv'), 'w') as f:
urls = [','.join([f'recording_{i}_url' for i in range(len(self.methods) * batch_size)])] + [",".join(urls)]
f.write('\n'.join(urls))
# Store information in `self.uids` to disk
with open(self.output_dir.joinpath(self.condition).joinpath('uids.txt'), 'w') as f:
for (method, filename), uid in self.uids.items():
f.write(f'{method} {filename} {uid}\n')
return SimpleNamespace(
batches=batches,
urls_by_batch=urls_by_batch,
)
def get_url(self, condition, batch, uid):
return self.url_templ.format(condition, batch, uid)
def upload_data():
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--condition', type=str, required=True)
parser.add_argument('--input_dir', type=str, required=True)
parser.add_argument('--output_dir', type=str, required=True)
parser.add_argument('--url_templ', type=str, default='https://storage.googleapis.com/mturk-experiments/{}/{}/{}.wav')
parser.add_argument('--methods', nargs='+', required=True)
parser.add_argument('--batch_size', type=int, default=1)
args = parser.parse_args()
experiment = Experiment(
condition=args.condition,
input_dir=args.input_dir,
output_dir=args.output_dir,
url_templ=args.url_templ,
methods=args.methods,
)
experiment.process_data(batch_size=args.batch_size)
| state-spaces-main | models/sashimi/mturk/turk_create_batch.py |
import math
import torch
import torch.nn.functional as F
import pytest
from einops import rearrange
from src.ops.vandermonde import log_vandermonde, log_vandermonde_fast
@pytest.mark.parametrize('L', [3, 17, 489, 2**10, 1047, 2**11, 2**12])
@pytest.mark.parametrize('N', [4, 8, 16, 32, 64, 128, 256])
# @pytest.mark.parametrize('L', [2048])
# @pytest.mark.parametrize('N', [64])
def test_vand_mult_symmetric(N, L):
    assert log_vandermonde_fast is not None, 'structured_kernels extension is not installed'
rtol, atol = (1e-4, 1e-4) if N <= 64 and L <= 1024 else(1e-3, 1e-3)
device = 'cuda'
batch_size = 4
torch.random.manual_seed(2357)
v = torch.randn(batch_size, N // 2, dtype=torch.cfloat, device=device, requires_grad=True)
x = (0.001 * torch.rand(batch_size, N // 2, device=device)
+ 1j * N * torch.rand(batch_size, N // 2, device=device))
x.requires_grad_()
v_keops = v.detach().clone().requires_grad_()
x_keops = x.detach().clone().requires_grad_()
out_keops = log_vandermonde(v_keops, x_keops, L)
out = log_vandermonde_fast(v, x, L)
err_out = (out - out_keops).abs()
dout = torch.randn_like(out)
dv_keops, dx_keops = torch.autograd.grad(out_keops, (v_keops, x_keops), dout, retain_graph=True)
dv, dx = torch.autograd.grad(out, (v, x), dout, retain_graph=True)
err_dv = (dv - dv_keops).abs()
err_dx = (dx - dx_keops).abs()
print(f'out error: max {err_out.amax().item():.6f}, mean {err_out.mean().item():.6f}')
print(f'dv error: max {err_dv.amax().item():.6f}, mean {err_dv.mean().item():.6f}')
    print(f'dx error: max {err_dx.amax().item():.6f}, mean {err_dx.mean().item():.6f}')
assert torch.allclose(out, out_keops, rtol=rtol, atol=atol)
assert torch.allclose(dv, dv_keops, rtol=rtol, atol=atol)
assert torch.allclose(dx, dx_keops, rtol=rtol, atol=atol)
| state-spaces-main | extensions/kernels/test_vandermonde.py |
import torch
from structured_kernels import vand_log_mult_sym_fwd, vand_log_mult_sym_bwd
def log_vandermonde_cuda(v, z, L):
""" Wrap the cuda method to deal with shapes """
v, z = torch.broadcast_tensors(v, z)
shape = v.shape
v = v.contiguous()
z = z.contiguous()
N = v.size(-1)
assert z.size(-1) == N
y = LogVandMultiplySymmetric.apply(v.view(-1, N), z.view(-1, N), L)
y = y.view(*shape[:-1], L)
return y
class LogVandMultiplySymmetric(torch.autograd.Function):
@staticmethod
def forward(ctx, v, x, L):
batch, N = v.shape
supported_N_values = [1 << log_n for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
if not N in supported_N_values:
raise NotImplementedError(f'Only support N values in {supported_N_values}')
max_L_value = 32 * 1024 * 64 * 1024
if L > max_L_value:
raise NotImplementedError(f'Only support L values <= {max_L_value}')
        if not (v.is_cuda and x.is_cuda):
raise NotImplementedError(f'Only support CUDA tensors')
ctx.save_for_backward(v, x)
return vand_log_mult_sym_fwd(v, x, L)
@staticmethod
def backward(ctx, dout):
v, x = ctx.saved_tensors
dv, dx = vand_log_mult_sym_bwd(v, x, dout)
return dv, dx, None
if vand_log_mult_sym_fwd is not None and vand_log_mult_sym_bwd is not None:
log_vandermonde_fast = LogVandMultiplySymmetric.apply
else:
log_vandermonde_fast = None
| state-spaces-main | extensions/kernels/vandermonde.py |
from pathlib import Path
import torch
from einops import rearrange
from structured_kernels import cauchy_mult_sym_fwd, cauchy_mult_sym_bwd
# try:
# from cauchy_mult import cauchy_mult_sym_fwd, cauchy_mult_sym_bwd
# except ImportError:
# from torch.utils.cpp_extension import load
# current_dir = Path(__file__).parent.absolute()
# cauchy_mult_extension = load(
# name='cauchy_mult',
# sources=[str(current_dir / 'cauchy.cpp'), str(current_dir / 'cauchy_cuda.cu')],
# extra_cflags=['-g', '-march=native', '-funroll-loops'],
# extra_cuda_cflags=['-O3', '-lineinfo', '--use_fast_math'],
# extra_include_paths=str(current_dir),
# build_directory=str(current_dir),
# verbose=True
# )
# cauchy_mult_sym_fwd = cauchy_mult_extension.cauchy_mult_sym_fwd
# cauchy_mult_sym_bwd = cauchy_mult_extension.cauchy_mult_sym_bwd
def cauchy_mult_torch(v: torch.Tensor, z: torch.Tensor, w: torch.Tensor,
symmetric=True) -> torch.Tensor:
"""
v: (B, N)
z: (L)
w: (B, N)
symmetric: whether to assume that v and w contain complex conjugate pairs, of the form
[v_half, v_half.conj()] and [w_half, w_half.conj()]
"""
if not symmetric:
return (rearrange(v, 'b n -> b 1 n') / (rearrange(z, 'l -> l 1') - rearrange(w, 'b n -> b 1 n'))).sum(dim=-1)
else:
N = v.shape[-1]
assert N % 2 == 0
vv = rearrange(v[:, :N // 2], 'b n -> b 1 n')
zz = rearrange(z, 'l -> l 1')
ww = rearrange(w[:, :N // 2], 'b n -> b 1 n')
# return 2 * ((zz * vv.real - vv.real * ww.real - vv.imag * ww.imag)
# / (zz * zz - 2 * zz * ww.real + ww.abs().square())).sum(dim=-1)
return (vv / (zz - ww) + vv.conj() / (zz - ww.conj())).sum(dim=-1)
def cauchy_mult_keops(v, z, w):
from pykeops.torch import LazyTensor
v_l = LazyTensor(rearrange(v, 'b N -> b 1 N 1'))
z_l = LazyTensor(rearrange(z, 'L -> 1 L 1 1'))
w_l = LazyTensor(rearrange(w, 'b N -> b 1 N 1'))
sub = z_l - w_l # (b N L 1), for some reason it doesn't display the last dimension
div = v_l / sub
s = div.sum(dim=2, backend='GPU')
return s.squeeze(-1)
def _cauchy_mult(v, z, w):
return CauchyMultiplySymmetric.apply(v, z, w)
def cauchy_mult(v, z, w):
""" Wrap the cuda method to deal with shapes """
v, w = torch.broadcast_tensors(v, w)
shape = v.shape
# z_shape = z.shape
# z = z.squeeze()
assert len(z.shape) == 1
v = v.contiguous()
w = w.contiguous()
z = z.contiguous()
N = v.size(-1)
assert w.size(-1) == N
y = _cauchy_mult(v.view(-1, N), z, w.view(-1, N))
y = y.view(*shape[:-1], z.size(-1))
return y
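def _cauchy_mult_example():
    # Correctness sketch (assumes a CUDA device and the compiled structured_kernels
    # extension): compare the fused symmetric kernel, which takes one element of
    # each conjugate pair, against the pure-PyTorch reference on the full pairs.
    batch, N, L = 4, 64, 1024
    v_half = torch.randn(batch, N, dtype=torch.complex64, device='cuda')
    w_half = torch.randn(batch, N, dtype=torch.complex64, device='cuda')
    z = torch.exp(1j * torch.randn(L, dtype=torch.float32, device='cuda'))
    v_full = torch.cat([v_half, v_half.conj()], dim=-1)
    w_full = torch.cat([w_half, w_half.conj()], dim=-1)
    ref = cauchy_mult_torch(v_full, z, w_full, symmetric=True)
    out = cauchy_mult(v_half, z, w_half)
    print((out - ref).abs().max())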
class CauchyMultiplySymmetric(torch.autograd.Function):
@staticmethod
def forward(ctx, v, z, w):
batch, N = v.shape
supported_N_values = [1 << log_n for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
L = z.shape[-1]
if not N in supported_N_values:
raise NotImplementedError(f'Only support N values in {supported_N_values}')
max_L_value = 32 * 1024 * 64 * 1024
if L > max_L_value:
raise NotImplementedError(f'Only support L values <= {max_L_value}')
if not (v.is_cuda and z.is_cuda and w.is_cuda):
raise NotImplementedError(f'Only support CUDA tensors')
ctx.save_for_backward(v, z, w)
return cauchy_mult_sym_fwd(v, z, w)
@staticmethod
def backward(ctx, dout):
v, z, w = ctx.saved_tensors
dv, dw = cauchy_mult_sym_bwd(v, z, w, dout)
return dv, None, dw
| state-spaces-main | extensions/kernels/cauchy.py |
from setuptools import setup
import torch.cuda
from torch.utils.cpp_extension import CppExtension, CUDAExtension, BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
ext_modules = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension(
'structured_kernels', [
'cauchy.cpp',
'cauchy_cuda.cu',
],
extra_compile_args={'cxx': ['-g', '-march=native', '-funroll-loops'],
# 'nvcc': ['-O2', '-lineinfo']
'nvcc': ['-O2', '-lineinfo', '--use_fast_math']
}
)
ext_modules.append(extension)
setup(
name='structured_kernels',
version="0.1.0",
ext_modules=ext_modules,
# cmdclass={'build_ext': BuildExtension.with_options(use_ninja=False)})
cmdclass={'build_ext': BuildExtension})
| state-spaces-main | extensions/kernels/setup.py |
import math
from functools import partial
import torch
from einops import rearrange
from .cauchy import cauchy_mult_torch, cauchy_mult_keops, cauchy_mult
from benchmark.utils import benchmark_all, benchmark_combined, benchmark_forward, benchmark_backward
def generate_data(batch_size, N, L, symmetric=True, device='cuda'):
if not symmetric:
v = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
w = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
z = torch.randn(L, dtype=torch.complex64, device=device)
else:
assert N % 2 == 0
v_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
v = torch.cat([v_half, v_half.conj()], dim=-1).requires_grad_(True)
w_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
w = torch.cat([w_half, w_half.conj()], dim=-1).requires_grad_(True)
z = torch.exp(1j * torch.randn(L, dtype=torch.float32, device=device))
return v, z, w
if __name__ == '__main__':
device = 'cuda'
bs = 1024
N = 64
L = 16384
v, z, w = generate_data(bs, N, L, symmetric=True)
v_half = v[:, :N // 2].clone().detach().requires_grad_(True)
w_half = w[:, :N // 2].clone().detach().requires_grad_(True)
repeat = 30
benchmark_all(repeat, cauchy_mult_keops, v, z, w, desc='Cauchy mult keops')
fn = partial(cauchy_mult, symmetric=False)
benchmark_all(repeat, fn, v, z, w, desc='Cauchy mult')
fn = partial(cauchy_mult, symmetric=True)
benchmark_all(repeat, fn, v_half, z, w_half, desc='Cauchy mult symmetric')
| state-spaces-main | extensions/kernels/benchmark_cauchy.py |
import importlib
import json
import argparse
import torch
from benchmark.utils import benchmark_forward
def generate_data(batch_size, N, L, symmetric=True, device='cuda'):
if not symmetric:
v = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
w = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
z = torch.randn(L, dtype=torch.complex64, device=device)
else:
assert N % 2 == 0
v_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
v = torch.cat([v_half, v_half.conj()], dim=-1).requires_grad_(True)
w_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
w = torch.cat([w_half, w_half.conj()], dim=-1).requires_grad_(True)
z = torch.exp(1j * torch.randn(L, dtype=torch.float32, device=device))
return v, z, w
parser = argparse.ArgumentParser(description='Tuning Cauchy multiply')
parser.add_argument('--name', default='cauchy_mult')
parser.add_argument('--mode', default='forward', choices=['forward', 'backward'])
parser.add_argument('-bs', '--batch-size', default=1024, type=int)
parser.add_argument('-N', default=64, type=int)
parser.add_argument('-L', default=2 ** 14, type=int)
if __name__ == '__main__':
args = parser.parse_args()
device = 'cuda'
bs = args.batch_size
N = args.N
L = args.L
repeat = 30
v, z, w = generate_data(bs, N, L, symmetric=True)
v_half = v[:, :N // 2].clone().detach().requires_grad_(True)
w_half = w[:, :N // 2].clone().detach().requires_grad_(True)
tuning_extension_name = args.name
# print('Extension name:', tuning_extension_name)
module = importlib.import_module(tuning_extension_name)
if args.mode == 'forward':
_, m = benchmark_forward(repeat, module.cauchy_mult_sym_fwd, v_half, z, w_half,
verbose=False, desc='Cauchy mult symmetric fwd')
else:
out = module.cauchy_mult_sym_fwd(v_half, z, w_half)
dout = torch.randn_like(out)
_, m = benchmark_forward(repeat, module.cauchy_mult_sym_bwd, v_half, z, w_half, dout,
verbose=False, desc='Cauchy mult symmetric bwd')
result_dict = dict(time_mean = m.mean, time_iqr = m.iqr)
print(json.dumps(result_dict))
| state-spaces-main | extensions/kernels/benchmark_cauchy_tune.py |
import os
import shutil
import subprocess
import sys
# import tempfile
# import importlib
import random
import string
import json
from functools import partial
from multiprocessing import Pipe, Pool, Process
from pathlib import Path
from tqdm import tqdm
import numpy as np
def read_file(filename):
""" return the contents of the file named filename or None if file not found """
if os.path.isfile(filename):
with open(filename, 'r') as f:
return f.read()
def write_file(filename, string):
"""dump the contents of string to a file called filename"""
with open(filename, 'w', encoding="utf-8") as f:
f.write(string)
def prepare_kernel_string(kernel_string, params):
for k, v in params.items():
kernel_string = "#define " + k + " " + str(v) + "\n" + kernel_string
return kernel_string
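def _prepare_kernel_string_example():
    # Illustrative sketch: each tuning parameter becomes a #define prepended to the
    # kernel source, so later dict entries end up on earlier lines.
    src = "__global__ void kernel() {}"
    print(prepare_kernel_string(src, {"BLOCK_DIM": 128, "ITEMS_PER_THREAD": 4}))
    # #define ITEMS_PER_THREAD 4
    # #define BLOCK_DIM 128
    # __global__ void kernel() {}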
def compile_extension(temp_dir, install=False, verbose=True):
# Need to copy this process's environments, otherwise it can't find the compilers
env = {**os.environ,
'TUNING_SOURCE_DIR': str(temp_dir),
'TUNING_EXTENSION_NAME': str(temp_dir.stem)}
# https://stackoverflow.com/questions/53173314/how-to-change-distutils-output-directory
# Need separate build directories for parallel compilation
output = subprocess.run(
# [sys.executable, "tuning_setup.py", 'build', f'--build-base={str(temp_dir)}',
# f'--build-lib={str(temp_dir)}'],
[sys.executable, "tuning_setup.py", 'build' if not install else 'develop'],
cwd=temp_dir,
env=env,
capture_output=True,
# check=True
)
if verbose:
print(output)
print('Done compiling' if not install else 'Done installing')
def uninstall_extensions(tuning_extension_names, verbose=True):
# Need to copy this process's environments, otherwise it can't find the compilers
env = {**os.environ}
output = subprocess.run(
[sys.executable, '-m', 'pip', 'uninstall', '-y', *tuning_extension_names],
env=env,
capture_output=True,
# check=True
)
if verbose:
print(output)
print('Done uninstalling')
def benchmark_extension(benchmark_script, *benchmark_args, verbose=True):
# Need to copy this process's environments, otherwise it can't find the compilers
env = os.environ
# https://stackoverflow.com/questions/53173314/how-to-change-distutils-output-directory
# Need separate build directories for parallel compilation
process = subprocess.run(
[sys.executable, benchmark_script, *benchmark_args],
env=os.environ,
capture_output=True,
# check=True
)
if verbose:
print(process)
print('Done benchmarking')
return json.loads(process.stdout.decode(sys.stdout.encoding))
# def benchmark(connection, temp_dir):
# import torch
# # module = importlib.import_module(tuning_extension_name)
# torch.ops.load_library(temp_dir / 'torch_butterfly_tuning.so')
# batch_size = 1024
# n = 32
# twiddle = torch.randn(1, 1, 5, n // 2, 2, 2, device='cuda')
# input = torch.randn(batch_size, 1, n, device=twiddle.device)
# output = torch.ops.torch_butterfly.butterfly_multiply_fw(twiddle, input, True)
# # https://medium.com/@auro_227/timing-your-pytorch-code-fragments-e1a556e81f2
# res = []
# for _ in range(32):
# start = torch.cuda.Event(enable_timing=True)
# end = torch.cuda.Event(enable_timing=True)
# start.record()
# output = torch.ops.torch_butterfly.butterfly_multiply_fw(twiddle, input, True)
# end.record()
# torch.cuda.synchronize()
# res.append(start.elapsed_time(end))
# print(output.shape)
# res = np.array(res)
# connection.send((np.mean(res), np.std(res)))
def set_up_tuning_temp_dir(params: dict, source_files, extension_dir, verbose=True):
if verbose:
print('params: ', params)
    # TD [2021-10-22]: tempfile.mkdtemp sometimes creates a dir name with '_' in it, thus messing up
    # the extension name.
# temp_dir = Path(tempfile.mkdtemp(prefix="temp_", dir=Path.cwd().parent)).absolute()
tuning_extension_name = 'temp_' + ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))
temp_dir = (Path.cwd().parent / tuning_extension_name).absolute()
if temp_dir.exists():
shutil.rmtree(temp_dir) # shutil.copytree doesn't want directory that already exists
shutil.copytree(extension_dir, temp_dir)
sources = [temp_dir / name for name in source_files]
for kernel_source in sources:
ks = read_file(kernel_source)
ks = prepare_kernel_string(ks, params)
write_file(kernel_source, ks)
return temp_dir
class KernelTuner:
def __init__(self, extension_dir, source_files, params_list, benchmark_script,
benchmark_args, npool=8, verbose=True):
self.extension_dir = extension_dir
self.source_files = source_files
self.params_list = params_list
self.benchmark_script = benchmark_script
self.benchmark_args = benchmark_args
self.npool = npool
self.verbose = verbose
def tune(self):
temp_dirs = [set_up_tuning_temp_dir(params, self.source_files, self.extension_dir,
verbose=self.verbose)
for params in self.params_list]
# Compile in parallel (for speed), then install sequentially to ensure correctness
with Pool(self.npool) as p:
p.map(compile_extension, temp_dirs)
# with Pool(1) as p:
# p.map(partial(compile_extension, install=True), [temp_dirs])
for temp_dir in tqdm(temp_dirs):
try:
compile_extension(temp_dir, install=True)
            except Exception:  # skip candidates that fail to compile/install
pass
# # We benchmark on a separate process so that they can import the extension that just got compiled.
# for params, temp_dir in params_tempdir:
# print('Benchmarking: ', params)
# recv_conn, send_conn = Pipe(duplex=False)
# benchmark_process = Process(target=benchmark_fwd, args=(send_conn, str(temp_dir.stem)))
# benchmark_process.start()
# result = recv_conn.recv()
# benchmark_process.join()
# print('result', result)
results = []
for params, temp_dir in tqdm(list(zip(self.params_list, temp_dirs))):
try:
results.append((params,
benchmark_extension(self.benchmark_script,
*['--name', temp_dir.stem] + self.benchmark_args)))
            except Exception:  # skip candidates whose benchmark fails
pass
print(results)
uninstall_extensions([temp_dir.stem for temp_dir in temp_dirs])
for temp_dir in temp_dirs:
shutil.rmtree(temp_dir)
return results
| state-spaces-main | extensions/kernels/tuner.py |
import os
from setuptools import setup
from pathlib import Path
import torch.cuda
from torch.utils.cpp_extension import CppExtension, CUDAExtension, BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
extensions_dir = Path(os.getenv('TUNING_SOURCE_DIR')).absolute()
assert extensions_dir.exists()
source_files=[
'cauchy.cpp',
'cauchy_cuda.cu',
]
sources = [str(extensions_dir / name) for name in source_files]
extension_name = os.getenv('TUNING_EXTENSION_NAME', default='cauchy_mult_tuning')
ext_modules = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension(
extension_name,
sources,
include_dirs=[extensions_dir],
extra_compile_args={'cxx': ['-g', '-march=native', '-funroll-loops'],
# 'nvcc': ['-O2', '-lineinfo']
'nvcc': ['-O2', '-lineinfo', '--use_fast_math']
}
)
ext_modules.append(extension)
setup(
name=extension_name,
ext_modules=ext_modules,
# cmdclass={'build_ext': BuildExtension.with_options(use_ninja=False)})
cmdclass={'build_ext': BuildExtension})
| state-spaces-main | extensions/kernels/tuning_setup.py |
import math
import json
import argparse
import itertools
from pathlib import Path
from tuner import KernelTuner
def forward_params_list(N):
blocksize_params = ('MAX_BLOCK_SIZE_VALUE', [64, 128, 256, 512, 1024])
thread_value_default = [2, 4, 8, 16, 32, 32, 32, 32, 32, 32]
thread_values_supported = [2, 4, 8, 16, 32, 64, 128]
log_N_half = int(math.log2(N)) - 1
thread_values = []
for val in thread_values_supported:
if val <= N // 2:
array = list(thread_value_default)
array[log_N_half - 1] = val
thread_values.append('{' + ', '.join(str(v) for v in array) + '}')
thread_params = ('ITEMS_PER_THREAD_SYM_FWD_VALUES', thread_values)
value_prod = itertools.product(thread_params[1], blocksize_params[1])
params_list = [{thread_params[0]: value[0], blocksize_params[0]: value[1]}
for value in value_prod]
return params_list
def backward_params_list(L):
thread_value_supported = [8, 16, 32, 64, 128]
thread_params = ('ITEMS_PER_THREAD_SYM_BWD_VALUE', [v for v in thread_value_supported
if (L + v - 1) // v <= 1024])
params_list = [{thread_params[0]: value} for value in thread_params[1]]
return params_list
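# Illustrative note (editor's addition): each entry of these lists is a dict of macro values
# that tuner.prepare_kernel_string prepends to cauchy_cuda.cu as "#define" lines, e.g. a
# forward candidate might look like
#     {'ITEMS_PER_THREAD_SYM_FWD_VALUES': '{2, 4, 8, 16, 32, 32, 32, 32, 32, 32}',
#      'MAX_BLOCK_SIZE_VALUE': 128}
# and one temporary extension is compiled and benchmarked per candidate.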
parser = argparse.ArgumentParser(description='Tuning Cauchy multiply')
parser.add_argument('--mode', default='forward', choices=['forward', 'backward'])
parser.add_argument('-N', default=64, type=int)
parser.add_argument('-L', default=2 ** 14, type=int)
parser.add_argument('--filename', default='tuning_result.json')
if __name__ == '__main__':
args = parser.parse_args()
extension_dir = Path(__file__).absolute().parent
source_files = ['cauchy_cuda.cu']
if args.mode == 'forward':
params_list = forward_params_list(args.N)
tuner = KernelTuner(extension_dir, source_files, params_list,
benchmark_script='benchmark_cauchy_tune.py',
benchmark_args=['--mode', 'forward', '-N', str(args.N), '-L', '16384'],
npool=16)
else:
params_list = backward_params_list(args.L)
tuner = KernelTuner(extension_dir, source_files, params_list,
benchmark_script='benchmark_cauchy_tune.py',
benchmark_args=['--mode', 'backward', '-N', '64', '-L', str(args.L)],
npool=16)
result = tuner.tune()
with open(args.filename, 'w') as f:
json.dump(result, f)
| state-spaces-main | extensions/kernels/tune_cauchy.py |
import math
import torch
import pytest
from einops import rearrange
from cauchy import cauchy_mult_torch, cauchy_mult_keops, cauchy_mult
def generate_data(batch_size, N, L, symmetric=True, device='cuda'):
if not symmetric:
v = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
w = torch.randn(batch_size, N, dtype=torch.complex64, device=device, requires_grad=True)
z = torch.randn(L, dtype=torch.complex64, device=device)
else:
assert N % 2 == 0
v_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
v = torch.cat([v_half, v_half.conj()], dim=-1).requires_grad_(True)
w_half = torch.randn(batch_size, N // 2, dtype=torch.complex64, device=device)
w = torch.cat([w_half, w_half.conj()], dim=-1).requires_grad_(True)
z = torch.exp(1j * torch.randn(L, dtype=torch.float32, device=device))
return v, z, w
def grad_to_half_grad(dx):
dx_half, dx_half_conj = dx.chunk(2, dim=-1)
return dx_half + dx_half_conj.conj()
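# Editor's note on the identity used above: with v = cat([v_half, conj(v_half)]), the gradient
# w.r.t. v_half is the gradient w.r.t. the first half plus the conjugate of the gradient w.r.t.
# the second half (since the backward of conj() is conj()), which is exactly what
# grad_to_half_grad computes. This lets the half-parameter gradients of the fused kernel be
# compared against full-parameter gradients from the torch/KeOps references.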
@pytest.mark.parametrize('L', [3, 17, 489, 2**10, 1047, 2**11, 2**12, 2**13, 2**14, 2**18])
@pytest.mark.parametrize('N', [4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048])
def test_cauchy_mult_symmetric(N, L):
# rtol, atol = (1e-4, 1e-4) if N <= 64 and L <= 1024 else(1e-3, 1e-3)
atol = 1e-4
tol_factor = 2.0 # Our error shouldn't be this much higher than Keops' error
device = 'cuda'
batch_size = 4
torch.random.manual_seed(2357)
v, z, w = generate_data(batch_size, N, L, symmetric=True, device=device)
v_half = v[:, :N // 2].clone().detach().requires_grad_(True)
w_half = w[:, :N // 2].clone().detach().requires_grad_(True)
# out_torch = cauchy_mult_torch(v, z, w, symmetric=True)
out_torch = cauchy_mult_torch(v.cdouble(), z.cdouble(), w.cdouble(), symmetric=True).cfloat()
out_keops = cauchy_mult_keops(v, z, w)
out = cauchy_mult(v_half, z, w_half)
relerr_out_keops = (out_keops - out_torch).abs() / out_torch.abs()
relerr_out = (out - out_torch).abs() / out_torch.abs()
dout = torch.randn_like(out)
dv_torch, dw_torch = torch.autograd.grad(out_torch, (v, w), dout, retain_graph=True)
dv_torch, dw_torch = dv_torch[:, :N // 2], dw_torch[:, :N // 2]
dv_keops, dw_keops = torch.autograd.grad(out_keops, (v, w), dout, retain_graph=True)
dv_keops, dw_keops = grad_to_half_grad(dv_keops), grad_to_half_grad(dw_keops)
dv, dw = torch.autograd.grad(out, (v_half, w_half), dout, retain_graph=True)
relerr_dv_keops = (dv_keops - dv_torch).abs() / dv_torch.abs()
relerr_dv = (dv - dv_torch).abs() / dv_torch.abs()
relerr_dw_keops = (dw_keops - dw_torch).abs() / dw_torch.abs()
relerr_dw = (dw - dw_torch).abs() / dw_torch.abs()
    print(f'Keops out relative error: max {relerr_out_keops.amax().item():.6f}, mean {relerr_out_keops.mean().item():.6f}')
print(f'out relative error: max {relerr_out.amax().item():.6f}, mean {relerr_out.mean().item():.6f}')
    print(f'Keops dv relative error: max {relerr_dv_keops.amax().item():.6f}, mean {relerr_dv_keops.mean().item():.6f}')
print(f'dv relative error: max {relerr_dv.amax().item():.6f}, mean {relerr_dv.mean().item():.6f}')
    print(f'Keops dw relative error: max {relerr_dw_keops.amax().item():.6f}, mean {relerr_dw_keops.mean().item():.6f}')
print(f'dw relative error: max {relerr_dw.amax().item():.6f}, mean {relerr_dw.mean().item():.6f}')
assert (relerr_out.amax() <= relerr_out_keops.amax() * tol_factor + atol)
assert (relerr_out.mean() <= relerr_out_keops.mean() * tol_factor + atol)
# assert torch.allclose(out, out_torch, rtol=rtol, atol=atol)
# assert torch.allclose(out, out_keops, rtol=rtol, atol=atol)
assert (relerr_dv.amax() <= relerr_dv_keops.amax() * tol_factor + atol)
assert (relerr_dv.mean() <= relerr_dv_keops.mean() * tol_factor + atol)
assert (relerr_dw.amax() <= relerr_dw_keops.amax() * tol_factor + atol)
assert (relerr_dw.mean() <= relerr_dw_keops.mean() * tol_factor + atol)
# assert torch.allclose(dv, dv_torch, rtol=1e-4, atol=1e-4)
# assert torch.allclose(dv, dv_keops, rtol=1e-4, atol=1e-4)
# assert torch.allclose(dw, dw_torch, rtol=1e-4, atol=1e-4)
# assert torch.allclose(dw, dw_keops, rtol=1e-4, atol=1e-4)
| state-spaces-main | extensions/kernels/test_cauchy.py |
"""Implementations of general metric functions."""
import math
import torch
import torch.nn.functional as F
from sklearn.metrics import f1_score, roc_auc_score
from functools import partial
def _student_t_map(mu, sigma, nu):
sigma = F.softplus(sigma)
nu = 2.0 + F.softplus(nu)
return mu.squeeze(axis=-1), sigma.squeeze(axis=-1), nu.squeeze(axis=-1)
def student_t_loss(outs, y):
mu, sigma, nu = outs[..., 0], outs[..., 1], outs[..., 2]
mu, sigma, nu = _student_t_map(mu, sigma, nu)
y = y.squeeze(axis=-1)
nup1_half = (nu + 1.0) / 2.0
part1 = 1.0 / nu * torch.square((y - mu) / sigma)
Z = (
torch.lgamma(nup1_half)
- torch.lgamma(nu / 2.0)
- 0.5 * torch.log(math.pi * nu)
- torch.log(sigma)
)
ll = Z - nup1_half * torch.log1p(part1)
return -ll.mean()
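# Editor's note: student_t_loss above is the mean negative log-likelihood of a Student-t
# distribution with location mu, scale sigma and nu degrees of freedom,
#     log p(y) = log Gamma((nu+1)/2) - log Gamma(nu/2) - (1/2) log(pi * nu) - log(sigma)
#                - ((nu+1)/2) * log(1 + ((y - mu)/sigma)^2 / nu),
# where Z in the code collects the first four terms and part1 is ((y - mu)/sigma)^2 / nu.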
def gaussian_ll_loss(outs, y):
mu, sigma = outs[..., 0], outs[..., 1]
y = y.squeeze(axis=-1)
sigma = F.softplus(sigma)
ll = -1.0 * (
torch.log(sigma)
+ 0.5 * math.log(2 * math.pi)
+ 0.5 * torch.square((y - mu) / sigma)
)
return -ll.mean()
def binary_cross_entropy(logits, y):
# BCE loss requires squeezing last dimension of logits so it has the same shape as y
# requires y to be float, since it's overloaded to represent a probability
return F.binary_cross_entropy_with_logits(logits.squeeze(-1), y.float())
def binary_accuracy(logits, y):
return torch.eq(logits.squeeze(-1) >= 0, y).float().mean()
def cross_entropy(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
return F.cross_entropy(logits, y)
def soft_cross_entropy(logits, y, label_smoothing=0.0):
logits = logits.view(-1, logits.shape[-1])
# target is now 2d (no target flattening)
return F.cross_entropy(logits, y, label_smoothing=label_smoothing)
def accuracy(logits, y):
logits = logits.view(-1, logits.shape[-1])
if y.numel() > logits.shape[0]:
# Mixup leads to this case: use argmax class
y = y.argmax(dim=-1)
y = y.view(-1)
return torch.eq(torch.argmax(logits, dim=-1), y).float().mean()
def accuracy_at_k(logits, y, k=1):
logits = logits.view(-1, logits.shape[-1])
if y.numel() > logits.shape[0]:
# Mixup leads to this case: use argmax class
y = y.argmax(dim=-1)
y = y.view(-1)
return torch.topk(logits, k, dim=-1)[1].eq(y.unsqueeze(-1)).any(dim=-1).float().mean()
def f1_binary(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="binary")
def f1_macro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="macro")
def f1_micro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="micro")
def roc_auc_macro(logits, y):
logits = logits.view(
-1, logits.shape[-1]
).detach() # KS: had to add detach to eval while training
y = y.view(-1)
return roc_auc_score(
y.cpu().numpy(), F.softmax(logits, dim=-1).cpu().numpy()[:, 1], average="macro"
)
def roc_auc_micro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
return roc_auc_score(
y.cpu().numpy(), F.softmax(logits, dim=-1).cpu().numpy()[:, 1], average="micro"
)
def mse(outs, y, len_batch=None):
# assert outs.shape[:-1] == y.shape and outs.shape[-1] == 1
# outs = outs.squeeze(-1)
if len(y.shape) < len(outs.shape):
assert outs.shape[-1] == 1
outs = outs.squeeze(-1)
if len_batch is None:
return F.mse_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
# TODO document the use case of this
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.mse_loss(outs_masked, y_masked)
def forecast_rmse(outs, y, len_batch=None):
# TODO: generalize, currently for Monash dataset
return torch.sqrt(F.mse_loss(outs, y, reduction='none').mean(1)).mean()
def mae(outs, y, len_batch=None):
# assert outs.shape[:-1] == y.shape and outs.shape[-1] == 1
# outs = outs.squeeze(-1)
if len(y.shape) < len(outs.shape):
assert outs.shape[-1] == 1
outs = outs.squeeze(-1)
if len_batch is None:
return F.l1_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.l1_loss(outs_masked, y_masked)
"""Metrics that can depend on the loss."""
def loss(x, y, loss_fn):
"""Metric that just returns the loss function.
    This metric may be useful because the training loss may add extra regularization (e.g. weight decay implemented as an L2 penalty), while adding this as a metric skips the additional losses."""
return loss_fn(x, y)
def bpb(x, y, loss_fn):
"""Bits per byte (for image density estimation, speech generation, char LM)."""
return loss_fn(x, y) / math.log(2)
def ppl(x, y, loss_fn):
return torch.exp(loss_fn(x, y))
# Should be a better way to do this
output_metric_fns = {
"binary_cross_entropy": binary_cross_entropy,
"cross_entropy": cross_entropy,
"binary_accuracy": binary_accuracy,
"accuracy": accuracy,
'accuracy@3': partial(accuracy_at_k, k=3),
'accuracy@5': partial(accuracy_at_k, k=5),
'accuracy@10': partial(accuracy_at_k, k=10),
"eval_loss": loss,
"mse": mse,
"mae": mae,
"forecast_rmse": forecast_rmse,
"f1_binary": f1_binary,
"f1_macro": f1_macro,
"f1_micro": f1_micro,
"roc_auc_macro": roc_auc_macro,
"roc_auc_micro": roc_auc_micro,
"soft_cross_entropy": soft_cross_entropy, # only for pytorch 1.10+
"student_t": student_t_loss,
"gaussian_ll": gaussian_ll_loss,
}
try:
from segmentation_models_pytorch.utils.functional import iou
from segmentation_models_pytorch.losses.focal import focal_loss_with_logits
def iou_with_logits(pr, gt, eps=1e-7, threshold=None, ignore_channels=None):
return iou(pr.sigmoid(), gt, eps=eps, threshold=threshold, ignore_channels=ignore_channels)
output_metric_fns["iou"] = partial(iou, threshold=0.5)
output_metric_fns["iou_with_logits"] = partial(iou_with_logits, threshold=0.5)
output_metric_fns["focal_loss"] = focal_loss_with_logits
except ImportError:
pass
loss_metric_fns = {
"loss": loss,
"bpb": bpb,
"ppl": ppl,
}
metric_fns = {**output_metric_fns, **loss_metric_fns} # TODO py3.9
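# Illustrative usage (editor's sketch; the tensors below are hypothetical):
#     import torch
#     logits = torch.randn(8, 10)          # (batch, n_classes)
#     y = torch.randint(10, (8,))          # (batch,)
#     acc = metric_fns["accuracy"](logits, y)
#     nll = metric_fns["loss"](logits, y, F.cross_entropy)  # loss metrics also take the loss fn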
| state-spaces-main | src/tasks/metrics.py |
"""Implements Task interface, which consists of encoder + decoder + loss/metrics."""
from typing import Optional, List, Tuple
import math
import functools
import collections
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from omegaconf import ListConfig
from src.models.nn.normalization import (
ReversibleInstanceNorm1dInput,
ReversibleInstanceNorm1dOutput,
TSNormalization,
TSInverseNormalization,
)
from src.models.nn.adaptive_softmax import AdaptiveEmbedding, ProjectedAdaptiveLogSoftmax
import src.tasks.metrics as M
import src.models.nn.utils as U
import torchmetrics as tm
from src.utils.config import to_list, instantiate
class BaseTask:
"""Abstract class for all tasks.
This class takes care of:
- loss function
- arbitrary metrics
- (optional) encoder module that interfaces with dataset (inputs) and model
- (optional) decoder module that interfaces with dataset (targets) and model
"""
encoder = None
decoder = None
def __init__(self, dataset=None, model=None, loss=None, loss_val=None, metrics=None, torchmetrics=None):
""" This class is allowed to grab attributes directly off a constructed dataset and model object """
self.dataset = dataset
self.model = model
if metrics is None: metrics = []
self.metric_names = to_list(metrics)
if torchmetrics is None: torchmetrics = []
self.torchmetric_names = to_list(torchmetrics)
self._tracked_torchmetrics = {}
# The decoder might pass through arguments that the loss needs (e.g. sequence lengths)
# but might also pass through extraneous arguments (e.g. sampling rate)
        # Wrap loss and metrics so that they accept kwargs and discard any they don't use
# Create loss function
self.loss = instantiate(M.output_metric_fns, loss, partial=True)
self.loss = U.discard_kwargs(self.loss)
if loss_val is not None:
self.loss_val = instantiate(M.output_metric_fns, loss_val, partial=True)
self.loss_val = U.discard_kwargs(self.loss_val)
def _init_torchmetrics(self, prefix):
"""Instantiate torchmetrics."""
# TODO torchmetrics is better renamed to "epoch_metrics" or something
self._tracked_torchmetrics[prefix] = {}
for name in self.torchmetric_names:
if name in ['AUROC', 'StatScores', 'Precision', 'Recall', 'F1', 'F1Score']:
self._tracked_torchmetrics[prefix][name] = getattr(tm, name)(average='macro', num_classes=self.dataset.d_output, compute_on_step=False).to('cuda')
elif '@' in name:
k = int(name.split('@')[1])
mname = name.split('@')[0]
self._tracked_torchmetrics[prefix][name] = getattr(tm, mname)(average='macro', num_classes=self.dataset.d_output, compute_on_step=False, top_k=k).to('cuda')
else:
self._tracked_torchmetrics[prefix][name] = getattr(tm, name)(compute_on_step=False).to('cuda')
def _reset_torchmetrics(self, prefix=None):
"""Reset torchmetrics for a prefix associated with a particular dataloader (e.g. train, val, test).
Generally do this at the start of an epoch.
"""
all_prefixes = [prefix] if prefix is not None else self._tracked_torchmetrics
for prefix in all_prefixes:
for name in self.torchmetric_names:
try:
self._tracked_torchmetrics[prefix][name].reset()
except KeyError: # metrics don't exist yet
pass
def get_torchmetrics(self, prefix):
"""Compute torchmetrics for a prefix associated with a particular dataloader (e.g. train, val, test).
Generally do this at the end of an epoch.
"""
return {name: self._tracked_torchmetrics[prefix][name].compute() for name in self.torchmetric_names}
def torchmetrics(self, x, y, prefix):
"""Update torchmetrics with new data.
Prefix corresponds to a particular dataloader (e.g. train, val, test).
Generally call this every batch.
"""
if prefix not in self._tracked_torchmetrics:
self._init_torchmetrics(prefix)
for name in self.torchmetric_names:
if name.startswith('Accuracy'):
if len(x.shape) > 2:
# Multi-dimensional, multi-class
self._tracked_torchmetrics[prefix][name].update(x.transpose(1, 2), y.squeeze())
continue
self._tracked_torchmetrics[prefix][name].update(x, y)
def metrics(self, x, y, **kwargs):
"""Add metrics to the task.
Metrics are just functions:
- output metrics are a function of output and target
- loss metrics are a function of loss (e.g. perplexity)
"""
output_metrics = {
name: U.discard_kwargs(M.output_metric_fns[name])(x, y, **kwargs)
for name in self.metric_names if name in M.output_metric_fns
}
loss_metrics = {
name: U.discard_kwargs(M.loss_metric_fns[name])(x, y, self.loss, **kwargs)
for name in self.metric_names if name in M.loss_metric_fns
}
return {**output_metrics, **loss_metrics}
class Scalar(nn.Module):
def __init__(self, c=1):
super().__init__()
self.c = c
def forward(self, x):
return x * self.c
class LMTask(BaseTask):
def __init__(self, tied=False, rescale=True, **kwargs):
super().__init__(loss='cross_entropy', **kwargs)
n_tokens = self.dataset.n_tokens
d_model = self.model.d_model
d_output = self.model.d_output
if rescale:
scale = Scalar(math.sqrt(d_model))
else:
scale = None
embedding = nn.Embedding(n_tokens, d_model)
nn.init.normal_(embedding.weight, mean=0, std=d_model**-.5)
encoder = U.PassthroughSequential(
embedding,
scale,
)
self.encoder = encoder
decoder = nn.Linear(d_output, n_tokens)
if tied:
assert d_model == d_output
decoder.weight = self.encoder[0].weight
self.decoder = decoder
class ForecastingTask(BaseTask):
class DummyModule(nn.Module):
def forward(self, *args):
return args
def __init__(self, norm='mean', **kwargs):
super().__init__(**kwargs)
if norm == 'revnorm':
self.encoder = ReversibleInstanceNorm1dInput(self.dataset.d_input, transposed=False)
self.decoder = ReversibleInstanceNorm1dOutput(self.encoder)
elif norm == 'mean':
self.encoder = TSNormalization(method='mean', horizon=self.dataset.dataset_train.forecast_horizon)
self.decoder = TSInverseNormalization(method='mean', normalizer=self.encoder)
elif norm == 'last':
self.encoder = TSNormalization(method='last', horizon=self.dataset.dataset_train.forecast_horizon)
self.decoder = TSInverseNormalization(method='last', normalizer=self.encoder)
else:
self.encoder = None
self.decoder = None
try:
if hasattr(self.dataset.dataset_train, 'mean'):
self.mean = torch.tensor(self.dataset.dataset_train.mean)
self.std = torch.tensor(self.dataset.dataset_train.std)
elif hasattr(self.dataset.dataset_train, 'standardization'):
self.mean = torch.tensor(self.dataset.dataset_train.standardization['means'])
self.std = torch.tensor(self.dataset.dataset_train.standardization['stds'])
else:
self.mean = None
self.std = None
except AttributeError:
            raise AttributeError('Dataset does not have mean/std attributes')
if hasattr(self.dataset.dataset_train, 'log_transform'):
self.log_transform = self.dataset.dataset_train.log_transform
else:
self.log_transform = False
print("Log Transform", self.log_transform)
def metrics(self, x, y, state=None, timestamps=None, ids=None): # Explicit about which arguments the decoder might pass through, but can future-proof with **kwargs
if self.mean is not None:
means = self.mean[ids].to(x.device)
stds = self.std[ids].to(x.device)
x_ = x * stds[:, None, None] + means[:, None, None]
y_ = y * stds[:, None, None] + means[:, None, None]
else:
x_ = x
y_ = y
if self.log_transform:
x_ = torch.exp(x_)
y_ = torch.exp(y_)
return super().metrics(x_, y_)
class VideoTask(BaseTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# self._y_to_logits = {}
self._vid_to_logits = {}
self._vid_to_label = {}
        # TODO needed to extract the first element of y, which includes the video id; there should be a cleaner pattern to this
import copy
loss_fn = copy.deepcopy(self.loss)
self.loss = lambda x, y: loss_fn(x, y[0])
self.loss = U.discard_kwargs(self.loss) # remove extra kwargs
if hasattr(self, 'loss_val'):
loss_val_fn = copy.deepcopy(self.loss_val)
self.loss_val = lambda x, y: loss_val_fn(x, y[0])
self.loss_val = U.discard_kwargs(self.loss_val) # remove extra kwargs
def metrics(self, logits, y, **kwargs):
labels, vids = y
return super().metrics(logits, labels, **kwargs)
def torchmetrics(self, logits, y, prefix):
"""
logits: (batch, n_classes)
y = tuple of labels and video ids
labels: (batch)
vids: (batch)
"""
for _logits, _label, _vid in zip(logits, y[0], y[1]):
_vid = _vid.item()
# Check that labels are consistent per video id
assert self._vid_to_label[prefix].get(_vid, _label) == _label
self._vid_to_label[prefix][_vid] = _label
self._vid_to_logits[prefix][_vid].append(_logits)
def _reset_torchmetrics(self, prefix):
self._vid_to_logits[prefix] = collections.defaultdict(list)
self._vid_to_label[prefix] = {}
def get_torchmetrics(self, prefix):
vid_to_average_logits = {vid: torch.mean(torch.stack(logits, dim=0), dim=0) for vid, logits in self._vid_to_logits[prefix].items()}
# y is (label, vid) pair
all_labels = torch.stack(list(self._vid_to_label[prefix].values()), dim=0) # (n_videos)
all_logits = torch.stack(list(vid_to_average_logits.values()), dim=0) # (n_videos, n_classes)
m = M.accuracy(all_logits, all_labels)
return {'aggregate_accuracy': m}
class AdaptiveLMTask(BaseTask):
def __init__(
self,
div_val,
cutoffs : List[int],
tie_weights : bool,
tie_projs : List[bool],
init_scale=1.0,
bias_scale=0.0,
dropemb=0.0,
dropsoft=0.0,
**kwargs,
):
super().__init__(**kwargs)
n_tokens = self.dataset.n_tokens
d_model = self.model.d_model
d_output = self.model.d_output
encoder = AdaptiveEmbedding(
n_tokens,
d_model,
d_model,
cutoffs=cutoffs,
div_val=div_val,
init_scale=init_scale,
dropout=dropemb,
)
if tie_weights:
assert d_model == d_output
emb_layers = [i.weight for i in encoder.emb_layers]
else:
emb_layers = None
# Construct decoder/loss
emb_projs = encoder.emb_projs
loss = ProjectedAdaptiveLogSoftmax(
n_tokens, d_output, d_output,
cutoffs, div_val=div_val,
tie_projs=tie_projs,
out_projs=emb_projs,
out_layers_weights=emb_layers,
bias_scale=bias_scale,
dropout=dropsoft,
)
self.encoder = encoder
self.loss = loss
class ImageNetTask(BaseTask):
"""
    ImageNet training uses mixup augmentations, which require separate losses for train and val,
    so we override the base task here.
Not really used anymore.
"""
def __init__(self, **kwargs):
import hydra
super().__init__(
dataset=kwargs.get("dataset", None),
model=kwargs.get("model", None),
loss=kwargs.get("loss", None), # we still create the base loss here, but will overide below
metrics=kwargs.get("metrics", None),
torchmetrics=kwargs.get("torchmetrics", None)
)
        # if using mixup, override loss (train) and loss_val; otherwise
# we have just one loss from the base task above
if "loss_val" in kwargs and "loss_train" in kwargs:
self.loss = hydra.utils.instantiate(kwargs.get("loss_train"))
self.loss_val = hydra.utils.instantiate(kwargs.get('loss_val'))
registry = {
'base': BaseTask,
'lm': LMTask,
'adaptivelm': AdaptiveLMTask,
'imagenet': ImageNetTask,
'forecasting': ForecastingTask,
'video': VideoTask,
}
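# Illustrative sketch (editor's addition; `dataset` and `model` below are hypothetical,
# already-constructed objects): a task is typically built by looking up this registry,
#     task = registry['base'](dataset=dataset, model=model, loss='cross_entropy',
#                             metrics=['accuracy'])
# after which the training loop uses task.encoder / task.decoder (if any), task.loss, and
# task.metrics(outputs, targets).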
| state-spaces-main | src/tasks/tasks.py |
"""Decoders that interface between targets and model."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce
import src.models.nn.utils as U
import src.utils as utils
class Decoder(nn.Module):
"""Abstract class defining the interface for Decoders.
TODO: is there a way to enforce the signature of the forward method?
"""
def forward(self, x, **kwargs):
"""
x: (batch, length, dim) input tensor
state: additional state from the model backbone
*args, **kwargs: additional info from the dataset
Returns:
y: output tensor
*args: other arguments to pass into the loss function
"""
return x
def step(self, x):
"""
x: (batch, dim)
"""
return self.forward(x.unsqueeze(1)).squeeze(1)
class SequenceDecoder(Decoder):
def __init__(
self, d_model, d_output=None, l_output=None, use_lengths=False, mode="last"
):
super().__init__()
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
if l_output is None:
self.l_output = None
self.squeeze = False
elif l_output == 0:
# Equivalent to getting an output of length 1 and then squeezing
self.l_output = 1
self.squeeze = True
else:
assert l_output > 0
self.l_output = l_output
self.squeeze = False
self.use_lengths = use_lengths
self.mode = mode
if mode == 'ragged':
assert not use_lengths
def forward(self, x, state=None, lengths=None, l_output=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.l_output is None:
if l_output is not None:
assert isinstance(l_output, int) # Override by pass in
else:
# Grab entire output
l_output = x.size(-2)
squeeze = False
else:
l_output = self.l_output
squeeze = self.squeeze
if self.mode == "last":
restrict = lambda x: x[..., -l_output:, :]
elif self.mode == "first":
restrict = lambda x: x[..., :l_output, :]
elif self.mode == "pool":
restrict = lambda x: (
torch.cumsum(x, dim=-2)
/ torch.arange(
1, 1 + x.size(-2), device=x.device, dtype=x.dtype
).unsqueeze(-1)
)[..., -l_output:, :]
            # Note: this def intentionally overrides the lambda above; it computes the same
            # cumulative average over the last l_output positions, but more efficiently.
            def restrict(x):
L = x.size(-2)
s = x.sum(dim=-2, keepdim=True)
if l_output > 1:
c = torch.cumsum(x[..., -(l_output - 1) :, :].flip(-2), dim=-2)
c = F.pad(c, (0, 0, 1, 0))
s = s - c # (B, l_output, D)
s = s.flip(-2)
denom = torch.arange(
L - l_output + 1, L + 1, dtype=x.dtype, device=x.device
)
                s = s / denom.unsqueeze(-1)  # divide along the length dim, not the feature dim
return s
elif self.mode == "sum":
restrict = lambda x: torch.cumsum(x, dim=-2)[..., -l_output:, :]
# TODO use same restrict function as pool case
elif self.mode == 'ragged':
assert lengths is not None, "lengths must be provided for ragged mode"
# remove any additional padding (beyond max length of any sequence in the batch)
restrict = lambda x: x[..., : max(lengths), :]
else:
raise NotImplementedError(
"Mode must be ['last' | 'first' | 'pool' | 'sum']"
)
# Restrict to actual length of sequence
if self.use_lengths:
assert lengths is not None
x = torch.stack(
[
restrict(out[..., :length, :])
for out, length in zip(torch.unbind(x, dim=0), lengths)
],
dim=0,
)
else:
x = restrict(x)
if squeeze:
assert x.size(-2) == 1
x = x.squeeze(-2)
x = self.output_transform(x)
return x
def step(self, x, state=None):
# Ignore all length logic
return self.output_transform(x)
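# Illustrative usage (editor's sketch; tensors are hypothetical):
#     dec = SequenceDecoder(d_model=64, d_output=10, l_output=0, mode="pool")
#     x = torch.randn(4, 128, 64)   # (batch, length, d_model)
#     y = dec(x)                    # (batch, 10): average over the sequence, then a linear head
# l_output=0 pools down to a single position and squeezes it, which is the usual
# classification setting; l_output=None keeps the full output length instead.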
class NDDecoder(Decoder):
"""Decoder for single target (e.g. classification or regression)."""
def __init__(
self, d_model, d_output=None, mode="pool"
):
super().__init__()
assert mode in ["pool", "full"]
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
self.mode = mode
def forward(self, x, state=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.mode == 'pool':
x = reduce(x, 'b ... h -> b h', 'mean')
x = self.output_transform(x)
return x
class StateDecoder(Decoder):
"""Use the output state to decode (useful for stateful models such as RNNs or perhaps Transformer-XL if it gets implemented."""
def __init__(self, d_model, state_to_tensor, d_output):
super().__init__()
self.output_transform = nn.Linear(d_model, d_output)
self.state_transform = state_to_tensor
def forward(self, x, state=None):
return self.output_transform(self.state_transform(state))
class RetrievalHead(nn.Module):
def __init__(self, d_input, d_model, n_classes, nli=True, activation="relu"):
super().__init__()
self.nli = nli
if activation == "relu":
activation_fn = nn.ReLU()
elif activation == "gelu":
activation_fn = nn.GELU()
else:
raise NotImplementedError
if (
self.nli
): # Architecture from https://github.com/mlpen/Nystromformer/blob/6539b895fa5f798ea0509d19f336d4be787b5708/reorganized_code/LRA/model_wrapper.py#L74
self.classifier = nn.Sequential(
nn.Linear(4 * d_input, d_model),
activation_fn,
nn.Linear(d_model, n_classes),
)
else: # Head from https://github.com/google-research/long-range-arena/blob/ad0ff01a5b3492ade621553a1caae383b347e0c1/lra_benchmarks/models/layers/common_layers.py#L232
self.classifier = nn.Sequential(
nn.Linear(2 * d_input, d_model),
activation_fn,
nn.Linear(d_model, d_model // 2),
activation_fn,
nn.Linear(d_model // 2, n_classes),
)
def forward(self, x):
"""
x: (2*batch, dim)
"""
outs = rearrange(x, "(z b) d -> z b d", z=2)
outs0, outs1 = outs[0], outs[1] # (n_batch, d_input)
if self.nli:
features = torch.cat(
[outs0, outs1, outs0 - outs1, outs0 * outs1], dim=-1
) # (batch, dim)
else:
features = torch.cat([outs0, outs1], dim=-1) # (batch, dim)
logits = self.classifier(features)
return logits
class RetrievalDecoder(Decoder):
"""Combines the standard FeatureDecoder to extract a feature before passing through the RetrievalHead."""
def __init__(
self,
d_input,
n_classes,
d_model=None,
nli=True,
activation="relu",
*args,
**kwargs
):
super().__init__()
if d_model is None:
d_model = d_input
self.feature = SequenceDecoder(
d_input, d_output=None, l_output=0, *args, **kwargs
)
self.retrieval = RetrievalHead(
d_input, d_model, n_classes, nli=nli, activation=activation
)
def forward(self, x, state=None, **kwargs):
x = self.feature(x, state=state, **kwargs)
x = self.retrieval(x)
return x
class PackedDecoder(Decoder):
def forward(self, x, state=None):
x, _ = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
return x
# For every type of encoder/decoder, specify:
# - constructor class
# - list of attributes to grab from dataset
# - list of attributes to grab from model
registry = {
"stop": Decoder,
"id": nn.Identity,
"linear": nn.Linear,
"sequence": SequenceDecoder,
"nd": NDDecoder,
"retrieval": RetrievalDecoder,
"state": StateDecoder,
"pack": PackedDecoder,
}
model_attrs = {
"linear": ["d_output"],
"sequence": ["d_output"],
"nd": ["d_output"],
"retrieval": ["d_output"],
"state": ["d_state", "state_to_tensor"],
"forecast": ["d_output"],
}
dataset_attrs = {
"linear": ["d_output"],
"sequence": ["d_output", "l_output"],
"nd": ["d_output"],
"retrieval": ["d_output"],
# TODO rename d_output to n_classes?
"state": ["d_output"],
"forecast": ["d_output", "l_output"],
}
def _instantiate(decoder, model=None, dataset=None):
"""Instantiate a single decoder"""
if decoder is None:
return None
if isinstance(decoder, str):
name = decoder
else:
name = decoder["_name_"]
# Extract arguments from attribute names
dataset_args = utils.config.extract_attrs_from_obj(
dataset, *dataset_attrs.get(name, [])
)
model_args = utils.config.extract_attrs_from_obj(model, *model_attrs.get(name, []))
# Instantiate decoder
obj = utils.instantiate(registry, decoder, *model_args, *dataset_args)
return obj
def instantiate(decoder, model=None, dataset=None):
"""Instantiate a full decoder config, e.g. handle list of configs
Note that arguments are added in reverse order compared to encoder (model first, then dataset)
"""
decoder = utils.to_list(decoder)
return U.PassthroughSequential(
*[_instantiate(d, model=model, dataset=dataset) for d in decoder]
)
| state-spaces-main | src/tasks/decoders.py |
"""Encoders that interface between input data and model."""
import datetime
import math
from typing import ForwardRef
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
import src.models.nn.utils as U
import src.utils as utils
import src.utils.config
from src.models.sequence.backbones.block import SequenceResidualBlock
from src.models.nn import Normalization
class Encoder(nn.Module):
"""Encoder abstraction.
Accepts a tensor and optional kwargs. Outside of the main tensor, all other arguments should be kwargs.
Returns a tensor and optional kwargs.
Encoders are combined via U.PassthroughSequential which passes these kwargs through in a pipeline. The resulting kwargs are accumulated and passed into the model backbone.
"""
def forward(self, x, **kwargs):
"""
x: input tensor
*args: additional info from the dataset (e.g. sequence lengths)
Returns:
y: output tensor
*args: other arguments to pass into the model backbone
"""
return x, {}
# Adapted from https://github.com/pytorch/examples/blob/master/word_language_model/model.py
class PositionalEncoder(Encoder):
r"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
        \text{PosEncoder}(pos, 2i) = \sin(pos/10000^{2i/d_{model}})
        \text{PosEncoder}(pos, 2i+1) = \cos(pos/10000^{2i/d_{model}})
        \text{where } pos \text{ is the word position and } i \text{ is the embedding index.}
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
        max_len: the max. length of the incoming sequence (default=16384).
Examples:
>>> pos_encoder = PositionalEncoder(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=16384, pe_init=None):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
if pe_init is not None:
self.pe = nn.Parameter(torch.empty(max_len, 1, d_model))
nn.init.normal_(self.pe, 0, pe_init)
# self.pe = pe.unsqueeze(1)
else:
pe = torch.zeros(max_len, d_model)
position = torch.arange(0.0, max_len).unsqueeze(1)
div_term = torch.exp(
-math.log(10000.0) * torch.arange(0.0, d_model, 2.0) / d_model
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe)
self.attn_mask = None
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
lens: actual lengths of sequences
Shape:
x: [l_sequence, n_batch, d_model]
Returns: [l_sequence, n_batch, d_model]
attn_mask: [l_sequence, l_sequence]
padding_mask:
"""
x = x + self.pe[: x.size(-2)]
return self.dropout(x)
class ClassEmbedding(Encoder):
# Should also be able to define this by subclassing Embedding
def __init__(self, n_classes, d_model):
super().__init__()
self.embedding = nn.Embedding(n_classes, d_model)
def forward(self, x, y):
x = x + self.embedding(y).unsqueeze(-2) # (B, L, D)
return x
class Conv1DEncoder(Encoder):
def __init__(self, d_input, d_model, kernel_size=25, stride=1, padding='same'):
super().__init__()
self.conv = nn.Conv1d(
in_channels=d_input,
out_channels=d_model,
kernel_size=kernel_size,
stride=stride,
padding=padding,
)
def forward(self, x):
# BLD -> BLD
x = self.conv(x.transpose(1, 2)).transpose(1, 2)
return x
class LayerEncoder(Encoder):
"""Use an arbitary SequenceModule layer"""
def __init__(self, d_model, prenorm=False, norm='layer', layer=None):
super().__init__()
# Simple stack of blocks
layer["transposed"] = False
self.layer = SequenceResidualBlock(
d_input=d_model,
prenorm=prenorm,
layer=layer,
residual='R',
norm=norm,
pool=None,
)
def forward(self, x):
x, _ = self.layer(x) # Discard state
return x
class TimestampEmbeddingEncoder(Encoder):
"""
General time encoder for Pandas Timestamp objects (encoded as torch tensors).
See MonashDataset for an example of how to return time features as 'z's.
"""
cardinalities = {
'day': (1, 31),
'hour': (0, 23),
'minute': (0, 59),
'second': (0, 59),
'month': (1, 12),
'year': (1950, 2010), # (1800, 3000) used to be (1970, datetime.datetime.now().year + 1) but was not enough for all datasets in monash
'dayofweek': (0, 6),
'dayofyear': (1, 366),
'quarter': (1, 4),
'week': (1, 53),
'is_month_start': (0, 1),
'is_month_end': (0, 1),
'is_quarter_start': (0, 1),
'is_quarter_end': (0, 1),
'is_year_start': (0, 1),
'is_year_end': (0, 1),
'is_leap_year': (0, 1),
}
def __init__(self, d_model, table=False, features=None):
super().__init__()
self.table = table
self.ranges = {k: max_val - min_val + 2 for k, (min_val, max_val) in self.cardinalities.items()} # padding for null included
if features is None:
pass
else:
self.cardinalities = {k: v for k, v in self.cardinalities.items() if k in features}
if table:
self.embedding = nn.ModuleDict({
attr: nn.Embedding(maxval - minval + 2, d_model, padding_idx=0)
for attr, (minval, maxval) in self.cardinalities.items()
})
else:
self.embedding = nn.ModuleDict({
attr: nn.Linear(1, d_model)
for attr in self.cardinalities
})
def forward(self, x, timestamps=None):
for attr in timestamps:
mask = timestamps[attr] == -1
timestamps[attr] = timestamps[attr] - self.cardinalities[attr][0]
timestamps[attr][mask] = 0
if self.table:
x = x + self.embedding[attr](timestamps[attr].to(torch.long))
else:
x = x + self.embedding[attr]((2 * timestamps[attr] / self.ranges[attr] - 1).unsqueeze(-1))
#x = x + self.embedding(timestamps[attr].to(torch.float)).unsqueeze(1)
return x
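# Editor's note: when table=False each timestamp feature is rescaled to roughly [-1, 1] before
# the linear embedding; e.g. for 'month' (cardinality (1, 12), so range 13 with padding) a
# month of 7 is first shifted by the minimum to 6, then mapped to 2 * 6 / 13 - 1 ≈ -0.08.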
# TODO is this used anymore?
class TSIndexEmbeddingEncoder(Encoder):
"""
Embeds location of sample in the time series
"""
def __init__(self, n_ts, d_model, table=True):
super().__init__()
self.table = table
self.n_ts = n_ts
if table:
self.embedding = nn.Embedding(n_ts, d_model)
else:
# self.embedding = nn.Linear(1, d_model)
# self.linear = nn.Linear(2 * d_model, d_model)
self.linear = nn.Linear(d_model + 1, d_model)
def forward(self, x, z=None, idxs=None):
if self.table:
x = x + self.embedding(idxs.to(torch.long)).unsqueeze(1)
else:
# x = self.linear(torch.cat([x, self.embedding((2 * idxs / self.n_ts - 1)[:, None, None]).repeat((1, x.shape[1], 1))], axis=-1))
x = self.linear(torch.cat([x, ((2 * idxs / self.n_ts - 1)[:, None, None]).repeat((1, x.shape[1], 1))], axis=-1))
#x = x + self.embedding(idxs.unsqueeze(1).to(torch.float)).unsqueeze(1)
return x
class TimeEncoder(Encoder):
def __init__(self, n_tokens_time, d_model, timeenc=0):
super().__init__()
self.timeenc = timeenc
if self.timeenc == 0:
self.encoders = nn.ModuleList(
[nn.Embedding(v, d_model) for v in n_tokens_time]
)
else:
self.encoders = nn.Linear(len(n_tokens_time), d_model)
self.mask_embed = nn.Embedding(2, d_model)
def forward(self, x, mark=None, mask=None):
assert mark is not None and mask is not None, "Extra arguments should be returned by collate function"
if self.timeenc == 0:
assert mark.size(-1) == len(self.encoders)
embeddings = [
embed(z) for embed, z in zip(self.encoders, torch.unbind(mark, dim=-1))
]
time_encode = torch.sum(torch.stack(embeddings), dim=0)
else:
time_encode = self.encoders(mark)
mask_encode = self.mask_embed(mask.squeeze(-1))
return x + time_encode + mask_encode # (B, L, d_model)
class EEGAgeEncoder(Encoder):
def __init__(self, d_model):
super().__init__()
self.encoder = nn.Linear(1, d_model)
def forward(self, x, age=None):
z = self.encoder(((age - 50.0) / 100.0).unsqueeze(1))
return x + z.unsqueeze(1)
class PackedEncoder(Encoder):
def forward(self, x, len_batch=None):
assert len_batch is not None
x = nn.utils.rnn.pack_padded_sequence(
x, len_batch.cpu(), enforce_sorted=False, batch_first=True,
)
return x
class OneHotEncoder(Encoder):
def __init__(self, n_tokens, d_model):
super().__init__()
assert n_tokens <= d_model
self.d_model = d_model
def forward(self, x):
return F.one_hot(x.squeeze(-1), self.d_model).float()
class Conv3DPatchEncoder(Encoder):
"""For encoding 3D data (e.g. videos) into a sequence of patches.
Arguments:
- d_emb: dim of embedding output
- filter_sizes: tuple, with ft, fh, fw
- max_len: int, max seq len
"""
def __init__(self, d_emb, filter_sizes, pos_enc=False, max_len=2352):
self.pos_enc = pos_enc
ft, fh, fw = filter_sizes
super().__init__()
assert len(filter_sizes) == 3
self.encoder = nn.Conv3d(3, d_emb, kernel_size=(ft, fh, fw), stride=(ft, fh, fw))
def forward(self, x):
"""
x: shape = [b, c, t, h, w]
Returns tuple with x, with new shape = [b, seq_len, c_out]
"""
x = self.encoder(x)
b, c, t, h, w = x.shape
x = x.reshape([b, c, t*h*w]) # flatten spatial / temporal dim
x = x.permute(0, 2, 1) # permute the c and seq len for s4
return x
class Conv2DPatchEncoder(Encoder):
"""For encoding images into a sequence of patches.
Arguments:
- d_input: dim of encoder input (data dimension)
- d_model: dim of encoder output (model dimension)
- filter_sizes: tuple with fh, fw
- flat: if image is flattened from dataloader (like in cifar),
then we need to reshape back to 2D before conv
"""
def __init__(self, d_input, d_model, filter_sizes, flat=False):
fh, fw = filter_sizes
self.flat = flat
super().__init__()
assert len(filter_sizes) == 2
self.encoder = nn.Conv2d(d_input, d_model, kernel_size=(fh, fw), stride=(fh, fw))
def forward(self, x):
"""
x shape = [b, h, w, c]
Returns tuple with x, with new shape = [b, seq_len, c_out]
"""
x = rearrange(x, 'b h w c -> b c h w')
x = self.encoder(x)
x = rearrange(x, 'b c h w -> b (h w) c')
return x
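# Worked example (editor's sketch): with filter_sizes=(4, 4) and a 32x32 RGB image, the strided
# conv produces (32/4) * (32/4) = 64 patches, each projected to d_model channels:
#     enc = Conv2DPatchEncoder(d_input=3, d_model=128, filter_sizes=(4, 4))
#     x = torch.randn(2, 32, 32, 3)   # (batch, h, w, c)
#     enc(x).shape                    # -> torch.Size([2, 64, 128])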
class TextConditionalEncoder(Encoder):
def __init__(self, vocab_size, d_model, n_layers, layer, reversal=False):
super().__init__()
# d_model = 2 * d_model
self.reversal = reversal
self.padding_idx = vocab_size - 1
self.text_embedding = nn.Embedding(vocab_size, d_model)
# Simple stack of blocks
self.text_encoder = nn.ModuleList([
SequenceResidualBlock(
d_input=d_model,
i_layer=i,
prenorm=True,
layer=layer,
residual='R',
norm='layer',
pool=None,
transposed=True,
) for i in range(n_layers)
])
# self.output_linear = nn.Linear(d_model, d_model // 2)
# self.norm = Normalization(d_model, transposed=True, _name_='layer')
def forward(self, x, tokens=None, text_lengths=None):
# Arguments must be in this order
# lengths, tokens, text_lengths = args
assert tokens is not None and text_lengths is not None
# Calculate the text embedding
text_embedding = self.text_embedding(tokens) # (B, L, D)
text_embedding = text_embedding.transpose(1, 2) # (B, D, L)
for layer in self.text_encoder:
text_embedding, _ = layer(text_embedding)
if self.reversal:
# Reverse the sequence
text_embedding = text_embedding.fliplr()
# text_embedding = self.norm(text_embedding)
text_embedding = text_embedding.transpose(1, 2)
# Zero out the embedding for padding tokens
mask = (tokens != self.padding_idx).unsqueeze(2)
text_embedding = text_embedding * mask.float()
# Calculate the mean embedding for each sequence (normalizing by appropriate token lengths)
text_embedding = text_embedding.sum(dim=1) / text_lengths.float().unsqueeze(1)
# text_embedding = self.output_linear(text_embedding)
# Add the text embedding to the sequence embedding (for global conditioning)
x = x + text_embedding.unsqueeze(1)
return x
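# Editor's note on the conditioning above: token embeddings are run through the text encoder
# stack, padding positions are zeroed out via the mask, and the sum is divided by the true
# token count (text_lengths), i.e. a masked mean over the text. That single vector is then
# broadcast-added to every position of the main sequence x (global conditioning).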
# For every type of encoder/decoder, specify:
# - constructor class
# - list of attributes to grab from dataset
# - list of attributes to grab from model
registry = {
"stop": Encoder,
"id": nn.Identity,
"embedding": nn.Embedding,
"linear": nn.Linear,
"position": PositionalEncoder,
"class": ClassEmbedding,
"pack": PackedEncoder,
"time": TimeEncoder,
"onehot": OneHotEncoder,
"conv1d": Conv1DEncoder,
"eegage": EEGAgeEncoder,
"patch3d": Conv3DPatchEncoder,
"patch2d": Conv2DPatchEncoder,
"textcond": TextConditionalEncoder,
"timestamp_embedding": TimestampEmbeddingEncoder,
"tsindex_embedding": TSIndexEmbeddingEncoder,
"layer": LayerEncoder,
}
dataset_attrs = {
"embedding": ["n_tokens"],
"textcond": ["vocab_size"],
"linear": ["d_input"], # TODO make this d_data?
"class": ["n_classes"],
"time": ["n_tokens_time"],
"onehot": ["n_tokens"],
"conv1d": ["d_input"],
"patch2d": ["d_input"],
"tsindex_embedding": ["n_ts"],
}
model_attrs = {
"embedding": ["d_model"],
"textcond": ["d_model"],
"linear": ["d_model"],
"position": ["d_model"],
"class": ["d_model"],
"time": ["d_model"],
"onehot": ["d_model"],
"conv1d": ["d_model"],
"patch2d": ["d_model"],
"eegage": ["d_model"],
"timestamp_embedding": ["d_model"],
"tsindex_embedding": ["d_model"],
"layer": ["d_model"],
}
def _instantiate(encoder, dataset=None, model=None):
"""Instantiate a single encoder"""
if encoder is None:
return None
if isinstance(encoder, str):
name = encoder
else:
name = encoder["_name_"]
# Extract dataset/model arguments from attribute names
dataset_args = utils.config.extract_attrs_from_obj(
dataset, *dataset_attrs.get(name, [])
)
model_args = utils.config.extract_attrs_from_obj(model, *model_attrs.get(name, []))
# Instantiate encoder
obj = utils.instantiate(registry, encoder, *dataset_args, *model_args)
return obj
def instantiate(encoder, dataset=None, model=None):
encoder = utils.to_list(encoder)
return U.PassthroughSequential(
*[_instantiate(e, dataset=dataset, model=model) for e in encoder]
)
| state-spaces-main | src/tasks/encoders.py |
"""Log parameter counts to WandB."""
from typing import Any
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
class ParamsLog(pl.Callback):
""" Log the number of parameters of the model """
def __init__(
self,
total: bool = True,
trainable: bool = True,
fixed: bool = True,
):
super().__init__()
self._log_stats = AttributeDict(
{
'total_params_log': total,
'trainable_params_log': trainable,
'non_trainable_params_log': fixed,
}
)
@rank_zero_only
def on_fit_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
logs = {}
if self._log_stats.total_params_log:
logs["params/total"] = sum(p.numel() for p in pl_module.parameters())
if self._log_stats.trainable_params_log:
logs["params/trainable"] = sum(p.numel() for p in pl_module.parameters()
if p.requires_grad)
if self._log_stats.non_trainable_params_log:
logs["params/fixed"] = sum(p.numel() for p in pl_module.parameters()
if not p.requires_grad)
if trainer.logger:
trainer.logger.log_hyperparams(logs)
| state-spaces-main | src/callbacks/params.py |
"""PL callbacks for logging to WandB.
From https://github.com/HazyResearch/transformers/blob/master/src/callbacks/wandb_callbacks.py.
"""
import glob
import os
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from pytorch_lightning.utilities import rank_zero_only
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
"""Safely get Weights&Biases logger from Trainer."""
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exception(
"You are using wandb related callback, but WandbLogger was not found for some reason..."
)
class WatchModel(Callback):
"""Make wandb watch model at the beginning of the run."""
def __init__(self, log: str = "gradients", log_freq: int = 100):
self.log = log
self.log_freq = log_freq
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class UploadCodeAsArtifact(Callback):
"""Upload all *.py files to wandb as an artifact, at the beginning of the run."""
def __init__(self, code_dir: str):
self.code_dir = code_dir
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
code = wandb.Artifact("project-source", type="code")
for path in glob.glob(os.path.join(self.code_dir, "**/*.py"), recursive=True):
code.add_file(path)
experiment.log_artifact(code)
class UploadCheckpointsAsArtifact(Callback):
"""Upload checkpoints to wandb as an artifact, at the end of run."""
def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
self.ckpt_dir = ckpt_dir
self.upload_best_only = upload_best_only
@rank_zero_only
def on_train_end(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
if self.upload_best_only:
ckpts.add_file(trainer.checkpoint_callback.best_model_path)
else:
for path in glob.glob(os.path.join(self.ckpt_dir, "**/*.ckpt"), recursive=True):
ckpts.add_file(path)
experiment.log_artifact(ckpts)
class LogConfusionMatrix(Callback):
"""Generate confusion matrix every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module) -> None:
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate confusion matrix."""
if self.ready:
logger = get_wandb_logger(trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
# set figure size
plt.figure(figsize=(14, 8))
# set labels size
sn.set(font_scale=1.4)
# set font size
sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
# according to wandb docs this should also work but it crashes
# experiment.log(f{"confusion_matrix/{experiment.name}": plt})
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogF1PrecRecHeatmap(Callback):
"""Generate f1, precision, recall heatmap every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self, class_names: List[str] = None):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate f1, precision and recall heatmap."""
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
            # sklearn metrics expect (y_true, y_pred): pass targets first so precision/recall are not swapped
            f1 = f1_score(targets, preds, average=None)
            r = recall_score(targets, preds, average=None)
            p = precision_score(targets, preds, average=None)
data = [f1, p, r]
# set figure size
plt.figure(figsize=(14, 3))
# set labels size
sn.set(font_scale=1.2)
# set font size
sn.heatmap(
data,
annot=True,
annot_kws={"size": 10},
fmt=".3f",
yticklabels=["F1", "Precision", "Recall"],
)
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogImagePredictions(Callback):
"""Logs a validation batch and their predictions to wandb.
Example adapted from:
https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
"""
def __init__(self, num_samples: int = 8):
super().__init__()
self.num_samples = num_samples
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
            # get a validation batch from the validation data loader
val_samples = next(iter(trainer.datamodule.val_dataloader()))
val_imgs, val_labels = val_samples
# run the batch through the network
val_imgs = val_imgs.to(device=pl_module.device)
logits = pl_module(val_imgs)
preds = torch.argmax(logits, axis=-1)
# log the images as wandb Image
experiment.log(
{
f"Images/{experiment.name}": [
wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
for x, pred, y in zip(
val_imgs[: self.num_samples],
preds[: self.num_samples],
val_labels[: self.num_samples],
)
]
}
)
class LogDT(Callback):
"""Log the dt values (from NeurIPS 2021 LSSL submission)."""
def on_train_epoch_end(self, trainer, pl_module):
log_dict = {}
for name, m in pl_module.model.named_modules():
if pl_module.hparams.train.get('log_dt', False) \
and hasattr(m, "log_dt"):
log_dict[f"{name}.log_dt"] = (
m.log_dt.detach().cpu().numpy().flatten()
)
log_dict[f"{name}.log_dt.image"] = wandb.Image(
m.log_dt.detach().cpu().numpy().flatten().reshape(1, -1)
)
log_dict[f"{name}.log_dt"] = wandb.Table(
dataframe=pd.DataFrame(
{"log_dt": m.log_dt.detach().cpu().numpy().flatten()}
)
)
if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
if trainer.logger is not None:
trainer.logger.experiment.log(log_dict)
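# Minimal usage sketch for the callbacks above. This is an assumption-laden
# example, not part of the training pipeline: the LightningModule / DataModule
# ("model", "dm") are placeholders you would supply yourself, and it assumes a
# WandbLogger is attached so that get_wandb_logger() can find it.
if __name__ == "__main__":
    import pytorch_lightning as pl
    from pytorch_lightning.loggers import WandbLogger
    callbacks = [
        LogConfusionMatrix(),          # needs validation_step to return preds/targets
        LogF1PrecRecHeatmap(),
        LogImagePredictions(num_samples=8),
    ]
    trainer = pl.Trainer(
        logger=WandbLogger(project="demo"),
        callbacks=callbacks,
        max_epochs=1,
    )
    # trainer.fit(model, datamodule=dm)  # supply your own model / datamodule here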
| state-spaces-main | src/callbacks/wandb.py |
"""PL callbacks for monitoring computation speed.
Adapted from https://github.com/HazyResearch/transformers/blob/master/src/callbacks/speed_monitor.py.
In turn adapted from https://pytorch-lightning.readthedocs.io/en/latest/_modules/pytorch_lightning/callbacks/gpu_stats_monitor.html#GPUStatsMonitor.
We only need the speed monitoring, not the GPU monitoring.
"""
import time
from typing import Any
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.types import STEP_OUTPUT
class Timer(Callback):
"""Monitor the speed of each step and each epoch.
"""
def __init__(
self,
step: bool = True,
inter_step: bool = True,
epoch: bool = True,
val: bool = True,
):
super().__init__()
self._log_stats = AttributeDict( {
'step_time': step,
'inter_step_time': inter_step,
'epoch_time': epoch,
'val_time': val,
})
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_epoch_time = None
def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_step_time = None
self._snap_inter_step_time = None
self._snap_epoch_time = time.time()
def on_train_batch_start(
self,
trainer: Trainer,
pl_module: LightningModule,
batch: Any,
batch_idx: int,
) -> None:
if self._log_stats.step_time:
self._snap_step_time = time.time()
if not self._should_log(trainer):
return
logs = {}
if self._log_stats.inter_step_time and self._snap_inter_step_time:
# First log at beginning of second step
logs["timer/inter_step"] = (time.time() - self._snap_inter_step_time) # * 1000
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
) -> None:
if self._log_stats.inter_step_time:
self._snap_inter_step_time = time.time()
if not self._should_log(trainer):
return
logs = {}
if self._log_stats.step_time and self._snap_step_time:
logs["timer/step"] = (time.time() - self._snap_step_time) # * 1000
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule,) -> None:
logs = {}
if self._log_stats.epoch_time and self._snap_epoch_time:
logs["timer/epoch"] = time.time() - self._snap_epoch_time
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
def on_validation_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_val_time = time.time()
@rank_zero_only
def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule,) -> None:
logs = {}
if self._log_stats.val_time and self._snap_val_time:
logs["timer/validation"] = time.time() - self._snap_val_time
if trainer.logger: trainer.logger.log_metrics(logs) # , step=trainer.global_step)
@staticmethod
def _should_log(trainer) -> bool:
return (trainer.global_step + 1) % trainer.log_every_n_steps == 0 or trainer.should_stop
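# Minimal usage sketch: attach the Timer to a Trainer. Per-step and inter-step
# timings are only emitted every `log_every_n_steps` steps because of
# _should_log(); epoch and validation timings are logged once per epoch.
# The model/datamodule in the commented line are placeholders.
if __name__ == "__main__":
    timer = Timer(step=True, inter_step=True, epoch=True, val=True)
    trainer = Trainer(callbacks=[timer], log_every_n_steps=50, max_epochs=1)
    # trainer.fit(model, datamodule=dm)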
| state-spaces-main | src/callbacks/timer.py |
import pytorch_lightning as pl
import torch
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from omegaconf import OmegaConf
class TrackNorms(pl.Callback):
# TODO do callbacks happen before or after the method in the main LightningModule?
# @rank_zero_only # needed?
def on_after_training_step(self, batch, batch_idx, trainer: pl.Trainer, pl_module: pl.LightningModule):
# Log extra metrics
metrics = {}
if hasattr(pl_module, "_grad_norms"):
metrics.update(pl_module._grad_norms)
self.log_dict(
metrics,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
def on_after_backward(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
# example to inspect gradient information in tensorboard
if OmegaConf.select(trainer.hparams, 'train.track_grad_norms'): # TODO dot notation should work with omegaconf?
norms = {}
for name, p in pl_module.named_parameters():
if p.grad is None:
continue
# param_norm = float(p.grad.data.norm(norm_type))
param_norm = torch.mean(p.grad.data ** 2)
norms[f"grad_norm.{name}"] = param_norm
pl_module._grad_norms = norms
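# Minimal usage sketch. TrackNorms reads `train.track_grad_norms` from an
# OmegaConf config that this repo's training script attaches to the trainer as
# `trainer.hparams`; attaching it by hand below is an assumption for
# illustration, not stock Lightning behaviour. When enabled, the mean squared
# gradient of every parameter is stashed in pl_module._grad_norms.
if __name__ == "__main__":
    cfg = OmegaConf.create({"train": {"track_grad_norms": True}})
    trainer = pl.Trainer(callbacks=[TrackNorms()], max_epochs=1)
    trainer.hparams = cfg  # hypothetical wiring; mirror whatever the train script does
    # trainer.fit(model)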
| state-spaces-main | src/callbacks/norms.py |
"""Callbacks for progressive resizing of images, used in S4ND paper."""
import numpy as np
from pytorch_lightning.callbacks import Callback
import src.utils as utils
from src.utils import registry
class ProgressiveResizing(Callback):
def __init__(self, stage_params: list):
"""
stage_params is a list of dicts
e.g. stage_params = [
{'resolution': 4, 'epochs': 50}, # 32 x 32
{'resolution': 2, 'epochs': 30}, # 64 x 64
{'resolution': 1, 'epochs': 20}, # 128 x 128
]
"""
super().__init__()
assert len(stage_params) > 0, 'No stages specified'
assert all([{'resolution', 'epochs'} <= set(stage.keys()) for stage in stage_params]), \
'stage_params must contain keys: resolution and epochs'
self.stage_params = stage_params
self.stage_epochs_cume = np.cumsum([stage['epochs'] for stage in stage_params])
self._current_stage = 0
def _verify_stages(self, trainer, model):
# Double-check that stage parameters are correct, otherwise we'll fail in the middle of training
for stage in self.stage_params:
            if 'scheduler' in stage:
# Verify that we can actually create the scheduler when we need to update it in each stage
scheduler = utils.instantiate(registry.scheduler, {**model.hparams.scheduler, **stage['scheduler']}, trainer.optimizers[0])
del scheduler
def on_train_start(self, trainer, model) -> None:
# Verify all the stage parameters are correct
self._verify_stages(trainer, model)
print(f"Training starts at {trainer.current_epoch}")
if trainer.current_epoch == 0:
# Update the model to the first stage
self._update_to_current_stage(trainer, model)
else:
# Preemption or resumption of progressive resizing
# Update the stage to the current one
self._current_stage = int(np.searchsorted(self.stage_epochs_cume - 1, trainer.current_epoch))
self._starting_stage = np.any(trainer.current_epoch == self.stage_epochs_cume)
print("Progressive Resizing: Restarting at Stage {}".format(self._current_stage))
if self._starting_stage:
self._update_lr_scheduler(trainer, model)
# Set the dataloader and model
self._update_dataloaders(trainer, model)
self._update_model(trainer, model)
return super().on_train_start(trainer, model)
def _update_lr_scheduler(self, trainer, model):
        if 'scheduler' not in self.stage_params[self._current_stage]:
# No scheduler specified, so don't update the current scheduler
return
assert len(trainer.lr_schedulers) == 1
# Reinitialize the scheduler
# We don't need to carry over information from the last scheduler e.g. the last_epoch property,
# because that will mess with the new scheduler when we step it
hparams = {**model.hparams.scheduler, **self.stage_params[self._current_stage]['scheduler']}
# Note that passing in the optimizer below is okay: the scheduler will be reinitialized and doesn't seem to inherit any current lr info from the optimizer
trainer.lr_schedulers[0]['scheduler'] = utils.instantiate(registry.scheduler, hparams, trainer.optimizers[0])
print("\tChanged scheduler to {}".format(hparams))
def _update_dataloaders(self, trainer, model):
# Set the train resolution and reset the dataloader
model.hparams.loader.train_resolution = self.stage_params[self._current_stage]['resolution']
trainer.reset_train_dataloader(model)
print('\tChanged resolution to {}'.format(self.stage_params[self._current_stage]['resolution']))
def _update_model(self, trainer, model):
        if 'bandlimit' not in self.stage_params[self._current_stage]:
return
# Update the bandlimit value for the model: this is a hack to make sure the model is updated
# Iterate over all the modules
for module in model.modules():
if hasattr(module, 'bandlimit'):
module.bandlimit = self.stage_params[self._current_stage]['bandlimit']
print('\tChanged bandlimit to {}'.format(self.stage_params[self._current_stage]['bandlimit']))
def _update_to_current_stage(self, trainer, model):
print("Progressive Resizing: Moving to Stage {}".format(self._current_stage))
# Update the train dataloader, model and scheduler
self._update_dataloaders(trainer, model)
self._update_model(trainer, model)
self._update_lr_scheduler(trainer, model)
def on_train_epoch_end(self, trainer, model):
"""
        Check whether a new stage is reached for the next epoch, and if so, prepare it by
        changing the dataloader.
        (We look ahead one epoch so that the dataloader is ready before that epoch starts.)
"""
next_epoch = trainer.current_epoch + 1
# Check if stage should be increased
if next_epoch >= self.stage_epochs_cume[self._current_stage] and self._current_stage < len(self.stage_params) - 1:
self._current_stage += 1
self._update_to_current_stage(trainer, model)
return super().on_train_epoch_end(trainer, model)
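# Minimal construction sketch, mirroring the stage_params layout documented in
# __init__ above. The per-stage 'scheduler' and 'bandlimit' keys are optional
# and omitted here; resolutions are downsampling factors, so 4 -> 2 -> 1 walks
# from the coarsest images up to full resolution.
if __name__ == "__main__":
    stage_params = [
        {"resolution": 4, "epochs": 50},
        {"resolution": 2, "epochs": 30},
        {"resolution": 1, "epochs": 20},
    ]
    callback = ProgressiveResizing(stage_params=stage_params)
    print(callback.stage_epochs_cume)  # [ 50  80 100]: epochs at which stages end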
| state-spaces-main | src/callbacks/progressive_resizing.py |
"""Synthetic datasets."""
import numpy as np
import torch
import torchvision
from einops.layers.torch import Rearrange
from src.utils import permutations
from src.dataloaders.base import SequenceDataset
class Copying(SequenceDataset):
_name_ = "copying"
@property
def init_defaults(self):
return {
"l_noise": 100, # number of padding tokens
"l_memorize": 10, # number of tokens to memorize
"n_tokens": 10, # alphabet size
"lag": False,
"variable": False, # Randomly distribute memorization tokens throughout sequence instead of frontloading them
"variable_length": False, # Randomize number of tokens to memorize
"one_hot": False,
"reverse": False,
"static": False, # Use a static dataset of size n_train, otherwise always use random data with n_train per epoch
"n_train": 10000,
"n_eval": 1000,
}
@property
def d_input(self):
return self.n_tokens
@property
def d_output(self):
return self.n_tokens
@property
def l_output(self):
return self.l_noise if self.lag else self.l_memorize
def setup(self):
from .datasets.copying import CopyingEvalDataset, CopyingTrainDataset
if self.static: train_cls = CopyingEvalDataset
else: train_cls = CopyingTrainDataset
self.dataset_train = train_cls(
self.l_noise,
self.l_memorize,
self.n_tokens,
samples=self.n_train,
lag=self.lag,
variable=self.variable,
one_hot=self.one_hot,
reverse=self.reverse,
)
self.dataset_val = CopyingEvalDataset(
self.l_noise,
self.l_memorize,
self.n_tokens,
samples=self.n_eval,
lag=self.lag,
variable=self.variable,
one_hot=self.one_hot,
reverse=self.reverse,
)
self.dataset_test = None
def __str__(self):
return f"{self._name_}{self.l_noise}{'v' if self.variable else ''}"
class Adding(SequenceDataset):
_name_ = "adding"
d_input = 2
d_output = 1
l_output = 0
@property
def init_defaults(self):
return {
"l_max": 1000,
"n_samples": 50000,
"val_split": 0.1,
}
def setup(self):
from .datasets.adding import adding_static_dataset
self.dataset_train = adding_static_dataset(self.l_max, self.n_samples)
self.dataset_test = None
self.split_train_val(self.val_split)
def __str__(self):
return f"{self._name_}{self.l_max}"
class Reconstruct(SequenceDataset):
_name_ = "reconstruct"
@property
def init_defaults(self):
return {
"l_seq": 1024, # length of total sequence
"l_mem": 512, # length to reconstruct
"dt": 0.001,
"freq": 1.0,
"seed": 0,
"static": False, # Use a static dataset of size n_train, otherwise always use random data with n_train per epoch
"n_train": 10000,
"n_eval": 1000,
}
@property
def d_input(self):
return 1
@property
def d_output(self):
return self.l_mem
@property
def l_output(self):
return 0
def setup(self):
from .datasets.reconstruct import ReconstructEvalDataset, ReconstructTrainDataset
if self.static: train_cls = ReconstructEvalDataset
else: train_cls = ReconstructTrainDataset
self.dataset_train = train_cls(
samples=self.n_train,
l_seq=self.l_seq,
l_mem=self.l_mem,
dt=self.dt,
freq=self.freq,
seed=self.seed,
)
self.dataset_val = ReconstructEvalDataset(
samples=self.n_eval,
l_seq=self.l_seq,
l_mem=self.l_mem,
dt=self.dt,
freq=self.freq,
seed=self.seed,
)
self.dataset_test = None
def __str__(self):
raise NotImplementedError
class Delay(SequenceDataset):
_name_ = "delay"
@property
def init_defaults(self):
return {
"l_seq": 1024, # length of total sequence
"n_lag": 1, # length to reconstruct
"l_lag": None, # length to reconstruct
"dt": 0.001,
"freq": 100.0,
"static": False, # Use a static dataset of size n_train, otherwise always use random data with n_train per epoch
"n_train": 10000,
"n_eval": 1000,
}
@property
def d_input(self):
return 1
@property
def d_output(self):
# NOTE: To reproduce numbers from HTTYH paper, set this equal to 4. There was a bug in the implementation at the time
return self.n_lag
@property
def l_output(self):
return self.l_seq
def setup(self):
from .datasets.delay import DelayEvalDataset, DelayTrainDataset
if self.static: train_cls = DelayEvalDataset
else: train_cls = DelayTrainDataset
self.dataset_train = train_cls(
samples=self.n_train,
l_seq=self.l_seq,
n_lag=self.n_lag,
l_lag=self.l_lag,
dt=self.dt,
freq=self.freq,
)
self.dataset_val = DelayEvalDataset(
samples=self.n_eval,
l_seq=self.l_seq,
n_lag=self.n_lag,
l_lag=self.l_lag,
dt=self.dt,
freq=self.freq,
)
self.dataset_test = None
def __str__(self):
return f"{self._name_}{self.l_noise}{'v' if self.variable else ''}"
class MackeyGlass(SequenceDataset):
_name_ = "mackey"
@property
def init_defaults(self):
return {
"l_seq": 5000, # length of total sequence
"l_predict": 15, # length to reconstruct
"tau": 17, # Delay of MG system
"washout": 100,
"delta_t": 10,
"n_train": 1024,
"n_eval": 64,
}
@property
def d_input(self):
return 1
@property
def d_output(self):
return 1
@property
def l_output(self):
return self.l_seq
def setup(self):
from .datasets.mackey import mackey_glass
train_X, train_Y = mackey_glass(
n_samples=self.n_train,
l_seq=self.l_seq,
l_predict=self.l_predict,
tau=self.tau,
washout=self.washout,
delta_t=self.delta_t,
)
train_X, train_Y = torch.FloatTensor(train_X), torch.FloatTensor(train_Y)
val_X, val_Y = mackey_glass(
n_samples=self.n_eval,
l_seq=self.l_seq,
l_predict=self.l_predict,
tau=self.tau,
washout=self.washout,
delta_t=self.delta_t,
)
val_X, val_Y = torch.FloatTensor(val_X), torch.FloatTensor(val_Y)
self.dataset_train = torch.utils.data.TensorDataset(train_X, train_Y)
self.dataset_val = torch.utils.data.TensorDataset(val_X, val_Y)
self.dataset_test = None
def __str__(self):
return f"{self._name_}"
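# Self-contained illustration of a single copying-task example, matching the
# init_defaults above (l_noise padding tokens, l_memorize tokens to memorize,
# alphabet of n_tokens). This sketches the classic task layout and is not a
# call into CopyingTrainDataset, whose exact encoding lives in .datasets.copying.
if __name__ == "__main__":
    l_noise, l_memorize, n_tokens = 100, 10, 10
    # 0 is the blank token, n_tokens - 1 marks "recall now", and the values to
    # memorize are drawn from {1, ..., n_tokens - 2}
    to_memorize = torch.randint(1, n_tokens - 1, (l_memorize,))
    blanks = torch.zeros(l_noise, dtype=torch.long)
    markers = torch.full((l_memorize,), n_tokens - 1, dtype=torch.long)
    x = torch.cat([to_memorize, blanks, markers])  # length l_memorize + l_noise + l_memorize
    y = to_memorize.clone()                        # target: reproduce the memorized prefix
    print(x.shape, y.shape)  # torch.Size([120]) torch.Size([10])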
| state-spaces-main | src/dataloaders/synthetic.py |
"""Long Range Arena datasets."""
import io
import logging
import os
import pickle
from pathlib import Path
import torch
from torch import nn
import torch.nn.functional as F
import torchtext
import torchvision
from einops.layers.torch import Rearrange, Reduce
from PIL import Image # Only used for Pathfinder
from datasets import DatasetDict, Value, load_dataset
from src.dataloaders.base import default_data_path, SequenceDataset, ImageResolutionSequenceDataset
class IMDB(SequenceDataset):
_name_ = "imdb"
d_output = 2
l_output = 0
@property
def init_defaults(self):
return {
"l_max": 4096,
"level": "char",
"min_freq": 15,
"seed": 42,
"val_split": 0.0,
"append_bos": False,
"append_eos": True,
# 'max_vocab': 135,
"n_workers": 4, # Only used for tokenizing dataset before caching
}
@property
def n_tokens(self):
return len(self.vocab)
def prepare_data(self):
if self.cache_dir is None: # Just download the dataset
load_dataset(self._name_, cache_dir=self.data_dir)
else: # Process the dataset and save it
self.process_dataset()
def setup(self, stage=None):
"""If cache_dir is not None, we'll cache the processed dataset there."""
self.data_dir = self.data_dir or default_data_path / self._name_
self.cache_dir = self.data_dir / "cache"
assert self.level in [
"word",
"char",
], f"level {self.level} not supported"
if stage == "test" and hasattr(self, "dataset_test"):
return
dataset, self.tokenizer, self.vocab = self.process_dataset()
print(
f"IMDB {self.level} level | min_freq {self.min_freq} | vocab size {len(self.vocab)}"
)
dataset.set_format(type="torch", columns=["input_ids", "label"])
# Create all splits
dataset_train, self.dataset_test = dataset["train"], dataset["test"]
if self.val_split == 0.0:
# Use test set as val set, as done in the LRA paper
self.dataset_train, self.dataset_val = dataset_train, None
else:
train_val = dataset_train.train_test_split(
test_size=self.val_split, seed=self.seed
)
self.dataset_train, self.dataset_val = (
train_val["train"],
train_val["test"],
)
def _collate_fn(self, batch):
xs, ys = zip(*[(data["input_ids"], data["label"]) for data in batch])
lengths = torch.tensor([len(x) for x in xs])
xs = nn.utils.rnn.pad_sequence(
xs, padding_value=self.vocab["<pad>"], batch_first=True
)
ys = torch.tensor(ys)
return xs, ys, {"lengths": lengths}
# self._collate_fn = collate_batch
def process_dataset(self):
cache_dir = (
None if self.cache_dir is None else self.cache_dir / self._cache_dir_name
)
if cache_dir is not None:
if cache_dir.is_dir():
return self._load_from_cache(cache_dir)
dataset = load_dataset(self._name_, cache_dir=self.data_dir)
dataset = DatasetDict(train=dataset["train"], test=dataset["test"])
if self.level == "word":
tokenizer = torchtext.data.utils.get_tokenizer(
"spacy", language="en_core_web_sm"
)
else: # self.level == 'char'
tokenizer = list # Just convert a string to a list of chars
# Account for <bos> and <eos> tokens
l_max = self.l_max - int(self.append_bos) - int(self.append_eos)
tokenize = lambda example: {"tokens": tokenizer(example["text"])[:l_max]}
dataset = dataset.map(
tokenize,
remove_columns=["text"],
keep_in_memory=True,
load_from_cache_file=False,
num_proc=max(self.n_workers, 1),
)
vocab = torchtext.vocab.build_vocab_from_iterator(
dataset["train"]["tokens"],
min_freq=self.min_freq,
specials=(
["<pad>", "<unk>"]
+ (["<bos>"] if self.append_bos else [])
+ (["<eos>"] if self.append_eos else [])
),
)
vocab.set_default_index(vocab["<unk>"])
numericalize = lambda example: {
"input_ids": vocab(
(["<bos>"] if self.append_bos else [])
+ example["tokens"]
+ (["<eos>"] if self.append_eos else [])
)
}
dataset = dataset.map(
numericalize,
remove_columns=["tokens"],
keep_in_memory=True,
load_from_cache_file=False,
num_proc=max(self.n_workers, 1),
)
if cache_dir is not None:
self._save_to_cache(dataset, tokenizer, vocab, cache_dir)
return dataset, tokenizer, vocab
def _save_to_cache(self, dataset, tokenizer, vocab, cache_dir):
cache_dir = self.cache_dir / self._cache_dir_name
logger = logging.getLogger(__name__)
logger.info(f"Saving to cache at {str(cache_dir)}")
dataset.save_to_disk(str(cache_dir))
with open(cache_dir / "tokenizer.pkl", "wb") as f:
pickle.dump(tokenizer, f)
with open(cache_dir / "vocab.pkl", "wb") as f:
pickle.dump(vocab, f)
def _load_from_cache(self, cache_dir):
assert cache_dir.is_dir()
logger = logging.getLogger(__name__)
logger.info(f"Load from cache at {str(cache_dir)}")
dataset = DatasetDict.load_from_disk(str(cache_dir))
with open(cache_dir / "tokenizer.pkl", "rb") as f:
tokenizer = pickle.load(f)
with open(cache_dir / "vocab.pkl", "rb") as f:
vocab = pickle.load(f)
return dataset, tokenizer, vocab
@property
def _cache_dir_name(self):
return f"l_max-{self.l_max}-level-{self.level}-min_freq-{self.min_freq}-append_bos-{self.append_bos}-append_eos-{self.append_eos}"
class TabularDataset(torch.utils.data.Dataset):
def __init__(
self,
path,
format,
col_idx=None,
skip_header=False,
csv_reader_params=None,
):
"""
col_idx: the indices of the columns.
"""
if csv_reader_params is None:
csv_reader_params = {}
format = format.lower()
assert format in ["tsv", "csv"]
with io.open(os.path.expanduser(path), encoding="utf8") as f:
if format == "csv":
reader = torchtext.utils.unicode_csv_reader(f, **csv_reader_params)
elif format == "tsv":
reader = torchtext.utils.unicode_csv_reader(
f, delimiter="\t", **csv_reader_params
)
else:
reader = f
if skip_header:
next(reader)
self._data = [
line if col_idx is None else [line[c] for c in col_idx]
for line in reader
]
def __len__(self):
return len(self._data)
def __getitem__(self, idx):
return self._data[idx]
# LRA tokenizer renames ']' to 'X' and delete parentheses as their tokenizer removes
# non-alphanumeric characters.
# https://github.com/google-research/long-range-arena/blob/264227cbf9591e39dd596d2dc935297a2070bdfe/lra_benchmarks/listops/input_pipeline.py#L46
def listops_tokenizer(s):
return s.translate({ord("]"): ord("X"), ord("("): None, ord(")"): None}).split()
class ListOps(SequenceDataset):
_name_ = "listops"
d_output = 10
l_output = 0
@property
def init_defaults(self):
return {
"l_max": 2048,
"append_bos": False,
"append_eos": True,
# 'max_vocab': 20, # Actual size 18
"n_workers": 4, # Only used for tokenizing dataset
}
@property
def n_tokens(self):
return len(self.vocab)
@property
def _cache_dir_name(self):
return f"l_max-{self.l_max}-append_bos-{self.append_bos}-append_eos-{self.append_eos}"
def init(self):
if self.data_dir is None:
self.data_dir = default_data_path / self._name_
self.cache_dir = self.data_dir / self._cache_dir_name
def prepare_data(self):
if self.cache_dir is None:
for split in ["train", "val", "test"]:
split_path = self.data_dir / f"basic_{split}.tsv"
if not split_path.is_file():
raise FileNotFoundError(
f"""
File {str(split_path)} not found.
To get the dataset, download lra_release.gz from
https://github.com/google-research/long-range-arena,
then unzip it with tar -xvf lra_release.gz.
Then point data_dir to the listops-1000 directory.
"""
)
else: # Process the dataset and save it
self.process_dataset()
def setup(self, stage=None):
if stage == "test" and hasattr(self, "dataset_test"):
return
dataset, self.tokenizer, self.vocab = self.process_dataset()
self.vocab_size = len(self.vocab)
dataset.set_format(type="torch", columns=["input_ids", "Target"])
self.dataset_train, self.dataset_val, self.dataset_test = (
dataset["train"],
dataset["val"],
dataset["test"],
)
def collate_batch(batch):
xs, ys = zip(*[(data["input_ids"], data["Target"]) for data in batch])
lengths = torch.tensor([len(x) for x in xs])
xs = nn.utils.rnn.pad_sequence(
xs, padding_value=self.vocab["<pad>"], batch_first=True
)
ys = torch.tensor(ys)
return xs, ys, {"lengths": lengths}
self._collate_fn = collate_batch
def process_dataset(self):
cache_dir = (
None if self.cache_dir is None else self.cache_dir / self._cache_dir_name
)
if cache_dir is not None:
if cache_dir.is_dir():
return self._load_from_cache(cache_dir)
dataset = load_dataset(
"csv",
data_files={
"train": str(self.data_dir / "basic_train.tsv"),
"val": str(self.data_dir / "basic_val.tsv"),
"test": str(self.data_dir / "basic_test.tsv"),
},
delimiter="\t",
keep_in_memory=True,
)
tokenizer = listops_tokenizer
# Account for <bos> and <eos> tokens
l_max = self.l_max - int(self.append_bos) - int(self.append_eos)
tokenize = lambda example: {"tokens": tokenizer(example["Source"])[:l_max]}
dataset = dataset.map(
tokenize,
remove_columns=["Source"],
keep_in_memory=True,
load_from_cache_file=False,
num_proc=max(self.n_workers, 1),
)
vocab = torchtext.vocab.build_vocab_from_iterator(
dataset["train"]["tokens"],
specials=(
["<pad>", "<unk>"]
+ (["<bos>"] if self.append_bos else [])
+ (["<eos>"] if self.append_eos else [])
),
)
vocab.set_default_index(vocab["<unk>"])
numericalize = lambda example: {
"input_ids": vocab(
(["<bos>"] if self.append_bos else [])
+ example["tokens"]
+ (["<eos>"] if self.append_eos else [])
)
}
dataset = dataset.map(
numericalize,
remove_columns=["tokens"],
keep_in_memory=True,
load_from_cache_file=False,
num_proc=max(self.n_workers, 1),
)
if cache_dir is not None:
self._save_to_cache(dataset, tokenizer, vocab, cache_dir)
return dataset, tokenizer, vocab
def _save_to_cache(self, dataset, tokenizer, vocab, cache_dir):
cache_dir = self.cache_dir / self._cache_dir_name
logger = logging.getLogger(__name__)
logger.info(f"Saving to cache at {str(cache_dir)}")
dataset.save_to_disk(str(cache_dir))
with open(cache_dir / "tokenizer.pkl", "wb") as f:
pickle.dump(tokenizer, f)
with open(cache_dir / "vocab.pkl", "wb") as f:
pickle.dump(vocab, f)
def _load_from_cache(self, cache_dir):
assert cache_dir.is_dir()
logger = logging.getLogger(__name__)
logger.info(f"Load from cache at {str(cache_dir)}")
dataset = DatasetDict.load_from_disk(str(cache_dir))
with open(cache_dir / "tokenizer.pkl", "rb") as f:
tokenizer = pickle.load(f)
with open(cache_dir / "vocab.pkl", "rb") as f:
vocab = pickle.load(f)
return dataset, tokenizer, vocab
class PathFinderDataset(torch.utils.data.Dataset):
"""Path Finder dataset."""
# There's an empty file in the dataset
blacklist = {"pathfinder32/curv_baseline/imgs/0/sample_172.png"}
def __init__(self, data_dir, transform=None):
"""
Args:
data_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.data_dir = Path(data_dir).expanduser()
assert self.data_dir.is_dir(), f"data_dir {str(self.data_dir)} does not exist"
self.transform = transform
samples = []
# for diff_level in ['curv_baseline', 'curv_contour_length_9', 'curv_contour_length_14']:
for diff_level in ["curv_contour_length_14"]:
path_list = sorted(
list((self.data_dir / diff_level / "metadata").glob("*.npy")),
key=lambda path: int(path.stem),
)
assert path_list, "No metadata found"
for metadata_file in path_list:
with open(metadata_file, "r") as f:
for metadata in f.read().splitlines():
metadata = metadata.split()
image_path = Path(diff_level) / metadata[0] / metadata[1]
if (
str(Path(self.data_dir.stem) / image_path)
not in self.blacklist
):
label = int(metadata[3])
samples.append((image_path, label))
self.samples = samples
def __len__(self):
return len(self.samples)
def __getitem__(self, idx):
path, target = self.samples[idx]
# https://github.com/pytorch/vision/blob/9b29f3f22783112406d9c1a6db47165a297c3942/torchvision/datasets/folder.py#L247
with open(self.data_dir / path, "rb") as f:
sample = Image.open(f).convert("L") # Open in grayscale
if self.transform is not None:
sample = self.transform(sample)
return sample, target
class PathFinder(ImageResolutionSequenceDataset):
_name_ = "pathfinder"
d_input = 1
d_output = 2
l_output = 0
@property
def n_tokens(self):
if self.tokenize:
return 256
@property
def init_defaults(self):
return {
"resolution": 32,
"sequential": True,
"tokenize": False,
"center": True,
"pool": 1,
"val_split": 0.1,
"test_split": 0.1,
"seed": 42, # Controls the train/val/test split
}
def default_transforms(self):
transform_list = [torchvision.transforms.ToTensor()]
if self.pool > 1:
transform_list.append(
Reduce(
"1 (h h2) (w w2) -> 1 h w",
"mean",
h2=self.pool,
w2=self.pool,
)
)
if self.tokenize:
transform_list.append(
torchvision.transforms.Lambda(lambda x: (x * 255).long())
)
else:
if self.center:
transform_list.append(torchvision.transforms.Normalize(mean=0.5, std=0.5))
if self.sequential:
# If tokenize, it makes more sense to get rid of the channel dimension
transform_list.append(
Rearrange("1 h w -> (h w)")
if self.tokenize
else Rearrange("1 h w -> (h w) 1")
)
else:
transform_list.append(Rearrange("1 h w -> h w 1"))
return torchvision.transforms.Compose(transform_list)
def prepare_data(self):
if not self.data_dir.is_dir():
raise FileNotFoundError(
f"""
Directory {str(self.data_dir)} not found.
To get the dataset, download lra_release.gz from
https://github.com/google-research/long-range-arena,
then unzip it with tar -xvf lra_release.gz.
Then point data_dir to the pathfinderX directory, where X is either 32, 64, 128, or 256.
"""
)
def setup(self, stage=None):
if self.data_dir is None:
self.data_dir = (
default_data_path / self._name_ / f"pathfinder{self.resolution}"
)
if stage == "test" and hasattr(self, "dataset_test"):
return
# [2021-08-18] TD: I ran into RuntimeError: Too many open files.
# https://github.com/pytorch/pytorch/issues/11201
torch.multiprocessing.set_sharing_strategy("file_system")
dataset = PathFinderDataset(self.data_dir, transform=self.default_transforms())
len_dataset = len(dataset)
val_len = int(self.val_split * len_dataset)
test_len = int(self.test_split * len_dataset)
train_len = len_dataset - val_len - test_len
(
self.dataset_train,
self.dataset_val,
self.dataset_test,
) = torch.utils.data.random_split(
dataset,
[train_len, val_len, test_len],
generator=torch.Generator().manual_seed(self.seed),
)
class AAN(SequenceDataset):
_name_ = "aan"
d_output = 2 # Use accuracy instead of binary_accuracy
l_output = 0
@property
def n_tokens(self):
return len(self.vocab)
@property
def init_defaults(self):
return {
"l_max": 4000,
# 'max_vocab': 100, # Full size 98
"append_bos": False,
"append_eos": True,
"n_workers": 4, # For tokenizing only
}
@property
def _cache_dir_name(self):
return f"l_max-{self.l_max}-append_bos-{self.append_bos}-append_eos-{self.append_eos}"
def init(self):
if self.data_dir is None:
self.data_dir = default_data_path / self._name_
self.cache_dir = self.data_dir / self._cache_dir_name
def prepare_data(self):
if self.cache_dir is None:
for split in ["train", "eval", "test"]:
split_path = self.data_dir / f"new_aan_pairs.{split}.tsv"
if not split_path.is_file():
raise FileNotFoundError(
f"""
File {str(split_path)} not found.
To get the dataset, download lra_release.gz from
https://github.com/google-research/long-range-arena,
then unzip it with tar -xvf lra_release.gz.
Then point data_dir to the tsv_data directory.
"""
)
else: # Process the dataset and save it
self.process_dataset()
def setup(self, stage=None):
if stage == "test" and hasattr(self, "dataset_test"):
return
# [2021-08-18] TD: I ran into RuntimeError: Too many open files.
# https://github.com/pytorch/pytorch/issues/11201
torch.multiprocessing.set_sharing_strategy("file_system")
dataset, self.tokenizer, self.vocab = self.process_dataset()
# self.vocab_size = len(self.vocab)
print("AAN vocab size:", len(self.vocab))
dataset.set_format(type="torch", columns=["input_ids1", "input_ids2", "label"])
self.dataset_train, self.dataset_val, self.dataset_test = (
dataset["train"],
dataset["val"],
dataset["test"],
)
def collate_batch(batch):
xs1, xs2, ys = zip(
*[
(data["input_ids1"], data["input_ids2"], data["label"])
for data in batch
]
)
lengths1 = torch.tensor([len(x) for x in xs1])
lengths2 = torch.tensor([len(x) for x in xs2])
xs1 = nn.utils.rnn.pad_sequence(
xs1, padding_value=self.vocab["<pad>"], batch_first=True
)
xs2 = nn.utils.rnn.pad_sequence(
xs2, padding_value=self.vocab["<pad>"], batch_first=True
)
# Pad both to same length
# Shape (batch, length)
L = max(xs1.size(1), xs2.size(1))
xs1 = F.pad(xs1, (0, L-xs1.size(1)), value=self.vocab["<pad>"])
xs2 = F.pad(xs2, (0, L-xs2.size(1)), value=self.vocab["<pad>"])
ys = torch.tensor(ys)
# return xs1, xs2, ys, lengths1, lengths2
# Concatenate two batches
xs = torch.cat([xs1, xs2], dim=0)
lengths = torch.cat([lengths1, lengths2], dim=0)
return xs, ys, {"lengths": lengths}
self._collate_fn = collate_batch
def process_dataset(self):
cache_dir = (
None if self.cache_dir is None else self.cache_dir / self._cache_dir_name
)
if cache_dir is not None:
if cache_dir.is_dir():
return self._load_from_cache(cache_dir)
dataset = load_dataset(
"csv",
data_files={
"train": str(self.data_dir / "new_aan_pairs.train.tsv"),
"val": str(self.data_dir / "new_aan_pairs.eval.tsv"),
"test": str(self.data_dir / "new_aan_pairs.test.tsv"),
},
delimiter="\t",
column_names=["label", "input1_id", "input2_id", "text1", "text2"],
keep_in_memory=True,
) # True)
dataset = dataset.remove_columns(["input1_id", "input2_id"])
new_features = dataset["train"].features.copy()
new_features["label"] = Value("int32")
dataset = dataset.cast(new_features)
tokenizer = list # Just convert a string to a list of chars
# Account for <bos> and <eos> tokens
l_max = self.l_max - int(self.append_bos) - int(self.append_eos)
tokenize = lambda example: {
"tokens1": tokenizer(example["text1"])[:l_max],
"tokens2": tokenizer(example["text2"])[:l_max],
}
dataset = dataset.map(
tokenize,
remove_columns=["text1", "text2"],
keep_in_memory=True,
load_from_cache_file=False,
num_proc=max(self.n_workers, 1),
)
vocab = torchtext.vocab.build_vocab_from_iterator(
dataset["train"]["tokens1"] + dataset["train"]["tokens2"],
specials=(
["<pad>", "<unk>"]
+ (["<bos>"] if self.append_bos else [])
+ (["<eos>"] if self.append_eos else [])
),
)
vocab.set_default_index(vocab["<unk>"])
encode = lambda text: vocab(
(["<bos>"] if self.append_bos else [])
+ text
+ (["<eos>"] if self.append_eos else [])
)
numericalize = lambda example: {
"input_ids1": encode(example["tokens1"]),
"input_ids2": encode(example["tokens2"]),
}
dataset = dataset.map(
numericalize,
remove_columns=["tokens1", "tokens2"],
keep_in_memory=True,
load_from_cache_file=False,
num_proc=max(self.n_workers, 1),
)
if cache_dir is not None:
self._save_to_cache(dataset, tokenizer, vocab, cache_dir)
return dataset, tokenizer, vocab
def _save_to_cache(self, dataset, tokenizer, vocab, cache_dir):
cache_dir = self.cache_dir / self._cache_dir_name
logger = logging.getLogger(__name__)
logger.info(f"Saving to cache at {str(cache_dir)}")
dataset.save_to_disk(str(cache_dir))
with open(cache_dir / "tokenizer.pkl", "wb") as f:
pickle.dump(tokenizer, f)
with open(cache_dir / "vocab.pkl", "wb") as f:
pickle.dump(vocab, f)
def _load_from_cache(self, cache_dir):
assert cache_dir.is_dir()
logger = logging.getLogger(__name__)
logger.info(f"Load from cache at {str(cache_dir)}")
dataset = DatasetDict.load_from_disk(str(cache_dir))
with open(cache_dir / "tokenizer.pkl", "rb") as f:
tokenizer = pickle.load(f)
with open(cache_dir / "vocab.pkl", "rb") as f:
vocab = pickle.load(f)
return dataset, tokenizer, vocab
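# Quick illustration of listops_tokenizer on a toy ListOps expression: ']' is
# renamed to 'X' and parentheses are dropped before whitespace splitting,
# mirroring the LRA reference tokenizer linked above. The expression below is
# made up for demonstration.
if __name__ == "__main__":
    expr = "[MAX 4 3 [MIN 2 3 ] 1 0 ]"
    print(listops_tokenizer(expr))
    # ['[MAX', '4', '3', '[MIN', '2', '3', 'X', '1', '0', 'X']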
| state-spaces-main | src/dataloaders/lra.py |
"""Miscellaneous vision datasets."""
import os
from functools import partial
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import random_split
import torchvision
from einops import rearrange
from einops.layers.torch import Rearrange
from src.dataloaders.base import default_data_path, SequenceDataset
from src.utils import permutations
# NOTE: Cutout (used below when cutout=True) is expected to be provided elsewhere in the repo.
class CIFAR100(SequenceDataset):
_name_ = "cifar100"
d_output = 100
l_output = 0
@property
def init_defaults(self):
return {
"permute": None,
"grayscale": False,
"tokenize": False, # if grayscale, tokenize into discrete byte inputs
"augment": False,
"cutout": False,
"random_erasing": False,
"val_split": 0.1,
"seed": 42, # For validation split
}
@property
def d_input(self):
if self.grayscale:
if self.tokenize:
return 256
else:
return 1
else:
assert not self.tokenize
return 3
def setup(self):
if self.grayscale:
preprocessors = [
torchvision.transforms.Grayscale(),
torchvision.transforms.ToTensor(),
]
permutations_list = [
torchvision.transforms.Lambda(
lambda x: x.view(1, 1024).t()
) # (L, d_input)
]
if self.tokenize:
preprocessors.append(
torchvision.transforms.Lambda(lambda x: (x * 255).long())
)
permutations_list.append(Rearrange("l 1 -> l"))
else:
preprocessors.append(
torchvision.transforms.Normalize(
mean=122.6 / 255.0, std=61.0 / 255.0
)
)
else:
preprocessors = [
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
[0.507, 0.487, 0.441], [0.267, 0.256, 0.276]
),
]
permutations_list = [
torchvision.transforms.Lambda(
Rearrange("z h w -> (h w) z", z=3, h=32, w=32)
) # (L, d_input)
]
# Permutations and reshaping
if self.permute == "br":
permutation = permutations.bitreversal_permutation(1024)
print("bit reversal", permutation)
permutations_list.append(torchvision.transforms.Lambda(lambda x: x[permutation]))
elif self.permute == "snake":
permutation = permutations.snake_permutation(32, 32)
print("snake", permutation)
permutations_list.append(torchvision.transforms.Lambda(lambda x: x[permutation]))
elif self.permute == "hilbert":
permutation = permutations.hilbert_permutation(32)
print("hilbert", permutation)
permutations_list.append(torchvision.transforms.Lambda(lambda x: x[permutation]))
elif self.permute == "transpose":
permutation = permutations.transpose_permutation(32, 32)
transform = torchvision.transforms.Lambda(
lambda x: torch.cat([x, x[permutation]], dim=-1)
)
permutations_list.append(transform)
elif self.permute == "2d": # h, w, c
permutation = torchvision.transforms.Lambda(
Rearrange("(h w) c -> h w c", h=32, w=32)
)
permutations_list.append(permutation)
elif self.permute == "2d_transpose": # c, h, w
permutation = torchvision.transforms.Lambda(
Rearrange("(h w) c -> c h w", h=32, w=32)
)
permutations_list.append(permutation)
# Augmentation
if self.augment:
augmentations = [
torchvision.transforms.RandomCrop(
32, padding=4, padding_mode="symmetric"
),
torchvision.transforms.RandomHorizontalFlip(),
]
post_augmentations = []
            if self.cutout:
                post_augmentations.append(Cutout(1, 16))
if self.random_erasing:
# augmentations.append(RandomErasing())
pass
else:
augmentations, post_augmentations = [], []
        transforms_train = (
            augmentations + preprocessors + post_augmentations + permutations_list
        )
        transforms_eval = preprocessors + permutations_list
        transform_train = torchvision.transforms.Compose(transforms_train)
        transform_eval = torchvision.transforms.Compose(transforms_eval)
self.dataset_train = torchvision.datasets.CIFAR100(
f"{default_data_path}/{self._name_}",
train=True,
download=True,
transform=transform_train,
)
self.dataset_test = torchvision.datasets.CIFAR100(
f"{default_data_path}/{self._name_}", train=False, transform=transform_eval
)
self.split_train_val(self.val_split)
def __str__(self):
return f"{'p' if self.permute else 's'}{self._name_}"
class CIFAR10C(SequenceDataset):
"""
Make sure to specify a corruption using e.g. `dataset.corruption=gaussian_blur`.
Corruption options are: ['brightness', 'contrast', 'defocus_blur',
'elastic_transform', 'fog', 'frost', 'gaussian_blur', 'gaussian_noise',
'glass_blur', 'impulse_noise', 'jpeg_compression', 'motion_blur',
'pixelate', 'saturate', 'shot_noise', 'snow', 'spatter',
'speckle_noise', 'zoom_blur']
A full example of a command using this dataset:
`python -m train wandb=null experiment=s4-cifar dataset=cifar-c +train.validate_at_start=true dataset.corruption=gaussian_blur`
Note that the metric people use for CIFAR-C is mean corruption error (mCE), normalized by
the accuracy AlexNet gets on the dataset. You can use this spreadsheet to calculate mCE:
https://docs.google.com/spreadsheets/d/1RwqofJPHhtdRPG-dDO7wPp-aGn-AmwmU5-rpvTzrMHw
"""
_name_ = "cifar-c"
d_output = 10
l_output = 0
@property
def init_defaults(self):
return {
"corruption": None,
}
@property
def d_input(self):
return 3
def setup(self):
from src.dataloaders.datasets.cifarc import _CIFAR10C
self.data_dir = self.data_dir or default_data_path / "CIFAR-10-C"
# make sure self.corruptions was specified and is a valid choice
        assert self.corruption is not None, "You must specify a corruption. Options are: " + \
str(sorted([p.stem for p in self.data_dir.glob("*.npy") if not p.stem == 'labels']))
assert os.path.isfile(os.path.join(self.data_dir,f"{self.corruption}.npy")), \
f"Corruption '{self.corruption}' does not exist. Options are: " + \
str(sorted([p.stem for p in self.data_dir.glob("*.npy") if not p.stem == 'labels']))
preprocessors = [
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)
),
]
permutations_list = [
torchvision.transforms.Lambda(
Rearrange("z h w -> (h w) z", z=3, h=32, w=32)
)
]
transform_eval = torchvision.transforms.Compose(preprocessors + permutations_list)
x = np.load(os.path.join(self.data_dir,f"{self.corruption}.npy"))
y = np.load(os.path.join(self.data_dir,"labels.npy"))
self.dataset_test = _CIFAR10C(x, y, transform_eval)
def __str__(self):
return f"{'p' if self.permute else 's'}{self._name_}"
class CIFAR10Generation(SequenceDataset):
"""TODO there should be a way to combine this with main CIFAR class. the issue is making sure the torchvision.transforms are applied to output in the same way."""
_name_ = "cifargen"
@property
def init_defaults(self):
return {
"transpose": False,
"tokenize": True,
"mixture": 0,
"val_split": 0.02,
"seed": 42,
}
@property
def d_input(self):
if not self.tokenize:
return 1 # Returns None otherwise
@property
def d_output(self):
return 256 if self.mixture == 0 else 3 * self.mixture
@property
def n_tokens(self):
if self.tokenize:
return 3 * 256 + 1
@property
def n_classes(self): # TODO not used?
return 10
@property
def permute(self):
if self.transpose: # R R ... G G ... B B ...
return lambda x: rearrange(x, "... h w c -> ... (c h w) 1")
else: # R G B R G B ...
return lambda x: rearrange(x, "... h w c -> ... (h w c) 1")
@property
def transforms0(self):
"""Transforms applied before permutation"""
if self.tokenize:
return torchvision.transforms.Lambda(
lambda x: x + 1 + torch.arange(3) * 256
)
else:
# return torchvision.transforms.Normalize(mean=127.5, std=127.5)
return torchvision.transforms.Lambda(lambda x: (x.float() - 127.5) / 127.5)
@property
def transforms1(self):
"""Transforms applied after permutation"""
if self.tokenize:
return torchvision.transforms.Lambda(lambda x: x.squeeze(-1))
else:
return torchvision.transforms.Compose([])
def setup(self):
transforms = [
torchvision.transforms.ToTensor(), # (B, C, H, W)
Rearrange("c h w -> h w c"), # (B, H, W, C)
torchvision.transforms.Lambda(
lambda x: (x * 255).long()
), # Convert back to ints
]
transform = torchvision.transforms.Compose(transforms)
self.dataset_train = torchvision.datasets.CIFAR10(
f"{default_data_path}/cifar",
train=True,
download=True,
transform=transform,
)
self.dataset_test = torchvision.datasets.CIFAR10(
f"{default_data_path}/cifar", train=False, transform=transform
)
self.split_train_val(self.val_split)
def collate_batch(batch):
"""batch: list of (x, y) pairs"""
inputs, labels = zip(*batch)
x = torch.stack(inputs, dim=0)
z = torch.LongTensor(labels)
y = self.permute(x)
x = self.transforms0(x)
x = self.permute(x)
x = F.pad(x[:, :-1, :], (0, 0, 1, 0))
x = self.transforms1(x)
return x, y, z
self.collate_fn = collate_batch
def __str__(self): # TODO not updated
return f"{self._name_}"
class CIFAR10GenerationFactored(CIFAR10Generation):
"""Version of CIFAR-10 Density Estimation that keeps the sequence of length 1024 and factors the distribution over the 3 channels"""
_name_ = "cifargenf"
l_output = 1024 # Leaving this out or setting to None also works, to indicate that the entire length dimension is kept
@property
def init_defaults(self):
return {
"mixture": 0,
"val_split": 0.02,
"seed": 42,
}
@property
def d_input(self):
return 3
@property
def d_output(self):
return 3 * 256 if self.mixture == 0 else 10 * self.mixture
@property
def permute(self):
return lambda x: rearrange(x, "... h w c -> ... (h w) c")
@property
def transforms0(self):
return torchvision.transforms.Lambda(lambda x: (x.float() - 127.5) / 127.5)
# return torchvision.transforms.Normalize(mean=0.5, std=0.5)
@property
def transforms1(self):
return torchvision.transforms.Compose([])
class HMDB51(SequenceDataset):
# TODO(KG): refactor this dataset with new SequenceDataset structure
_name_ = "hmdb51"
d_input = 3
d_output = 51
l_output = 0
init_defaults = {
"split_dir": "test_train_splits", # path to splits
"video_dir": "videos", # path to videos
"clip_duration": 2, # Duration of sampled clip for each video, just the upper bound
"num_frames": 16, # frames per clip
"frame_size": 112, # square shape of image to use
"use_ddp": False, # using a distributed sampler / not
"num_gpus": 1,
"split_id": 1, # 1, 2, or 3
"val_split": 0.1, # split train into val also
"augment": "default", # which type of augment to use, "default" | "randaug" | "augmix"
# "num_rand_augments": 3, # num of random augmentations to use
# "use_augmix": False
}
def split_train_val(self, val_split):
"""
Child class needs to handle getting length of dataset differently.
"""
train_len = int(self.dataset_train.num_videos * (1.0 - val_split))
self.dataset_train, self.dataset_val = random_split(
self.dataset_train,
(train_len, self.dataset_train.num_videos - train_len),
generator=torch.Generator().manual_seed(
getattr(self, "seed", 42)
), # PL is supposed to have a way to handle seeds properly, but doesn't seem to work for us
)
def find_classes(self, directory):
"""Finds the class folders in a dataset.
See :class:`DatasetFolder` for details.
"""
classes = sorted(entry.name for entry in os.scandir(directory) if entry.is_dir())
if not classes:
raise FileNotFoundError(f"Couldn't find any class folder in {directory}.")
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def setup(self):
# for video datasets
import pytorch_lightning
import pytorchvideo.data
import torch.utils.data
from torch.utils.data import DistributedSampler, RandomSampler
self.pytorchvideo = pytorchvideo.data
self.RandomSampler = RandomSampler
self.DistributedSampler = DistributedSampler
from pytorchvideo.transforms import (ApplyTransformToKey, AugMix,
Normalize, Permute, RandAugment,
RandomShortSideScale, RemoveKey,
ShortSideScale,
UniformTemporalSubsample)
from torchvision.transforms import (CenterCrop, Compose, Lambda,
RandomCrop, RandomHorizontalFlip,
Resize)
self.split_path = self.data_dir or default_data_path / self._name_
self.split_path = os.path.join(self.split_path, self.split_dir)
self.video_path = self.data_dir or default_data_path / self._name_
self.video_path = os.path.join(self.video_path, self.video_dir)
# # sampler = RandomSampler # hardcode, ddp handled by PTL
# sampler = DistributedSampler if self.num_gpus > 1 else RandomSampler
# print("sampler chosen!", sampler)
# means = (0.43216, 0.394666, 0.37645)
# stds = (0.22803, 0.22145, 0.216989)
means = (110.2, 100.64, 96.0)
stds = (58.14765, 56.46975, 55.332195)
train_transform_list = []
train_transform_list += [UniformTemporalSubsample(self.num_frames),
Lambda(lambda x: x / 255.0),
Normalize(means, stds)]
if self.augment == "randaug": aug_paras = self.randaug
elif self.augment == "augmix": aug_paras = self.augmix
else: aug_paras = None
self.train_transform = pytorchvideo.transforms.create_video_transform(
mode="train",
video_key="video",
num_samples=self.num_frames,
convert_to_float=False,
video_mean=means,
video_std=stds,
min_size=256, # for ShortSideScale
crop_size=self.frame_size,
aug_type=self.augment,
aug_paras=aug_paras,
)
self.test_transform = pytorchvideo.transforms.create_video_transform(
mode="val",
video_key="video",
num_samples=self.num_frames,
convert_to_float=False,
video_mean=means,
video_std=stds,
min_size=256, # for ShortSideScale
crop_size=self.frame_size,
aug_type=self.augment,
aug_paras=aug_paras,
)
# get list of classes, and class_to_idx, to convert class str to int val
self.classes, self.class_to_idx = self.find_classes(self.video_path)
# @staticmethod
def collate_batch(batch, resolution=1):
videos, str_labels, video_idxs = zip(
*[
(data["video"], data["label"], data["video_index"])
for data in batch
]
)
# need to convert label string to int, and then to tensors
int_labels = [torch.tensor(self.class_to_idx[label]) for label in str_labels]
video_idx_labels = [torch.tensor(label) for label in video_idxs] # just convert to tensor
xs = torch.stack(videos) # shape = [b, c, t, h, w]
ys = torch.stack(int_labels)
video_idxs = torch.stack(video_idx_labels)
return xs, (ys, video_idxs)
self.collate_fn = collate_batch
def train_dataloader(self, **kwargs):
"""Need to overide so that we don't pass the shuffle=True parameter"""
sampler = self.DistributedSampler if self.num_gpus > 1 else self.RandomSampler
self.dataset_train = self.pytorchvideo.Hmdb51(
data_path=self.split_path,
video_path_prefix=self.video_path,
clip_sampler=self.pytorchvideo.make_clip_sampler("random", self.clip_duration),
decode_audio=False,
split_id=self.split_id,
split_type="train",
transform=self.train_transform,
video_sampler=sampler
)
return torch.utils.data.DataLoader(
self.dataset_train,
collate_fn=self.collate_fn,
**kwargs,
)
def val_dataloader(self, **kwargs):
kwargs['drop_last'] = False
sampler = partial(self.DistributedSampler, drop_last=kwargs['drop_last']) if self.num_gpus > 1 else self.RandomSampler
self.dataset_val = self.pytorchvideo.Hmdb51(
data_path=self.split_path,
video_path_prefix=self.video_path,
clip_sampler=self.pytorchvideo.make_clip_sampler("uniform", self.clip_duration),
decode_audio=False,
split_id=self.split_id,
split_type="test",
transform=self.test_transform,
video_sampler=sampler
)
return torch.utils.data.DataLoader(
self.dataset_val,
collate_fn=self.collate_fn,
**kwargs,
)
def test_dataloader(self, **kwargs):
kwargs['drop_last'] = False
sampler = partial(self.DistributedSampler, drop_last=kwargs['drop_last']) if self.num_gpus > 1 else self.RandomSampler
self.dataset_test = self.pytorchvideo.Hmdb51(
data_path=self.split_path,
video_path_prefix=self.video_path,
clip_sampler=self.pytorchvideo.make_clip_sampler("uniform", self.clip_duration),
decode_audio=False,
split_id=self.split_id,
split_type="test",
transform=self.test_transform,
video_sampler=sampler
)
return torch.utils.data.DataLoader(
self.dataset_test,
collate_fn=self.collate_fn,
**kwargs,
)
class ImageNet(SequenceDataset):
"""
.. figure:: https://3qeqpr26caki16dnhd19sv6by6v-wpengine.netdna-ssl.com/wp-content/uploads/2017/08/
Sample-of-Images-from-the-ImageNet-Dataset-used-in-the-ILSVRC-Challenge.png
:width: 400
:alt: Imagenet
Specs:
- 1000 classes
- Each image is (3 x varies x varies) (here we default to 3 x 224 x 224)
Imagenet train, val and test dataloaders.
The train set is the imagenet train.
The val split is taken from train if a val_split % is provided, or will be the same as test otherwise
The test set is the official imagenet validation set.
"""
_name_ = "imagenet"
d_input = 3
d_output = 1000
l_output = 0
init_defaults = {
"data_dir": None,
"cache_dir": None,
"image_size": 224,
"val_split": None, # currently not implemented
"train_transforms": None,
"val_transforms": None,
"test_transforms": None,
"mixup": None, # augmentation
"num_aug_repeats": 0,
"num_gpus": 1,
"shuffle": True, # for train
"loader_fft": False,
}
@property
def num_classes(self) -> int:
"""
Return:
1000
"""
return 1000
def _verify_splits(self, data_dir: str, split: str) -> None:
dirs = os.listdir(data_dir)
if split not in dirs:
raise FileNotFoundError(
f"a {split} Imagenet split was not found in {data_dir},"
f" make sure the folder contains a subfolder named {split}"
)
def prepare_data(self) -> None:
"""This method already assumes you have imagenet2012 downloaded. It validates the data using the meta.bin.
.. warning:: Please download imagenet on your own first.
"""
if not self.use_archive_dataset:
self._verify_splits(self.data_dir, "train")
self._verify_splits(self.data_dir, "val")
else:
if not self.data_dir.is_file():
raise FileNotFoundError(f"""Archive file {str(self.data_dir)} not found.""")
def setup(self, stage=None):
"""Creates train, val, and test dataset."""
from typing import Any, Callable, List, Optional, Union
import hydra # for mixup
from pl_bolts.transforms.dataset_normalizations import \
imagenet_normalization
from torch.utils.data import Dataset
from torch.utils.data.dataloader import default_collate
from torchvision.datasets import ImageFolder
# for access in other methods
self.imagenet_normalization = imagenet_normalization
self.default_collate = default_collate
self.hydra = hydra
self.ImageFolder = ImageFolder
if self.mixup is not None:
self.mixup_fn = hydra.utils.instantiate(self.mixup)
else:
self.mixup_fn = None
self.dir_path = self.data_dir or default_data_path / self._name_
if stage == "fit" or stage is None:
self.set_phase([self.image_size])
# train_transforms = (self.train_transform() if self.train_transforms is None
# else hydra.utils.instantiate(self.train_transforms))
# val_transforms = (self.val_transform() if self.val_transforms is None
# else hydra.utils.instantiate(self.val_transforms))
# self.dataset_train = ImageFolder(self.dir_path / 'val', # modded
# transform=train_transforms)
# if self.val_split > 0.:
# # this will create the val split
# self.split_train_val(self.val_split)
# # will use the test split as val by default
# else:
# self.dataset_val = ImageFolder(self.dir_path / 'val', transform=val_transforms)
# # modded, override (for debugging)
# self.dataset_train = self.dataset_val
if stage == "test" or stage is None:
test_transforms = (self.val_transform() if self.test_transforms is None
else hydra.utils.instantiate(self.test_transforms))
self.dataset_test = ImageFolder(os.path.join(self.dir_path, 'val'), transform=test_transforms)
# # modded, override (for debugging)
# self.dataset_test = self.dataset_val
def set_phase(self, stage_params=[224], val_upsample=False, test_upsample=False):
"""
        For progressive learning.
        Modifies the train transform parameters during training (just the image size for now)
        and creates a new train dataset, which train_dataloader will reload every
        n epochs (set in the config).
        Later it will also be possible to change the RandAug magnitude and the mixup alpha here.
        stage_params: list of values to change; a single [image_size] for now.
"""
img_size = int(stage_params[0])
# self.train_transforms["input_size"] = img_size
if val_upsample:
self.val_transforms["input_size"] = img_size
train_transforms = (self.train_transform() if self.train_transforms is None
else self.hydra.utils.instantiate(self.train_transforms))
val_transforms = (self.val_transform() if self.val_transforms is None
else self.hydra.utils.instantiate(self.val_transforms))
if self.loader_fft:
train_transforms = torchvision.transforms.Compose(
train_transforms.transforms + [
torchvision.transforms.Lambda(lambda x: torch.fft.rfftn(x, s=tuple([2*l for l in x.shape[1:]])))
]
)
val_transforms = torchvision.transforms.Compose(
val_transforms.transforms + [
torchvision.transforms.Lambda(lambda x: torch.fft.rfftn(x, s=tuple([2*l for l in x.shape[1:]])))
]
)
self.dataset_train = self.ImageFolder(self.dir_path / 'train',
transform=train_transforms)
if self.val_split > 0.:
# this will create the val split
self.split_train_val(self.val_split)
# will use the test split as val by default
else:
self.dataset_val = self.ImageFolder(self.dir_path / 'val', transform=val_transforms)
# # modded, override (for debugging)
# self.dataset_train = self.dataset_val
# not sure if normally you upsample test also
if test_upsample:
self.test_transforms["input_size"] = img_size
test_transforms = (self.val_transform() if self.test_transforms is None
else self.hydra.utils.instantiate(self.test_transforms))
self.dataset_test = self.ImageFolder(os.path.join(self.dir_path, 'val'), transform=test_transforms)
## modded, override (for debugging)
# self.dataset_test = self.dataset_val
# could modify mixup by reinstantiating self.mixup_fn (later maybe)
def train_transform(self):
"""The standard imagenet transforms.
.. code-block:: python
transforms.Compose([
transforms.RandomResizedCrop(self.image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
"""
preprocessing = torchvision.transforms.Compose(
[
torchvision.transforms.RandomResizedCrop(self.image_size),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
self.imagenet_normalization(),
]
)
return preprocessing
def val_transform(self):
"""The standard imagenet transforms for validation.
.. code-block:: python
transforms.Compose([
transforms.Resize(self.image_size + 32),
transforms.CenterCrop(self.image_size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
"""
preprocessing = torchvision.transforms.Compose(
[
torchvision.transforms.Resize(self.image_size + 32),
torchvision.transforms.CenterCrop(self.image_size),
torchvision.transforms.ToTensor(),
self.imagenet_normalization(),
]
)
return preprocessing
# def train_dataloader(self, train_resolution, eval_resolutions, **kwargs):
# """ The train dataloader """
# return (self._data_loader(self.dataset_train, shuffle=True, mixup=self.mixup_fn, **kwargs))
def train_dataloader(self, **kwargs):
""" The train dataloader """
if self.num_aug_repeats == 0 or self.num_gpus == 1:
shuffle = self.shuffle
sampler = None
else:
shuffle = False
from timm.data.distributed_sampler import RepeatAugSampler
sampler = RepeatAugSampler(self.dataset_train, num_repeats=self.num_aug_repeats)
# calculate resolution
resolution = self.image_size / self.train_transforms['input_size'] # usually 1.0
return (self._data_loader(self.dataset_train, shuffle=shuffle, mixup=self.mixup_fn, sampler=sampler, resolution=resolution, **kwargs))
def val_dataloader(self, **kwargs):
""" The val dataloader """
kwargs['drop_last'] = False
# update batch_size for eval if provided
batch_size = kwargs.get("batch_size_eval", None) or kwargs.get("batch_size")
kwargs["batch_size"] = batch_size
# calculate resolution
resolution = self.image_size / self.val_transforms['input_size'] # usually 1.0 or 0.583
return (self._data_loader(self.dataset_val, resolution=resolution, **kwargs))
def test_dataloader(self, **kwargs):
""" The test dataloader """
kwargs['drop_last'] = False
# update batch_size for test if provided
batch_size = kwargs.get("batch_size_test", None) or kwargs.get("batch_size_eval", None) or kwargs.get("batch_size")
kwargs["batch_size"] = batch_size
# calculate resolution
resolution = self.image_size / self.test_transforms.get("input_size", self.val_transforms['input_size'])
return (self._data_loader(self.dataset_test, resolution=resolution, **kwargs))
def _data_loader(self, dataset, resolution, shuffle=False, mixup=None, sampler=None, **kwargs):
# collate_fn = (lambda batch: mixup(*self.default_collate(batch))) if mixup is not None else self.default_collate
collate_fn = (lambda batch: mixup(*self.collate_with_resolution(batch, resolution))) if mixup is not None else lambda batch: self.collate_with_resolution(batch, resolution)
        # hacked - can't pass this arg to the dataloader, but it is used to update the batch_size for val / test
kwargs.pop('batch_size_eval', None)
kwargs.pop('batch_size_test', None)
return torch.utils.data.DataLoader(
dataset,
collate_fn=collate_fn,
shuffle=shuffle,
sampler=sampler,
**kwargs,
)
def collate_with_resolution(self, batch, resolution):
stuff = self.default_collate(batch)
return *stuff, {"resolution": resolution}
# def _data_loader(self, dataset, mixup=None, **kwargs):
# collate_fn = (lambda batch: mixup(*self.default_collate(batch))) if mixup is not None else self.default_collate
# return torch.utils.data.DataLoader(
# dataset, collate_fn=collate_fn, **kwargs
# )
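def _resolution_collate_example():
    """Illustrative sketch (not used by the datamodule): shows the batch structure
    produced by ImageNet.collate_with_resolution -- the default-collated (x, y)
    pair followed by a kwargs dict carrying the relative resolution. The tensors
    below are dummy data."""
    from torch.utils.data.dataloader import default_collate
    batch = [(torch.zeros(3, 4, 4), 0), (torch.ones(3, 4, 4), 1)]
    x, y = default_collate(batch)
    resolution = 1.0  # image_size / input_size, as computed in the dataloader methods
    return x, y, {"resolution": resolution}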
class ImageNetA(ImageNet):
_name_ = 'imagenet-a'
init_defaults = {
'transforms': None,
}
def setup(self):
from pl_bolts.transforms.dataset_normalizations import \
imagenet_normalization
from torch.utils.data.dataloader import default_collate
from torchvision.datasets import ImageFolder
self.imagenet_normalization = imagenet_normalization
self.default_collate = default_collate
self.ImageFolder = ImageFolder
self.dir_path = self.data_dir or default_data_path / self._name_
# self.transforms["input_size"] = 224
transforms = (
self.val_transform() if self.transforms is None
else self.hydra.utils.instantiate(self.transforms)
)
self.dataset_train = None
self.dataset_val = None
self.dataset_test = self.ImageFolder(self.dir_path, transform=transforms)
class ImageNetR(ImageNetA):
_name_ = 'imagenet-r'
class ImageNetC(ImageNet):
_name_ = 'imagenet-c'
init_defaults = {
'transforms': None,
}
def setup(self):
from pl_bolts.transforms.dataset_normalizations import \
imagenet_normalization
from torch.utils.data.dataloader import default_collate
from torchvision.datasets import ImageFolder
self.imagenet_normalization = imagenet_normalization
self.default_collate = default_collate
self.ImageFolder = ImageFolder
self.dir_path = self.data_dir or default_data_path / self._name_
# self.transforms["input_size"] = 224
transforms = (
self.val_transform() if self.transforms is None
else self.hydra.utils.instantiate(self.transforms)
)
        variants = os.listdir(self.dir_path)
subvariants = {variant: os.listdir(os.path.join(self.dir_path, variant)) for variant in variants}
self.dataset_test = {
            f'{variant}/{subvariant}': self.ImageFolder(
os.path.join(os.path.join(self.dir_path, variant), subvariant),
transform=transforms,
)
for variant in variants
for subvariant in subvariants[variant]
}
self.dataset_train = None
self.dataset_val = None
# self.dataset_test = self.ImageFolder(self.dir_path, transform=transforms)
def val_dataloader(self, **kwargs):
"""Using the same dataloader as test, a hack for zero shot eval without training"""
kwargs['drop_last'] = False
kwargs["batch_size"] = kwargs.get("batch_size_eval", None) or kwargs.get("batch_size")
return {
name: self._data_loader(dataset, resolution=1, **kwargs)
for name, dataset in self.dataset_test.items()
}
def test_dataloader(self, **kwargs):
kwargs['drop_last'] = False
kwargs["batch_size"] = kwargs.get("batch_size_eval", None) or kwargs.get("batch_size")
return {
name: self._data_loader(dataset, resolution=1, **kwargs)
for name, dataset in self.dataset_test.items()
}
class ImageNetP(ImageNet):
_name_ = 'imagenet-p'
init_defaults = {
'transforms': None,
}
def setup(self):
from pl_bolts.transforms.dataset_normalizations import \
imagenet_normalization
from src.dataloaders.utils.video_loader import VideoFolder
from torch.utils.data.dataloader import default_collate
self.imagenet_normalization = imagenet_normalization
self.default_collate = default_collate
self.VideoFolder = VideoFolder
self.dir_path = self.data_dir or default_data_path / self._name_
# self.transforms["input_size"] = 224
transforms = (
self.val_transform() if self.transforms is None
else self.hydra.utils.instantiate(self.transforms)
)
variants = os.listdir(self.dir_path)
# subvariants = {variant: os.listdir(os.path.join(self.dir_path, variant)) for variant in variants}
self.dataset_test = {
f'{variant}': self.VideoFolder(
os.path.join(self.dir_path, variant),
transform=transforms,
)
for variant in variants
# for subvariant in subvariants[variant]
}
self.dataset_train = None
self.dataset_val = None
# self.dataset_test = self.ImageFolder(self.dir_path, transform=transforms)
def val_dataloader(self, train_resolution, eval_resolutions, **kwargs):
"""Using the same dataloader as test, a hack for zero shot eval without training"""
kwargs['drop_last'] = False
kwargs["batch_size"] = kwargs.get("batch_size_eval", None) or kwargs.get("batch_size")
return {
name: self._data_loader(dataset, **kwargs)
for name, dataset in self.dataset_test.items()
}
def test_dataloader(self, train_resolution, eval_resolutions, **kwargs):
kwargs['drop_last'] = False
kwargs["batch_size"] = kwargs.get("batch_size_eval", None) or kwargs.get("batch_size")
return {
name: self._data_loader(dataset, **kwargs)
for name, dataset in self.dataset_test.items()
}
| state-spaces-main | src/dataloaders/vision.py |
"""ET Dataset from Informer Paper.
Dataset: https://github.com/zhouhaoyi/ETDataset
Dataloader: https://github.com/zhouhaoyi/Informer2020
"""
from typing import List
import os
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
import torch
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
import warnings
warnings.filterwarnings("ignore")
from src.dataloaders.base import SequenceDataset, default_data_path
class TimeFeature:
def __init__(self):
pass
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
pass
def __repr__(self):
return self.__class__.__name__ + "()"
class SecondOfMinute(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
"""Day of month encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.day - 1) / 30.0 - 0.5
class DayOfYear(TimeFeature):
"""Day of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.dayofyear - 1) / 365.0 - 0.5
class MonthOfYear(TimeFeature):
"""Month of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.month - 1) / 11.0 - 0.5
class WeekOfYear(TimeFeature):
"""Week of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.isocalendar().week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
"""
Returns a list of time features that will be appropriate for the given frequency string.
Parameters
----------
freq_str
Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
"""
features_by_offsets = {
offsets.YearEnd: [],
offsets.QuarterEnd: [MonthOfYear],
offsets.MonthEnd: [MonthOfYear],
offsets.Week: [DayOfMonth, WeekOfYear],
offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
offsets.Minute: [
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
offsets.Second: [
SecondOfMinute,
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
}
offset = to_offset(freq_str)
for offset_type, feature_classes in features_by_offsets.items():
if isinstance(offset, offset_type):
return [cls() for cls in feature_classes]
supported_freq_msg = f"""
Unsupported frequency {freq_str}
The following frequencies are supported:
Y - yearly
alias: A
M - monthly
W - weekly
D - daily
B - business days
H - hourly
T - minutely
alias: min
S - secondly
"""
raise RuntimeError(supported_freq_msg)
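def _example_frequency_features():
    """Illustrative sketch (not called by the dataloaders): for an hourly frequency
    the offset resolves to offsets.Hour, so the returned features are
    [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear], each encoded in [-0.5, 0.5]."""
    feats = time_features_from_frequency_str("h")
    index = pd.date_range("2020-01-01", periods=3, freq="h")
    return np.vstack([feat(index) for feat in feats]).transpose(1, 0)  # shape (3, 4)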
def time_features(dates, timeenc=1, freq="h"):
"""
    > `time_features` takes in a `dates` dataframe with a 'date' column and extracts the date down to `freq`, where freq can be any of the following if `timeenc` is 0:
> * m - [month]
> * w - [month]
> * d - [month, day, weekday]
> * b - [month, day, weekday]
> * h - [month, day, weekday, hour]
> * t - [month, day, weekday, hour, *minute]
>
> If `timeenc` is 1, a similar, but different list of `freq` values are supported (all encoded between [-0.5 and 0.5]):
> * Q - [month]
> * M - [month]
> * W - [Day of month, week of year]
> * D - [Day of week, day of month, day of year]
> * B - [Day of week, day of month, day of year]
> * H - [Hour of day, day of week, day of month, day of year]
> * T - [Minute of hour*, hour of day, day of week, day of month, day of year]
> * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year]
*minute returns a number from 0-3 corresponding to the 15 minute period it falls into.
"""
if timeenc == 0:
dates["month"] = dates.date.apply(lambda row: row.month, 1)
dates["day"] = dates.date.apply(lambda row: row.day, 1)
dates["weekday"] = dates.date.apply(lambda row: row.weekday(), 1)
dates["hour"] = dates.date.apply(lambda row: row.hour, 1)
dates["minute"] = dates.date.apply(lambda row: row.minute, 1)
dates["minute"] = dates.minute.map(lambda x: x // 15)
freq_map = {
"y": [],
"m": ["month"],
"w": ["month"],
"d": ["month", "day", "weekday"],
"b": ["month", "day", "weekday"],
"h": ["month", "day", "weekday", "hour"],
"t": ["month", "day", "weekday", "hour", "minute"],
}
return dates[freq_map[freq.lower()]].values
if timeenc == 1:
dates = pd.to_datetime(dates.date.values)
return np.vstack(
[feat(dates) for feat in time_features_from_frequency_str(freq)]
).transpose(1, 0)
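def _example_time_features():
    """Illustrative sketch (not called by the dataloaders) of the two encodings
    produced by time_features. The toy DataFrame below is made-up data."""
    dates = pd.DataFrame({"date": pd.date_range("2020-01-01", periods=4, freq="h")})
    raw = time_features(dates.copy(), timeenc=0, freq="h")     # integer columns: month, day, weekday, hour
    scaled = time_features(dates.copy(), timeenc=1, freq="h")  # floats in [-0.5, 0.5], one column per TimeFeature
    return raw, scaled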
class StandardScaler:
def __init__(self):
self.mean = 0.0
self.std = 1.0
def fit(self, data):
self.mean = data.mean(0)
self.std = data.std(0)
def transform(self, data):
mean = (
torch.from_numpy(self.mean).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.mean
)
std = (
torch.from_numpy(self.std).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.std
)
return (data - mean) / std
def inverse_transform(self, data):
mean = (
torch.from_numpy(self.mean).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.mean
)
std = (
torch.from_numpy(self.std).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.std
)
return (data * std) + mean
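def _example_standard_scaler():
    """Illustrative sketch (not called by the dataloaders): fit the scaler on a toy
    train block and check that inverse_transform undoes transform."""
    scaler = StandardScaler()
    train = np.random.randn(100, 7) * 3.0 + 1.0
    scaler.fit(train)
    z = scaler.transform(train)              # roughly zero mean / unit variance per column
    recovered = scaler.inverse_transform(z)  # close to the original values
    assert np.allclose(recovered, train, atol=1e-5)
    return z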
class InformerDataset(Dataset):
def __init__(
self,
root_path,
flag="train",
size=None,
features="S",
data_path="ETTh1.csv",
target="OT",
scale=True,
inverse=False,
timeenc=0,
freq="h",
cols=None,
eval_stamp=False,
eval_mask=False,
):
# size [seq_len, label_len, pred_len]
# info
        if size is None:
self.seq_len = 24 * 4 * 4
self.label_len = 24 * 4
self.pred_len = 24 * 4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ["train", "test", "val"]
type_map = {"train": 0, "val": 1, "test": 2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols = cols
self.eval_stamp = eval_stamp
self.eval_mask = eval_mask
self.forecast_horizon = self.pred_len
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def _borders(self, df_raw):
num_train = int(len(df_raw) * 0.7)
num_test = int(len(df_raw) * 0.2)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
border2s = [num_train, num_train + num_vali, len(df_raw)]
return border1s, border2s
def _process_columns(self, df_raw):
if self.cols:
cols = self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns)
cols.remove(self.target)
cols.remove("date")
return df_raw[["date"] + cols + [self.target]]
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
df_raw = self._process_columns(df_raw)
border1s, border2s = self._borders(df_raw)
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features == "M" or self.features == "MS":
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features == "S":
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0] : border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[["date"]][border1:border2]
df_stamp["date"] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
seq_x = np.concatenate(
[seq_x, np.zeros((self.pred_len, self.data_x.shape[-1]))], axis=0
)
if self.inverse:
seq_y = np.concatenate(
[
self.data_x[r_begin : r_begin + self.label_len],
self.data_y[r_begin + self.label_len : r_end],
],
0,
)
raise NotImplementedError
else:
# seq_y = self.data_y[r_begin:r_end] # OLD in Informer codebase
seq_y = self.data_y[s_end:r_end]
# OLD in Informer codebase
# seq_x_mark = self.data_stamp[s_begin:s_end]
# seq_y_mark = self.data_stamp[r_begin:r_end]
if self.eval_stamp:
mark = self.data_stamp[s_begin:r_end]
else:
mark = self.data_stamp[s_begin:s_end]
mark = np.concatenate([mark, np.zeros((self.pred_len, mark.shape[-1]))], axis=0)
if self.eval_mask:
mask = np.concatenate([np.zeros(self.seq_len), np.ones(self.pred_len)], axis=0)
else:
mask = np.concatenate([np.zeros(self.seq_len), np.zeros(self.pred_len)], axis=0)
mask = mask[:, None]
# Add the mask to the timestamps: # 480, 5
# mark = np.concatenate([mark, mask[:, np.newaxis]], axis=1)
seq_x = seq_x.astype(np.float32)
seq_y = seq_y.astype(np.float32)
if self.timeenc == 0:
mark = mark.astype(np.int64)
else:
mark = mark.astype(np.float32)
mask = mask.astype(np.int64)
return torch.tensor(seq_x), torch.tensor(seq_y), torch.tensor(mark), torch.tensor(mask)
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
@property
def d_input(self):
return self.data_x.shape[-1]
@property
def d_output(self):
if self.features in ["M", "S"]:
return self.data_x.shape[-1]
elif self.features == "MS":
return 1
else:
raise NotImplementedError
@property
def n_tokens_time(self):
if self.freq == 'h':
return [13, 32, 7, 24]
elif self.freq == 't':
return [13, 32, 7, 24, 4]
else:
raise NotImplementedError
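# Shape summary for InformerDataset.__getitem__ (a reading aid for the indexing logic
# above, assuming the default sizes seq_len=384, label_len=96, pred_len=96):
#
#   seq_x : (seq_len + pred_len, d_input)          -- context window followed by zeros over the horizon
#   seq_y : (pred_len, d_input)                    -- future targets only (data_y[s_end:r_end])
#   mark  : (seq_len + pred_len, n_time_features)  -- timestamps, zero-padded over the horizon
#                                                     unless eval_stamp is set
#   mask  : (seq_len + pred_len, 1)                -- ones over the horizon when eval_mask is set, else zeros
#
# The exact widths depend on the `size`, `features`, `eval_stamp` and `eval_mask` arguments.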
class _Dataset_ETT_hour(InformerDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _borders(self, df_raw):
border1s = [
0,
12 * 30 * 24 - self.seq_len,
12 * 30 * 24 + 4 * 30 * 24 - self.seq_len,
]
border2s = [
12 * 30 * 24,
12 * 30 * 24 + 4 * 30 * 24,
12 * 30 * 24 + 8 * 30 * 24,
]
return border1s, border2s
def _process_columns(self, df_raw):
return df_raw
@property
def n_tokens_time(self):
assert self.freq == "h"
return [13, 32, 7, 24]
class _Dataset_ETT_minute(_Dataset_ETT_hour):
def __init__(self, data_path="ETTm1.csv", freq="t", **kwargs):
super().__init__(data_path=data_path, freq=freq, **kwargs)
def _borders(self, df_raw):
border1s = [
0,
12 * 30 * 24 * 4 - self.seq_len,
12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len,
]
border2s = [
12 * 30 * 24 * 4,
12 * 30 * 24 * 4 + 4 * 30 * 24 * 4,
12 * 30 * 24 * 4 + 8 * 30 * 24 * 4,
]
return border1s, border2s
@property
def n_tokens_time(self):
assert self.freq == "t"
return [13, 32, 7, 24, 4]
class _Dataset_Weather(InformerDataset):
def __init__(self, data_path="WTH.csv", target="WetBulbCelsius", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class _Dataset_ECL(InformerDataset):
def __init__(self, data_path="ECL.csv", target="MT_320", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class InformerSequenceDataset(SequenceDataset):
@property
def n_tokens_time(self):
# Shape of the dates: depends on `timeenc` and `freq`
return self.dataset_train.n_tokens_time # data_stamp.shape[-1]
@property
def d_input(self):
return self.dataset_train.d_input
@property
def d_output(self):
return self.dataset_train.d_output
@property
def l_output(self):
return self.dataset_train.pred_len
def _get_data_filename(self, variant):
return self.variants[variant]
_collate_arg_names = ["mark", "mask"] # Names of the two extra tensors that the InformerDataset returns
def setup(self):
self.data_dir = self.data_dir or default_data_path / 'informer' / self._name_
self.dataset_train = self._dataset_cls(
root_path=self.data_dir,
flag="train",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
self.dataset_val = self._dataset_cls(
root_path=self.data_dir,
flag="val",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
self.dataset_test = self._dataset_cls(
root_path=self.data_dir,
flag="test",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
class ETTHour(InformerSequenceDataset):
_name_ = "etth"
_dataset_cls = _Dataset_ETT_hour
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "ETTh1.csv",
1: "ETTh2.csv",
}
class ETTMinute(InformerSequenceDataset):
_name_ = "ettm"
_dataset_cls = _Dataset_ETT_minute
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "t",
"cols": None,
}
variants = {
0: "ETTm1.csv",
1: "ETTm2.csv",
}
class Weather(InformerSequenceDataset):
_name_ = "weather"
_dataset_cls = _Dataset_Weather
init_defaults = {
"size": None,
"features": "S",
"target": "WetBulbCelsius",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "WTH.csv",
}
class ECL(InformerSequenceDataset):
_name_ = "ecl"
_dataset_cls = _Dataset_ECL
init_defaults = {
"size": None,
"features": "S",
"target": "MT_320",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "ECL.csv",
}
| state-spaces-main | src/dataloaders/et.py |
from . import audio, basic, et, lm, lra, synthetic, ts, vision
from .base import SequenceDataset
| state-spaces-main | src/dataloaders/__init__.py |
"""Time series datasets, especially for medical time series."""
import pickle
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from src.dataloaders.base import default_data_path, SequenceDataset, deprecated
class BIDMC(SequenceDataset):
"""BIDMC datasets for Respiratory Rate / Heart Rate / Oxygen Saturation regression"""
_name_ = "bidmc"
d_input = 2
@property
def d_output(self):
return 2 if self.prediction else 1
@property
def l_output(self):
return 4000 if self.prediction else 0
@property
def init_defaults(self):
return {
"target": "RR", # 'RR' | 'HR' | 'SpO2'
"prediction": False,
"reshuffle": True,
}
def setup(self):
self.data_dir = self.data_dir or default_data_path / self._name_
split = "reshuffle" if self.reshuffle else "original"
# X: (dataset_size, length, d_input)
# y: (dataset_size)
X_train = np.load(self.data_dir / self.target / split / "trainx.npy")
y_train = np.load(self.data_dir / self.target / split / "trainy.npy")
X_val = np.load(self.data_dir / self.target / split / "validx.npy")
y_val = np.load(self.data_dir / self.target / split / "validy.npy")
X_test = np.load(self.data_dir / self.target / split / "testx.npy")
y_test = np.load(self.data_dir / self.target / split / "testy.npy")
if self.prediction:
y_train = np.pad(X_train[:, 1:, :], ((0, 0), (0, 1), (0, 0)))
y_val = np.pad(X_val[:, 1:, :], ((0, 0), (0, 1), (0, 0)))
y_test = np.pad(X_test[:, 1:, :], ((0, 0), (0, 1), (0, 0)))
self.dataset_train = torch.utils.data.TensorDataset(
torch.FloatTensor(X_train), torch.FloatTensor(y_train)
)
self.dataset_val = torch.utils.data.TensorDataset(
torch.FloatTensor(X_val), torch.FloatTensor(y_val)
)
self.dataset_test = torch.utils.data.TensorDataset(
torch.FloatTensor(X_test), torch.FloatTensor(y_test)
)
def __str__(self):
split = "reshuffle" if self.reshuffle else "original"
return f"BIDMC{self.target}_{split}"
class EegDataset(SequenceDataset):
_name_ = "eegseizure"
init_defaults = {
"l_output": 0,
"d_input": 19,
"d_output": 2,
"machine": "gemini",
"hospital": "stanford",
"clip_len": 60,
"stride": 60,
"offset": 0,
"ss_clip_len": 0,
"use_age": False,
"gnn": False,
"fft": False,
"rerun_meerkatdp": False,
"streaming_eval": False,
"sz_label_sensitivity": 1,
}
def setup(self):
import meerkat as mk
from meerkat.contrib.eeg import (build_stanford_eeg_dp,
build_streaming_stanford_eeg_dp,
build_tuh_eeg_dp)
from torch.utils.data import WeightedRandomSampler
assert self.sz_label_sensitivity <= self.clip_len
# from src.dataloaders.eegseizure import balance_dp, split_dp, merge_in_split
if self.machine == "gemini":
data_dir = "/media/4tb_hdd"
data_dir_tuh = "/media/nvme_data/siyitang/TUH_eeg_seq_v1.5.2"
raw_tuh_data_dir = "/media/nvme_data/TUH/v1.5.2"
elif self.machine == "zaman":
data_dir = "/data/ssd1crypt/datasets"
data_dir_tuh = "/data/ssd1crypt/datasets/TUH_v1.5.2"
raw_tuh_data_dir = data_dir_tuh
if self.hospital == "tuh":
dp = build_tuh_eeg_dp(
f"{data_dir_tuh}/resampled_signal",
f"{raw_tuh_data_dir}/edf",
clip_len=self.clip_len,
offset=self.offset,
ss_clip_len=self.ss_clip_len,
gnn=self.gnn,
skip_terra_cache=self.rerun_meerkatdp,
).load()
else:
dp = build_stanford_eeg_dp(
f"{data_dir}/eeg_data/stanford/stanford_mini",
f"{data_dir}/eeg_data/lpch/lpch",
"/home/ksaab/Documents/meerkat/meerkat/contrib/eeg/file_markers",
clip_len=self.clip_len,
offset=self.offset,
skip_terra_cache=self.rerun_meerkatdp,
).load()
if self.streaming_eval:
streaming_dp = build_streaming_stanford_eeg_dp(
f"{data_dir}/SEC-0.1/stanford",
f"{data_dir}/SEC-0.1/lpch",
"/data/crypt/eegdbs/SEC-0.1/SEC-0.1-sz-annotations-match-lvis",
clip_len=self.clip_len,
stride=self.stride,
sz_label_sensitivity=self.sz_label_sensitivity,
train_frac=0.0,
valid_frac=0.5,
test_frac=0.5,
skip_terra_cache=self.rerun_meerkatdp,
).load()
# remove patients in dp that are in streaming_dp
streaming_patients = streaming_dp["patient_id"].unique()
keep_patient_mask = np.array(
[patient not in streaming_patients for patient in dp["patient_id"]]
)
dp = dp.lz[keep_patient_mask]
# shuffle datapanel
np.random.seed(0)
ndxs = np.arange(len(dp))
np.random.shuffle(ndxs)
dp = dp.lz[ndxs]
val_split = "valid"
test_split = "test"
input_key = "input"
target_key = "target"
train_mask = dp["split"] == "train"
val_mask = dp["split"] == val_split
test_mask = dp["split"] == test_split
if self.fft:
input_key = "fft_input"
self.d_input = 1900
if self.ss_clip_len > 0:
target_key = "ss_output"
self.d_output = 19*100 #int(19 * (200* self.ss_clip_len / 2))
self.l_output = self.ss_clip_len
# train_mask = np.logical_and(train_mask.data,(dp["target"]==1).data)
# val_mask = np.logical_and(val_mask.data,(dp["target"]==1).data)
# test_mask = np.logical_and(test_mask.data,(dp["target"]==1).data)
self.dataset_train = dp.lz[train_mask][
input_key, target_key, "age", "target"
]
self.dataset_val = dp.lz[val_mask][
input_key, target_key, "age", "target"
]
self.dataset_test = dp.lz[test_mask][
input_key, target_key, "age"
]
# define whats returned by datasets
if self.gnn:
lambda_fnc = lambda x: (
x[input_key][0],
torch.tensor(x[target_key]).to(torch.long),
x[input_key][1], # graph supports
)
if self.ss_clip_len > 0:
lambda_fnc = lambda x: (
x[input_key][0],
torch.tensor(x[target_key][0]).to(torch.long),
torch.tensor(x[target_key][0]).to(torch.long), # decoder takes y as well
x[input_key][1], # graph supports
)
if self.use_age:
lambda_fnc = lambda x: (
x[input_key][0],
torch.tensor(x[target_key]).to(torch.long),
x[input_key][1], # graph supports
torch.tensor(x["age"]).to(torch.float),
)
else:
lambda_fnc = lambda x: (
x[input_key][0],
torch.tensor(x[target_key]).to(torch.long)
if self.ss_clip_len == 0
else x[target_key],
)
if self.use_age:
lambda_fnc = lambda x: (
x[input_key][0],
torch.tensor(x[target_key]).to(torch.long)
if self.ss_clip_len == 0
else x[target_key],
torch.tensor(x["age"]).to(torch.float),
)
self.dataset_train["examples"] = mk.LambdaColumn(self.dataset_train, lambda_fnc)
if self.ss_clip_len == 0:
# define train sampler
            train_target = self.dataset_train["target"].data.astype(int)
class_sample_count = np.array(
[len(np.where(train_target == t)[0]) for t in np.unique(train_target)]
)
weight = 1.0 / class_sample_count
samples_weight = np.array([weight[t] for t in train_target])
samples_weight = torch.from_numpy(samples_weight)
samples_weight = samples_weight.double()
else:
samples_weight = torch.ones(len(self.dataset_train))
self.train_sampler = WeightedRandomSampler(samples_weight, len(samples_weight))
self.dataset_val["examples"] = mk.LambdaColumn(self.dataset_val, lambda_fnc)
self.dataset_test["examples"] = mk.LambdaColumn(self.dataset_test, lambda_fnc)
print(
f"Train:{len(self.dataset_train)} Validation:{len(self.dataset_val)} Test:{len(self.dataset_test)}"
)
if self.streaming_eval:
self.stream_dataset_val = streaming_dp.lz[streaming_dp["split"] == "valid"][
input_key, "target", "age", "clip_start"
]
self.stream_dataset_test = streaming_dp.lz[streaming_dp["split"] == "test"][
input_key, "target", "age", "clip_start"
]
self.stream_dataset_val["examples"] = mk.LambdaColumn(
self.stream_dataset_val,
lambda x: (
x[input_key],
torch.tensor(x["target"]).to(torch.long),
torch.tensor(x["age"]).to(torch.float),
torch.tensor(x["clip_start"]).to(torch.float),
),
)
self.stream_dataset_test["examples"] = mk.LambdaColumn(
self.stream_dataset_test,
lambda x: (
x[input_key],
torch.tensor(x["target"]).to(torch.long),
torch.tensor(x["age"]).to(torch.float),
torch.tensor(x["clip_start"]).to(torch.float),
),
)
def train_dataloader(self, train_resolution, eval_resolutions, **kwargs):
# No collate_fn is passed in: the default one does the right thing
return torch.utils.data.DataLoader(
self.dataset_train["examples"],
sampler=self.train_sampler,
**kwargs,
)
def val_dataloader(self, train_resolution, eval_resolutions, **kwargs):
# No collate_fn is passed in: the default one does the right thing
return torch.utils.data.DataLoader(
self.dataset_val["examples"],
**kwargs,
)
def test_dataloader(self, train_resolution, eval_resolutions, **kwargs):
# No collate_fn is passed in: the default one does the right thing
return torch.utils.data.DataLoader(
self.dataset_test["examples"],
**kwargs,
)
def stream_val_dataloader(self, train_resolution, eval_resolutions, **kwargs):
if self.streaming_eval:
# No collate_fn is passed in: the default one does the right thing
return torch.utils.data.DataLoader(
self.stream_dataset_val["examples"],
**kwargs,
)
def stream_test_dataloader(self, train_resolution, eval_resolutions, **kwargs):
if self.streaming_eval:
# No collate_fn is passed in: the default one does the right thing
return torch.utils.data.DataLoader(
self.stream_dataset_test["examples"],
**kwargs,
)
class PTBXL(SequenceDataset):
_name_ = "ptbxl"
init_defaults = {
"sampling_rate": 100,
"duration": 10,
"nleads": 12,
"ctype": "superdiagnostic",
"min_samples": 0,
}
@property
def d_input(self):
return self.nleads
def load_raw_data(self, df):
import wfdb
if self.sampling_rate == 100:
data = [wfdb.rdsamp(str(self.data_dir / f)) for f in df.filename_lr]
else:
data = [wfdb.rdsamp(str(self.data_dir / f)) for f in df.filename_hr]
data = np.array([signal for signal, meta in data])
return data
def setup(self):
self.data_dir = self.data_dir or default_data_path / self._name_
self.L = self.sampling_rate * self.duration
self.l_output = 0 # TODO(Priya): This changes with every multilabel setting?
# PTBXL imports
import ast
import pandas as pd
from sklearn import preprocessing
# load and convert annotation data
Y = pd.read_csv(self.data_dir / "ptbxl_database.csv", index_col="ecg_id")
Y.scp_codes = Y.scp_codes.apply(lambda x: ast.literal_eval(x))
# Load scp_statements.csv for diagnostic aggregation
agg_df = pd.read_csv(self.data_dir / "scp_statements.csv", index_col=0)
if self.ctype in [
"diagnostic",
"subdiagnostic",
"superdiagnostic",
"superdiagnostic_multiclass",
]:
agg_df = agg_df[agg_df.diagnostic == 1]
def aggregate_superdiagnostic_multiclass(y_dic):
lhmax = -1 # Superclass has the highest likelihood
superclass = ""
for key in y_dic.keys():
if key in agg_df.index and y_dic[key] > lhmax:
lhmax = y_dic[key]
superclass = agg_df.loc[key].diagnostic_class
return superclass
def aggregate_all_diagnostic(y_dic):
tmp = []
for key in y_dic.keys():
if key in agg_df.index:
tmp.append(key)
return list(set(tmp))
def aggregate_subdiagnostic(y_dic):
tmp = []
for key in y_dic.keys():
if key in agg_df.index:
c = agg_df.loc[key].diagnostic_subclass
if str(c) != "nan":
tmp.append(c)
return list(set(tmp))
def aggregate_superdiagnostic(y_dic):
tmp = []
for key in y_dic.keys():
if key in agg_df.index:
c = agg_df.loc[key].diagnostic_class
if str(c) != "nan":
tmp.append(c)
return list(set(tmp))
# Apply aggregation
if self.ctype == "superdiagnostic_multiclass":
Y["target"] = Y.scp_codes.apply(aggregate_superdiagnostic_multiclass)
elif self.ctype == "subdiagnostic":
Y["target"] = Y.scp_codes.apply(aggregate_subdiagnostic)
elif self.ctype == "superdiagnostic":
Y["target"] = Y.scp_codes.apply(aggregate_superdiagnostic)
elif self.ctype == "diagnostic":
Y["target"] = Y.scp_codes.apply(aggregate_all_diagnostic)
elif self.ctype in ["form", "rhythm"]:
if self.ctype == "form":
agg_df = agg_df[agg_df.form == 1]
else:
agg_df = agg_df[agg_df.rhythm == 1]
def aggregate_form_rhythm(y_dic):
tmp = []
for key in y_dic.keys():
if key in agg_df.index:
c = key
if str(c) != "nan":
tmp.append(c)
return list(set(tmp))
Y["target"] = Y.scp_codes.apply(aggregate_form_rhythm)
elif self.ctype == "all":
Y["target"] = Y.scp_codes.apply(lambda x: list(set(x.keys())))
counts = pd.Series(np.concatenate(Y.target.values)).value_counts()
counts = counts[counts > self.min_samples]
Y.target = Y.target.apply(
lambda x: list(set(x).intersection(set(counts.index.values)))
)
Y["target_len"] = Y.target.apply(lambda x: len(x))
Y = Y[Y.target_len > 0]
# Load raw signal data
X = self.load_raw_data(Y)
# Split data into train, val and test
val_fold = 9
test_fold = 10
# Convert labels to multiclass or multilabel targets
if self.ctype == "superdiagnostic_multiclass":
le = preprocessing.LabelEncoder()
else:
le = preprocessing.MultiLabelBinarizer()
le.fit(Y.target)
y = le.transform(Y.target)
self.d_output = len(le.classes_)
# Train
X_train = X[np.where((Y.strat_fold != val_fold) & (Y.strat_fold != test_fold))]
y_train = y[np.where((Y.strat_fold != val_fold) & (Y.strat_fold != test_fold))]
# Val
X_val = X[np.where(Y.strat_fold == val_fold)]
y_val = y[np.where(Y.strat_fold == val_fold)]
# Test
X_test = X[np.where(Y.strat_fold == test_fold)]
y_test = y[np.where(Y.strat_fold == test_fold)]
def preprocess_signals(X_train, X_validation, X_test):
# Standardize data such that mean 0 and variance 1
ss = preprocessing.StandardScaler()
ss.fit(np.vstack(X_train).flatten()[:, np.newaxis].astype(float))
return (
apply_standardizer(X_train, ss),
apply_standardizer(X_validation, ss),
apply_standardizer(X_test, ss),
)
def apply_standardizer(X, ss):
X_tmp = []
for x in X:
x_shape = x.shape
X_tmp.append(ss.transform(x.flatten()[:, np.newaxis]).reshape(x_shape))
X_tmp = np.array(X_tmp)
return X_tmp
X_train, X_val, X_test = preprocess_signals(X_train, X_val, X_test)
self.dataset_train = torch.utils.data.TensorDataset(
torch.tensor(X_train).to(torch.float), torch.tensor(y_train)
)
self.dataset_val = torch.utils.data.TensorDataset(
torch.tensor(X_val).to(torch.float), torch.tensor(y_val)
)
self.dataset_test = torch.utils.data.TensorDataset(
torch.tensor(X_test).to(torch.float), torch.tensor(y_test)
)
print(
f"Train:{len(X_train)} Validation:{len(X_val)} Test:{len(X_test)} Num_classes:{self.d_output}"
)
self.collate_fn = None
class IMU(SequenceDataset):
"""IMU (Inertial Measurement Units) dataset from an experimental study on Parkinson patients"""
_name_ = "imu"
d_input = 36 # len(imu_config)
l_output = 0
@property
def d_output(self):
        return self.d_input if self.prediction else 2
@property
def init_defaults(self):
return {
#'target': 'RR', # 'RR' | 'HR' | 'SpO2'
"prediction": False,
"reshuffle": True,
}
def setup(self):
self.data_dir = self.data_dir or default_data_path / self._name_
self.collate_fn = None
split = "reshuffle" if self.reshuffle else "original"
# X: (dataset_size, length, d_input)
# y: (dataset_size)
# dictionary of config name to list of features
# choose sensors06_chest_lumbar_ankles_feet by default
# ignore this now as we're only using a fixed set of features
with open(self.data_dir / "sensor_configs.pkl", "rb") as config_f:
imu_config_map = pickle.load(config_f)
imu_config = imu_config_map["sensors06_chest_lumbar_ankles_feet"]
with open(self.data_dir / "0_train_matrices.pkl", "rb") as f_handle:
tr = pickle.load(f_handle)
with open(self.data_dir / "0_val_matrices.pkl", "rb") as f_handle:
val = pickle.load(f_handle)
with open(self.data_dir / "0_test_matrices.pkl", "rb") as f_handle:
te = pickle.load(f_handle)
X_train = tr[0]
y_train = tr[1].astype(int)
X_val = val[0]
y_val = val[1].astype(int)
X_test = te[0]
y_test = te[1].astype(int)
self.dataset_train = torch.utils.data.TensorDataset(
torch.FloatTensor(X_train), torch.tensor(y_train, dtype=torch.long)
)
self.dataset_val = torch.utils.data.TensorDataset(
torch.FloatTensor(X_val), torch.tensor(y_val, dtype=torch.long)
)
self.dataset_test = torch.utils.data.TensorDataset(
torch.FloatTensor(X_test), torch.tensor(y_test, dtype=torch.long)
)
def __str__(self):
split = "reshuffle" if self.reshuffle else "original"
return f"IMU_{split}"
| state-spaces-main | src/dataloaders/ts.py |
"""Implementation of basic benchmark datasets used in S4 experiments: MNIST, CIFAR10 and Speech Commands."""
import numpy as np
import torch
import torchvision
from einops.layers.torch import Rearrange
from src.utils import permutations
from src.dataloaders.base import default_data_path, ImageResolutionSequenceDataset, ResolutionSequenceDataset, SequenceDataset
class MNIST(SequenceDataset):
_name_ = "mnist"
d_input = 1
d_output = 10
l_output = 0
L = 784
@property
def init_defaults(self):
return {
"permute": True,
"val_split": 0.1,
"seed": 42, # For train/val split
}
def setup(self):
self.data_dir = self.data_dir or default_data_path / self._name_
transform_list = [
torchvision.transforms.ToTensor(),
torchvision.transforms.Lambda(lambda x: x.view(self.d_input, self.L).t()),
] # (L, d_input)
if self.permute:
# below is another permutation that other works have used
# permute = np.random.RandomState(92916)
# permutation = torch.LongTensor(permute.permutation(784))
permutation = permutations.bitreversal_permutation(self.L)
transform_list.append(
torchvision.transforms.Lambda(lambda x: x[permutation])
)
# TODO does MNIST need normalization?
# torchvision.transforms.Normalize((0.1307,), (0.3081,)) # normalize inputs
transform = torchvision.transforms.Compose(transform_list)
self.dataset_train = torchvision.datasets.MNIST(
self.data_dir,
train=True,
download=True,
transform=transform,
)
self.dataset_test = torchvision.datasets.MNIST(
self.data_dir,
train=False,
transform=transform,
)
self.split_train_val(self.val_split)
def __str__(self):
return f"{'p' if self.permute else 's'}{self._name_}"
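def _example_bitreversal_permutation():
    """Illustrative sketch (not called by the dataloaders): the `permute` option above
    reorders the flattened pixel sequence with a bit-reversal permutation. For a toy
    length-8 sequence, bit-reversing the 3-bit indices gives the order
    [0, 4, 2, 6, 1, 5, 3, 7]."""
    perm = permutations.bitreversal_permutation(8)
    x = torch.arange(8).unsqueeze(-1)  # stands in for an (L, d_input) sequence
    return x[perm]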
class CIFAR10(ImageResolutionSequenceDataset):
_name_ = "cifar"
d_output = 10
l_output = 0
@property
def init_defaults(self):
return {
"permute": None,
"grayscale": False,
"tokenize": False, # if grayscale, tokenize into discrete byte inputs
"augment": False,
"cutout": False,
"rescale": None,
"random_erasing": False,
"val_split": 0.1,
"seed": 42, # For validation split
}
@property
def d_input(self):
if self.grayscale:
if self.tokenize:
return 256
else:
return 1
else:
assert not self.tokenize
return 3
def setup(self):
img_size = 32
if self.rescale:
img_size //= self.rescale
if self.grayscale:
preprocessors = [
torchvision.transforms.Grayscale(),
torchvision.transforms.ToTensor(),
]
permutations_list = [
torchvision.transforms.Lambda(
lambda x: x.view(1, img_size * img_size).t()
) # (L, d_input)
]
if self.tokenize:
preprocessors.append(
torchvision.transforms.Lambda(lambda x: (x * 255).long())
)
permutations_list.append(Rearrange("l 1 -> l"))
else:
preprocessors.append(
torchvision.transforms.Normalize(
mean=122.6 / 255.0, std=61.0 / 255.0
)
)
else:
preprocessors = [
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize(
(0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261)
),
]
permutations_list = [
torchvision.transforms.Lambda(
Rearrange("z h w -> (h w) z", z=3, h=img_size, w=img_size)
) # (L, d_input)
]
# Permutations and reshaping
if self.permute == "br":
permutation = permutations.bitreversal_permutation(img_size * img_size)
print("bit reversal", permutation)
permutations_list.append(torchvision.transforms.Lambda(lambda x: x[permutation]))
elif self.permute == "snake":
permutation = permutations.snake_permutation(img_size, img_size)
print("snake", permutation)
permutations_list.append(torchvision.transforms.Lambda(lambda x: x[permutation]))
elif self.permute == "hilbert":
permutation = permutations.hilbert_permutation(img_size)
print("hilbert", permutation)
permutations_list.append(torchvision.transforms.Lambda(lambda x: x[permutation]))
elif self.permute == "transpose":
permutation = permutations.transpose_permutation(img_size, img_size)
transform = torchvision.transforms.Lambda(
lambda x: torch.cat([x, x[permutation]], dim=-1)
)
permutations_list.append(transform)
elif self.permute == "2d": # h, w, c
permutation = torchvision.transforms.Lambda(
Rearrange("(h w) c -> h w c", h=img_size, w=img_size)
)
permutations_list.append(permutation)
elif self.permute == "2d_transpose": # c, h, w
permutation = torchvision.transforms.Lambda(
Rearrange("(h w) c -> c h w", h=img_size, w=img_size)
)
permutations_list.append(permutation)
# Augmentation
if self.augment:
augmentations = [
torchvision.transforms.RandomCrop(
img_size, padding=4, padding_mode="symmetric"
),
torchvision.transforms.RandomHorizontalFlip(),
]
post_augmentations = []
if self.cutout:
post_augmentations.append(Cutout(1, img_size // 2))
pass
if self.random_erasing:
# augmentations.append(RandomErasing())
pass
else:
augmentations, post_augmentations = [], []
transforms_train = (
augmentations + preprocessors + post_augmentations + permutations_list
)
transforms_eval = preprocessors + permutations_list
transform_train = torchvision.transforms.Compose(transforms_train)
transform_eval = torchvision.transforms.Compose(transforms_eval)
self.dataset_train = torchvision.datasets.CIFAR10(
f"{default_data_path}/{self._name_}",
train=True,
download=True,
transform=transform_train,
)
self.dataset_test = torchvision.datasets.CIFAR10(
f"{default_data_path}/{self._name_}", train=False, transform=transform_eval
)
if self.rescale:
print(f"Resizing all images to {img_size} x {img_size}.")
self.dataset_train.data = self.dataset_train.data.reshape((self.dataset_train.data.shape[0], 32 // self.rescale, self.rescale, 32 // self.rescale, self.rescale, 3)).max(4).max(2).astype(np.uint8)
self.dataset_test.data = self.dataset_test.data.reshape((self.dataset_test.data.shape[0], 32 // self.rescale, self.rescale, 32 // self.rescale, self.rescale, 3)).max(4).max(2).astype(np.uint8)
self.split_train_val(self.val_split)
def __str__(self):
return f"{'p' if self.permute else 's'}{self._name_}"
class SpeechCommands(ResolutionSequenceDataset):
_name_ = "sc"
@property
def init_defaults(self):
return {
"mfcc": False,
"dropped_rate": 0.0,
"length": 16000,
"all_classes": False,
}
@property
def d_input(self):
_d_input = 20 if self.mfcc else 1
_d_input += 1 if self.dropped_rate > 0.0 else 0
return _d_input
@property
def d_output(self):
return 10 if not self.all_classes else 35
@property
def l_output(self):
return 0
@property
def L(self):
return 161 if self.mfcc else self.length
def setup(self):
self.data_dir = self.data_dir or default_data_path # TODO make same logic as other classes
from src.dataloaders.datasets.sc import _SpeechCommands
# TODO refactor with data_dir argument
self.dataset_train = _SpeechCommands(
partition="train",
length=self.L,
mfcc=self.mfcc,
sr=1,
dropped_rate=self.dropped_rate,
path=self.data_dir,
all_classes=self.all_classes,
)
self.dataset_val = _SpeechCommands(
partition="val",
length=self.L,
mfcc=self.mfcc,
sr=1,
dropped_rate=self.dropped_rate,
path=self.data_dir,
all_classes=self.all_classes,
)
self.dataset_test = _SpeechCommands(
partition="test",
length=self.L,
mfcc=self.mfcc,
sr=1,
dropped_rate=self.dropped_rate,
path=self.data_dir,
all_classes=self.all_classes,
)
| state-spaces-main | src/dataloaders/basic.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements data loaders for language modeling (LM)."""
import logging
import os
import subprocess
from pathlib import Path
from typing import Optional, List, Tuple
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.utils import distributed
import src.utils.train
log = src.utils.train.get_logger(__name__)
from src.dataloaders.base import SequenceDataset, default_data_path
from src.dataloaders.utils.vocabulary import OpenAIVocab, Vocab
import src.utils as utils
# TODO: create a package so we don't have to mess with sys.path?
project_root = Path(__file__).parent.parent.absolute()
data_path = Path(__file__).absolute().parent / 'data'
import sys
sys.path.insert(0, str(project_root))
class LMOrderedIterator:
def __init__(
self,
data,
batch_size,
l_max,
batch_first=True,
n_context=1,
n_epoch_double=0,
pad_last=False,
roll_seed=None, # roll data based on seed
limit_tokens=1.0, # reduce tokens; useful for debugging last batch edge cases
):
"""
data -- LongTensor -- the LongTensor is strictly ordered
pad_last: whether to pad the last sequence in the batch so that all sequences
have the same length (l_max).
"""
self.raw_data = data
self.batch_size = batch_size
self.l_max = l_max
self.batch_first = batch_first
self.pad_last = pad_last
self.roll_seed = roll_seed
self.n_context = n_context
self.n_epoch_double = n_epoch_double
self.epoch = -1
# DDP
self.world_size = distributed.get_world_size()
self.rank = distributed.get_rank()
if limit_tokens is not None and 0.0 < limit_tokens < 1.0:
l_data = int(math.floor(data.size(-1) * limit_tokens))
self.raw_data = self.raw_data[:l_data]
self.process()
def process(self):
""" Process the data. All logic involving sequence length and batch size should go here """
assert self.l_max % self.n_context == 0
self.l_inc = self.l_max // self.n_context
global_batch_size = self.world_size * self.batch_size
# Work out how cleanly we can divide the dataset into batch_size parts.
n_step = self.raw_data.size(-1) // global_batch_size
# Trim off any extra elements that wouldn't cleanly fit (remainders).
self.data = self.raw_data[: n_step * global_batch_size]
# Evenly divide the data across the batches.
self.data = self.data.view(global_batch_size, -1).contiguous().pin_memory() # (global_batch_size, length)
# Partition data for DistributedDataParallel
self.data = self.data.chunk(self.world_size, dim=0)[self.rank]
# Number of mini-batches
# Need to subtract 1 because target is data shifted by 1
self.n_batch = (self.data.size(-1) - 1 + self.l_inc - 1) // self.l_inc
def roll(self, seed):
rng = torch.Generator()
rng.manual_seed(seed)
for i in range(self.data.size(0)):
row = self.data[i, :]
shift = torch.randint(0, self.data.size(-1), (1,), generator=rng)
row = torch.cat((row[shift:], row[:shift]))
self.data[i, :] = row
def get_batch(self, i):
""" Get batch starting at token index i """
end_idx = min(i + self.l_inc, self.data.size(-1)-1)
beg_idx = max(0, i + self.l_inc - self.l_max)
seq_len = end_idx - i
data = self.data[..., beg_idx:end_idx]
target = self.data[..., i+1 : end_idx+1]
if self.pad_last and seq_len < self.l_inc:
data = F.pad(data, (0, self.l_inc - seq_len)) # (batch_size, l_inc)
target = F.pad(target, (0, self.l_inc - seq_len))
seq_len = self.l_inc
if not self.batch_first:
data = data.transpose(0, 1).contiguous() # (n_batch, l_sequence)
target = target.transpose(0, 1).contiguous()
return data, target, {"l_output": seq_len} # Return length of desired output
def get_fixlen_iter(self, start=0):
if start != 0:
start += self.l_max
for i in range(start, self.data.size(-1) - 1, self.l_inc):
self.last_iter = i
yield self.get_batch(i)
def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3): # NOTE: NOT TESTED
l_max = self.l_max + max_deviation * std
i = start
while True:
l_max = self.l_max if np.random.random() < 0.95 else self.l_max / 2.0
l_max = min(l_max, max(min_len, int(np.random.normal(l_max, std))))
data, target, seq_len = self.get_batch(i, l_max) # AG: this doesn't appear to work...
i += seq_len
yield data, target, seq_len
if i >= self.data.size(-1) - 2:
break
def __iter__(self):
self.epoch += 1
if (n := self.n_epoch_double) > 0 and self.epoch > 0 and self.epoch % n == 0:
if self.batch_size > 1:
log.info(f"LM Iterator doubling length from {self.l_max} to {self.l_max*2}")
self.l_max *= 2
self.batch_size //= 2
self.process()
if self.roll_seed is not None:
self.roll(self.roll_seed + self.epoch)
return self.get_fixlen_iter()
def __len__(self):
return self.n_batch
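# Worked example of the batching layout above (illustrative numbers only). Suppose the
# token stream is [0, 1, ..., 49], batch_size=2 on a single process, l_max=4, n_context=1:
#
#   * n_step = 50 // 2 = 25, so the stream is viewed as
#       data = [[ 0,  1, ..., 24],
#               [25, 26, ..., 49]]          # shape (batch_size, 25)
#   * l_inc = l_max // n_context = 4 and n_batch = ceil((25 - 1) / 4) = 6
#   * get_batch(0) yields data[..., 0:4] with target data[..., 1:5], i.e. the target is
#     the input shifted by one token; a final short batch is zero-padded to l_inc only
#     when pad_last=True.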
class LMShuffledIterator(object):
# NOTE: Not tested
def __init__(
self, data, batch_size, l_max, device="cpu", ext_len=None, shuffle=False
):
"""
data -- list[LongTensor] -- there is no order among the LongTensors
"""
self.data = data
self.batch_size = batch_size
self.l_max = l_max
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self):
# index iterator
epoch_indices = (
np.random.permutation(len(self.data))
if self.shuffle
else np.array(range(len(self.data)))
)
# sentence iterator
for idx in epoch_indices:
yield self.data[idx]
def stream_iterator(self, sent_stream):
# streams for each data in the batch
streams = [None] * self.batch_size
data = torch.LongTensor(self.l_max, self.batch_size)
target = torch.LongTensor(self.l_max, self.batch_size)
n_retain = 0
while True:
# data : [n_retain+l_max x batch_size]
# target : [l_max x batch_size]
data[n_retain:].fill_(-1)
target.fill_(-1)
valid_batch = True
for i in range(self.batch_size):
n_filled = 0
try:
while n_filled < self.l_max:
if streams[i] is None or len(streams[i]) <= 1:
streams[i] = next(sent_stream)
# number of new tokens to fill in
n_new = min(len(streams[i]) - 1, self.l_max - n_filled)
# first n_retain tokens are retained from last batch
data[
n_retain + n_filled : n_retain + n_filled + n_new,
i,
] = streams[i][:n_new]
target[n_filled : n_filled + n_new, i] = streams[i][
1 : n_new + 1
]
streams[i] = streams[i][n_new:]
n_filled += n_new
except StopIteration:
valid_batch = False
break
if not valid_batch:
return
data = data.to(self.device)
target = target.to(self.device)
yield data, target, self.l_max
n_retain = min(data.size(0), self.ext_len)
if n_retain > 0:
data[:n_retain] = data[-n_retain:]
data.resize_(n_retain + self.l_max, data.size(1))
def __iter__(self):
# sent_stream is an iterator
sent_stream = self.get_sent_stream()
for batch in self.stream_iterator(sent_stream):
yield batch
class LMMultiFileIterator(LMShuffledIterator):
# NOTE: Not tested
def __init__(
self,
paths,
vocab,
batch_size,
l_max,
device="cpu",
ext_len=None,
shuffle=False,
):
self.paths = paths
self.vocab = vocab
self.batch_size = batch_size
self.l_max = l_max
self.ext_len = ext_len if ext_len is not None else 0
self.device = device
self.shuffle = shuffle
def get_sent_stream(self, path):
sents = self.vocab.encode_file(path, add_double_eos=True)
if self.shuffle:
np.random.shuffle(sents)
sent_stream = iter(sents)
return sent_stream
def __iter__(self):
if self.shuffle:
np.random.shuffle(self.paths)
for path in self.paths:
# sent_stream is an iterator
sent_stream = self.get_sent_stream(path)
for batch in self.stream_iterator(sent_stream):
yield batch
class WikiText2(SequenceDataset):
_name_ = "wt2"
# Vocab arguments
vocab_kwargs = {"special": ["<eos>"], "lower_case": False}
encode_kwargs = {"ordered": True}
init_defaults = {
# Dataset arguments
'l_max': 512,
'bpe': False,
'roll_seed': 42,
'test_split': True,
}
@property
def n_tokens(self):
return len(self.vocab)
def prepare_data(self):
# [21-09-23] probably broken
if not self.data_dir.exists():
subprocess.run(
[
str(project_root / "data" / "getdata.sh"),
self._name_,
str(self.data_dir.parent.absolute()),
],
check=True,
)
def setup(self, stage=None): # [21-09-10 AG]: TODO shouldn't this tokenization happen in the prepare_data? since we're caching it it doesn't really matter, but still
if self.data_dir is None: self.data_dir = default_data_path / self._name_
if self.bpe:
self.vocab = OpenAIVocab()
else:
self.vocab = Vocab(**self.vocab_kwargs)
# Loader arguments
if not self._load_from_cache():
logging.info(f"Producing dataset {self._name_}...")
self._vocab_count()
self.vocab.build_vocab()
self.train = self.vocab.encode_file(
str(self.data_dir / "train.txt"), **self.encode_kwargs
)
self.valid = self.vocab.encode_file(
str(self.data_dir / "valid.txt"), **self.encode_kwargs
)
self.test = self.vocab.encode_file(
str(self.data_dir / "test.txt"), **self.encode_kwargs
)
self._save_to_cache()
# No test set if specified
if not self.test_split:
self.test = None
# Define task
print("Vocab size:", len(self.vocab))
def _vocab_count(self):
self.vocab.count_file(self.data_dir / "train.txt")
self.vocab.count_file(self.data_dir / "valid.txt")
self.vocab.count_file(self.data_dir / "test.txt")
def _save_to_cache(self):
        cache_path = self.data_dir / "cache.pt"  # TODO name could include vocab_kwargs to disambiguate
with distributed.sync_workers() as rank:
if rank == 0:
try:
torch.save(
(self.vocab, self.train, self.valid, self.test),
cache_path,
)
logging.info(f"Saved dataset to {cache_path}...")
                except Exception:
                    pass
def _load_from_cache(self):
        cache_path = self.data_dir / "cache.pt"
if cache_path.exists():
logging.info("Loading cached dataset...")
self.vocab, self.train, self.valid, self.test = torch.load(
cache_path
)
return True
else:
return False
def train_dataloader(self, eval=None, **kwargs):
# TODO kwargs absorbs num_workers
return LMOrderedIterator(
self.train,
roll_seed=self.roll_seed,
**kwargs,
)
# def val_dataloader(self, batch_size, **kwargs):
def _eval_dataloader(self, dataset, eval=None, **loader_args):
if dataset is None: return None
# Make eval a list of dictionaries
if eval is None: eval = {}
if not utils.is_list(eval):
eval = [eval]
# Each eval setting overrides the train setting
for eval_args in eval:
for k in loader_args:
if eval_args.get(k, None) is None:
eval_args[k] = loader_args[k]
print("eval loader:", eval_args)
loaders = [LMOrderedIterator(dataset, **eval_args) for eval_args in eval]
if len(loaders) == 1: return loaders[0]
return loaders
def val_dataloader(self, **kwargs):
return self._eval_dataloader(self.valid, **kwargs)
def test_dataloader(self, **kwargs):
return self._eval_dataloader(self.test, **kwargs)
class WikiText103(WikiText2):
_name_ = "wt103"
def _vocab_count(self):
print(self.data_dir)
self.vocab.count_file(self.data_dir / "train.txt")
class PennTreeBank(WikiText2):
_name_ = "ptb"
vocab_kwargs = {"special": ["<eos>"], "lower_case": True}
class EnWik8(WikiText2):
_name_ = "enwik8"
vocab_kwargs = {}
encode_kwargs = {"ordered": True, "add_eos": False}
class Text8(EnWik8):
_name_ = "text8"
class LM1B(WikiText2):
# [21-09-08 AG]: this looks very out of date, the __init__ function should be inherited
_name_ = "lm1b"
vocab_kwargs = {"special": [], "lower_case": False}
cutoffs = [59997, 99997, 639997]
tie_projs = [False] + [False] * len(cutoffs)
def __init__(self, data_dir, bpe=False, *args, **kwargs):
LightningDataModule.__init__(self)
self.data_dir = Path(data_dir)
# self.vocab_type = vocab
if bpe:
self.vocab = OpenAIVocab()
else:
self.vocab = Vocab(
vocab_file=self.data_dir / "1b_word_vocab.txt",
**self.vocab_kwargs,
)
def setup(self, stage=None):
if not self._load_from_cache():
logging.info(f"Producing dataset {self._name_}...")
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
train_paths = list(
(
self.data_dir
/ "1-billion-word-language-modeling-benchmark-r13output"
/ "training-monolingual.tokenized.shuffled"
).glob("news.en-*")
)
self.train = train_paths
self.valid = self.vocab.encode_file(
str(self.data_dir / "valid.txt"),
ordered=False,
add_double_eos=True,
)
self.test = self.vocab.encode_file(
str(self.data_dir / "test.txt"),
ordered=False,
add_double_eos=True,
)
self._save_to_cache()
def train_dataloader(self, *args, **kwargs):
kwargs["shuffle"] = True
return LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
def val_dataloader(self, *args, **kwargs):
return LMShuffledIterator(self.valid, *args, **kwargs)
def test_dataloader(self, *args, **kwargs):
return LMShuffledIterator(self.test, *args, **kwargs)
class Corpus(object):
# AG: only used in get_lm_corpus which is only called in the unit test
def __init__(self, path, dataset, vocab, *args, **kwargs):
self.dataset = dataset
if vocab == "word":
self.vocab = Vocab(*args, **kwargs)
elif vocab == "bpe":
self.vocab = OpenAIVocab()
else:
raise RuntimeError("Unsupported vocab")
if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
self.vocab.count_file(os.path.join(path, "train.txt"))
self.vocab.count_file(os.path.join(path, "valid.txt"))
self.vocab.count_file(os.path.join(path, "test.txt"))
elif self.dataset == "wt103":
self.vocab.count_file(os.path.join(path, "train.txt"))
elif self.dataset == "lm1b":
train_path_pattern = os.path.join(
path,
"1-billion-word-language-modeling-benchmark-r13output",
"training-monolingual.tokenized.shuffled",
"news.en-*",
)
train_paths = glob.glob(train_path_pattern)
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
if self.dataset in ["ptb", "wt2", "wt103"]:
self.train = self.vocab.encode_file(
os.path.join(path, "train.txt"), ordered=True
)
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"), ordered=True
)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"), ordered=True
)
elif self.dataset in ["enwik8", "text8"]:
self.train = self.vocab.encode_file(
os.path.join(path, "train.txt"), ordered=True, add_eos=False
)
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"), ordered=True, add_eos=False
)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"), ordered=True, add_eos=False
)
elif self.dataset == "lm1b":
self.train = train_paths
self.valid = self.vocab.encode_file(
os.path.join(path, "valid.txt"),
ordered=False,
add_double_eos=True,
)
self.test = self.vocab.encode_file(
os.path.join(path, "test.txt"),
ordered=False,
add_double_eos=True,
)
def get_iterator(self, split, *args, **kwargs):
if split == "train":
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
data_iter = LMOrderedIterator(self.train, *args, **kwargs)
elif self.dataset == "lm1b":
kwargs["shuffle"] = True
data_iter = LMMultiFileIterator(
self.train, self.vocab, *args, **kwargs
)
elif split in ["valid", "test"]:
data = self.valid if split == "valid" else self.test
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
data_iter = LMOrderedIterator(data, *args, **kwargs)
elif self.dataset == "lm1b":
data_iter = LMShuffledIterator(data, *args, **kwargs)
return data_iter
| state-spaces-main | src/dataloaders/lm.py |
"""Audio datasets and utilities."""
import os
from os import listdir
from os.path import join
import torch
import torchaudio
from torch import nn
from torch.nn import functional as F
from src.dataloaders.base import default_data_path, SequenceDataset, deprecated
def minmax_scale(tensor, range_min=0, range_max=1):
"""
Min-max scaling to [0, 1].
"""
min_val = torch.amin(tensor, dim=(1, 2), keepdim=True)
max_val = torch.amax(tensor, dim=(1, 2), keepdim=True)
return range_min + (range_max - range_min) * (tensor - min_val) / (max_val - min_val + 1e-6)
def quantize(samples, bits=8, epsilon=0.01):
"""
Linearly quantize a signal in [0, 1] to a signal in [0, q_levels - 1].
"""
q_levels = 1 << bits
samples *= q_levels - epsilon
samples += epsilon / 2
return samples.long()
def dequantize(samples, bits=8):
"""
Dequantize a signal in [0, q_levels - 1].
"""
q_levels = 1 << bits
return samples.float() / (q_levels / 2) - 1
def mu_law_encode(audio, bits=8):
"""
Perform mu-law companding transformation.
"""
mu = torch.tensor((1 << bits) - 1)
# Audio must be min-max scaled between -1 and 1
audio = minmax_scale(audio, range_min=-1, range_max=1)
# Perform mu-law companding transformation.
numerator = torch.log1p(mu * torch.abs(audio + 1e-8))
denominator = torch.log1p(mu)
encoded = torch.sign(audio) * (numerator / denominator)
# Shift signal to [0, 1]
encoded = (encoded + 1) / 2
# Quantize signal to the specified number of levels.
return quantize(encoded, bits=bits)
def mu_law_decode(encoded, bits=8):
"""
Perform inverse mu-law transformation.
"""
mu = (1 << bits) - 1
# Invert the quantization
x = dequantize(encoded, bits=bits)
# Invert the mu-law transformation
x = torch.sign(x) * ((1 + mu)**(torch.abs(x)) - 1) / mu
# Returned values in range [-1, 1]
return x
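# Illustrative sketch (not part of the original file): round-trip a waveform
# through the mu-law helpers above. The tensor shape and bit depth are
# arbitrary assumptions.
def _example_mu_law_roundtrip(bits=8):
    audio = torch.randn(1, 16000, 1)             # (B, L, 1) float waveform
    encoded = mu_law_encode(audio, bits=bits)    # long tensor in [0, 2**bits - 1]
    decoded = mu_law_decode(encoded, bits=bits)  # float tensor roughly in [-1, 1]
    return encoded, decoded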
def linear_encode(samples, bits=8):
"""
Perform scaling and linear quantization.
"""
samples = samples.clone()
samples = minmax_scale(samples)
return quantize(samples, bits=bits)
def linear_decode(samples, bits=8):
"""
Invert the linear quantization.
"""
return dequantize(samples, bits=bits)
def q_zero(bits=8):
"""
The quantized level of the 0.0 value.
"""
return 1 << (bits - 1)
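# Illustrative sketch (not part of the original file): linear quantization
# round-trip and the zero level used for padding. Shapes are assumptions.
def _example_linear_quantization(bits=8):
    samples = torch.randn(1, 16000, 1)
    q = linear_encode(samples, bits=bits)  # long tensor in [0, 2**bits - 1]
    x = linear_decode(q, bits=bits)        # float tensor roughly in [-1, 1)
    return q, x, q_zero(bits)              # q_zero(8) == 128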
class AbstractAudioDataset(torch.utils.data.Dataset):
def __init__(
self,
bits=8,
sample_len=None,
quantization='linear',
return_type='autoregressive',
drop_last=True,
target_sr=None,
context_len=None,
pad_len=None,
**kwargs,
) -> None:
super().__init__()
self.bits = bits
self.sample_len = sample_len
self.quantization = quantization
self.return_type = return_type
self.drop_last = drop_last
self.target_sr = target_sr
self.zero = q_zero(bits)
self.context_len = context_len
self.pad_len = pad_len
for key, value in kwargs.items():
setattr(self, key, value)
self.file_names = NotImplementedError("Must be assigned in setup().")
self.transforms = {}
self.setup()
self.create_quantizer(self.quantization)
self.create_examples(self.sample_len)
def setup(self):
raise NotImplementedError("Must assign a list of filepaths to self.file_names.")
def __getitem__(self, index):
# Load signal
if self.sample_len is not None:
file_name, start_frame, num_frames = self.examples[index]
seq, sr = torchaudio.load(file_name, frame_offset=start_frame, num_frames=num_frames)
else:
seq, sr = torchaudio.load(self.examples[index])
# Average non-mono signals across channels
if seq.shape[0] > 1:
seq = seq.mean(dim=0, keepdim=True)
# Resample signal if required
if self.target_sr is not None and sr != self.target_sr:
if sr not in self.transforms:
self.transforms[sr] = torchaudio.transforms.Resample(orig_freq=sr, new_freq=self.target_sr)
seq = self.transforms[sr](seq)
# Transpose the signal to get (L, 1)
seq = seq.transpose(0, 1)
# Unsqueeze to (1, L, 1)
seq = seq.unsqueeze(0)
# Quantized signal
qseq = self.quantizer(seq, self.bits)
# Squeeze back to (L, 1)
qseq = qseq.squeeze(0)
# Return the signal
if self.return_type == 'autoregressive':
# Autoregressive training
# x is [0, qseq[0], qseq[1], ..., qseq[-2]]
# y is [qseq[0], qseq[1], ..., qseq[-1]]
y = qseq
x = torch.roll(qseq, 1, 0) # Roll the signal 1 step
x[0] = self.zero # Fill the first element with q_0
x = x.squeeze(1) # Squeeze to (L, )
if self.context_len is not None:
y = y[self.context_len:] # Trim the signal
if self.pad_len is not None:
x = torch.cat((torch.zeros(self.pad_len, dtype=self.qtype) + self.zero, x)) # Pad the signal
return x, y
elif self.return_type is None:
return qseq
else:
raise NotImplementedError(f'Invalid return type {self.return_type}')
def __len__(self):
return len(self.examples)
def create_examples(self, sample_len: int):
# Get metadata for all files
self.metadata = [
torchaudio.info(file_name) for file_name in self.file_names
]
if sample_len is not None:
# Reorganize files into a flat list of (file_name, start_frame) pairs
# so that consecutive items are separated by sample_len
self.examples = []
for file_name, metadata in zip(self.file_names, self.metadata):
# Update the sample_len if resampling to target_sr is required
# This is because the resampling will change the length of the signal
# so we need to adjust the sample_len accordingly (e.g. if downsampling
# the sample_len will need to be increased)
sample_len_i = sample_len
if self.target_sr is not None and metadata.sample_rate != self.target_sr:
sample_len_i = int(sample_len * metadata.sample_rate / self.target_sr)
margin = metadata.num_frames % sample_len_i
for start_frame in range(0, metadata.num_frames - margin, sample_len_i):
self.examples.append((file_name, start_frame, sample_len_i))
if margin > 0 and not self.drop_last:
# Last (leftover) example is shorter than sample_len, and equal to the margin
# (must be padded in collate_fn)
self.examples.append((file_name, metadata.num_frames - margin, margin))
else:
self.examples = self.file_names
def create_quantizer(self, quantization: str):
if quantization == 'linear':
self.quantizer = linear_encode
self.dequantizer = linear_decode
self.qtype = torch.long
elif quantization == 'mu-law':
self.quantizer = mu_law_encode
self.dequantizer = mu_law_decode
self.qtype = torch.long
elif quantization is None:
self.quantizer = lambda x, bits: x
self.dequantizer = lambda x, bits: x
self.qtype = torch.float
else:
raise ValueError('Invalid quantization type')
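# Illustrative sketch (not part of the original file): the minimal subclass
# contract for AbstractAudioDataset is to assign a list of audio file paths in
# setup(). The class name is hypothetical and it assumes `path` points at a
# flat folder of .wav files, mirroring QuantizedAudioDataset below.
class _FolderOfWavs(AbstractAudioDataset):
    def setup(self):
        self.file_names = [
            join(self.path, f) for f in listdir(self.path) if f.endswith('.wav')
        ]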
class QuantizedAudioDataset(AbstractAudioDataset):
"""
Adapted from https://github.com/deepsound-project/samplernn-pytorch/blob/master/dataset.py
"""
def __init__(
self,
path,
bits=8,
ratio_min=0,
ratio_max=1,
sample_len=None,
quantization='linear', # [linear, mu-law]
return_type='autoregressive', # [autoregressive, None]
drop_last=False,
target_sr=None,
context_len=None,
pad_len=None,
**kwargs,
):
super().__init__(
bits=bits,
sample_len=sample_len,
quantization=quantization,
return_type=return_type,
drop_last=drop_last,
target_sr=target_sr,
path=path,
ratio_min=ratio_min,
ratio_max=ratio_max,
context_len=context_len,
pad_len=pad_len,
**kwargs,
)
def setup(self):
from natsort import natsorted
file_names = natsorted(
[join(self.path, file_name) for file_name in listdir(self.path)]
)
self.file_names = file_names[
int(self.ratio_min * len(file_names)) : int(self.ratio_max * len(file_names))
]
class QuantizedAutoregressiveAudio(SequenceDataset):
_name_ = 'qautoaudio'
@property
def d_input(self):
return 1
@property
def d_output(self):
return 1 << self.bits
@property
def l_output(self):
return self.sample_len
@property
def n_tokens(self):
return 1 << self.bits
@property
def init_defaults(self):
return {
'path': None,
'bits': 8,
'sample_len': None,
'train_percentage': 0.88,
'quantization': 'linear',
'drop_last': False,
'context_len': None,
'pad_len': None,
}
def setup(self):
from src.dataloaders.audio import QuantizedAudioDataset
assert self.path is not None or self.data_dir is not None, "Pass a path to a folder of audio: either `data_dir` for full directory or `path` for relative path."
if self.data_dir is None:
self.data_dir = default_data_path / self.path
self.dataset_train = QuantizedAudioDataset(
path=self.data_dir,
bits=self.bits,
ratio_min=0,
ratio_max=self.train_percentage,
sample_len=self.sample_len,
quantization=self.quantization,
drop_last=self.drop_last,
context_len=self.context_len,
pad_len=self.pad_len,
)
self.dataset_val = QuantizedAudioDataset(
path=self.data_dir,
bits=self.bits,
ratio_min=self.train_percentage,
ratio_max=self.train_percentage + (1 - self.train_percentage) / 2,
sample_len=self.sample_len,
quantization=self.quantization,
drop_last=self.drop_last,
context_len=self.context_len,
pad_len=self.pad_len,
)
self.dataset_test = QuantizedAudioDataset(
path=self.data_dir,
bits=self.bits,
ratio_min=self.train_percentage + (1 - self.train_percentage) / 2,
ratio_max=1,
sample_len=self.sample_len,
quantization=self.quantization,
drop_last=self.drop_last,
context_len=self.context_len,
pad_len=self.pad_len,
)
def collate_fn(batch):
x, y, *z = zip(*batch)
assert len(z) == 0
lengths = torch.tensor([len(e) for e in x])
max_length = lengths.max()
if self.pad_len is None:
pad_length = int(min(2**max_length.log2().ceil(), self.sample_len) - max_length)
else:
pad_length = int(min(2**max_length.log2().ceil(), self.sample_len + self.pad_len) - max_length)
x = nn.utils.rnn.pad_sequence(
x,
padding_value=self.dataset_train.zero,
batch_first=True,
)
x = F.pad(x, (0, pad_length), value=self.dataset_train.zero)
y = nn.utils.rnn.pad_sequence(
y,
padding_value=-100, # pad with -100 to ignore these locations in cross-entropy loss
batch_first=True,
)
return x, y, {"lengths": lengths}
if not self.drop_last:
self._collate_fn = collate_fn # TODO not tested
class SpeechCommands09(AbstractAudioDataset):
CLASSES = [
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
]
CLASS_TO_IDX = dict(zip(CLASSES, range(len(CLASSES))))
def __init__(
self,
path,
bits=8,
split='train',
sample_len=16000,
quantization='linear', # [linear, mu-law]
return_type='autoregressive', # [autoregressive, None]
drop_last=False,
target_sr=None,
dequantize=False,
pad_len=None,
**kwargs,
):
super().__init__(
bits=bits,
sample_len=sample_len,
quantization=quantization,
return_type=return_type,
split=split,
drop_last=drop_last,
target_sr=target_sr,
path=path,
dequantize=dequantize,
pad_len=pad_len,
**kwargs,
)
def setup(self):
with open(join(self.path, 'validation_list.txt')) as f:
validation_files = set([line.rstrip() for line in f.readlines()])
with open(join(self.path, 'testing_list.txt')) as f:
test_files = set([line.rstrip() for line in f.readlines()])
# Get all files in the paths named after CLASSES
self.file_names = []
for class_name in self.CLASSES:
self.file_names += [
(class_name, file_name)
for file_name in listdir(join(self.path, class_name))
if file_name.endswith('.wav')
]
# Keep files based on the split
if self.split == 'train':
self.file_names = [
join(self.path, class_name, file_name)
for class_name, file_name in self.file_names
if join(class_name, file_name) not in validation_files
and join(class_name, file_name) not in test_files
]
elif self.split == 'validation':
self.file_names = [
join(self.path, class_name, file_name)
for class_name, file_name in self.file_names
if join(class_name, file_name) in validation_files
]
elif self.split == 'test':
self.file_names = [
join(self.path, class_name, file_name)
for class_name, file_name in self.file_names
if join(class_name, file_name) in test_files
]
def __getitem__(self, index):
item = super().__getitem__(index)
x, y, *z = item
if self.dequantize:
x = self.dequantizer(x).unsqueeze(1)
return x, y, *z
class SpeechCommands09Autoregressive(SequenceDataset):
_name_ = 'sc09'
@property
def d_input(self):
return 1
@property
def d_output(self):
return 1 << self.bits
@property
def l_output(self):
return self.sample_len
@property
def n_tokens(self):
return 1 << self.bits
@property
def init_defaults(self):
return {
'bits': 8,
'quantization': 'mu-law',
'dequantize': False,
'pad_len': None,
}
def setup(self):
from src.dataloaders.audio import SpeechCommands09
self.data_dir = self.data_dir or default_data_path / self._name_
self.dataset_train = SpeechCommands09(
path=self.data_dir,
bits=self.bits,
split='train',
quantization=self.quantization,
dequantize=self.dequantize,
pad_len=self.pad_len,
)
self.dataset_val = SpeechCommands09(
path=self.data_dir,
bits=self.bits,
split='validation',
quantization=self.quantization,
dequantize=self.dequantize,
pad_len=self.pad_len,
)
self.dataset_test = SpeechCommands09(
path=self.data_dir,
bits=self.bits,
split='test',
quantization=self.quantization,
dequantize=self.dequantize,
pad_len=self.pad_len,
)
self.sample_len = self.dataset_train.sample_len
def _collate_fn(self, batch):
x, y, *z = zip(*batch)
assert len(z) == 0
lengths = torch.tensor([len(e) for e in x])
max_length = lengths.max()
if self.pad_len is None:
pad_length = int(min(2**max_length.log2().ceil(), self.sample_len) - max_length)
else:
pad_length = 0 # int(self.sample_len + self.pad_len - max_length)
x = nn.utils.rnn.pad_sequence(
x,
padding_value=self.dataset_train.zero if not self.dequantize else 0.,
batch_first=True,
)
x = F.pad(x, (0, pad_length), value=self.dataset_train.zero if not self.dequantize else 0.)
y = nn.utils.rnn.pad_sequence(
y,
padding_value=-100, # pad with -100 to ignore these locations in cross-entropy loss
batch_first=True,
)
y = F.pad(y, (0, 0, 0, pad_length), value=-100) # (batch, length, 1)
return x, y, {"lengths": lengths}
class MaestroDataset(AbstractAudioDataset):
YEARS = [2004, 2006, 2008, 2009, 2011, 2013, 2014, 2015, 2017, 2018]
SPLITS = ['train', 'validation', 'test']
def __init__(
self,
path,
bits=8,
split='train',
sample_len=None,
quantization='linear',
return_type='autoregressive',
drop_last=False,
target_sr=16000,
):
super().__init__(
bits=bits,
sample_len=sample_len,
quantization=quantization,
return_type=return_type,
split=split,
path=path,
drop_last=drop_last,
target_sr=target_sr,
)
def setup(self):
import pandas as pd
from natsort import natsorted
self.path = str(self.path)
# Pull out examples in the specified split
df = pd.read_csv(self.path + '/maestro-v3.0.0.csv')
df = df[df['split'] == self.split]
file_names = []
for filename in df['audio_filename'].values:
filepath = os.path.join(self.path, filename)
assert os.path.exists(filepath)
file_names.append(filepath)
self.file_names = natsorted(file_names)
class MaestroAutoregressive(SequenceDataset):
_name_ = 'maestro'
@property
def d_input(self):
return 1
@property
def d_output(self):
return 1 << self.bits
@property
def l_output(self):
return self.sample_len
@property
def n_tokens(self):
return 1 << self.bits
@property
def init_defaults(self):
return {
'bits': 8,
'sample_len': None,
'quantization': 'mu-law',
}
def setup(self):
from src.dataloaders.audio import MaestroDataset
self.data_dir = self.data_dir or default_data_path / self._name_ / 'maestro-v3.0.0'
self.dataset_train = MaestroDataset(
path=self.data_dir,
bits=self.bits,
split='train',
sample_len=self.sample_len,
quantization=self.quantization,
)
self.dataset_val = MaestroDataset(
path=self.data_dir,
bits=self.bits,
split='validation',
sample_len=self.sample_len,
quantization=self.quantization,
)
self.dataset_test = MaestroDataset(
path=self.data_dir,
bits=self.bits,
split='test',
sample_len=self.sample_len,
quantization=self.quantization,
)
def _collate_fn(self, batch):
x, y, *z = zip(*batch)
assert len(z) == 0
lengths = torch.tensor([len(e) for e in x])
max_length = lengths.max()
pad_length = int(min(max(1024, 2**max_length.log2().ceil()), self.sample_len) - max_length)
x = nn.utils.rnn.pad_sequence(
x,
padding_value=self.dataset_train.zero,
batch_first=True,
)
x = F.pad(x, (0, pad_length), value=self.dataset_train.zero)
y = nn.utils.rnn.pad_sequence(
y,
padding_value=self.dataset_train.zero,
batch_first=True,
)
return x, y, {"lengths": lengths}
class LJSpeech(QuantizedAudioDataset):
def __init__(
self,
path,
bits=8,
ratio_min=0,
ratio_max=1,
sample_len=None,
quantization='linear', # [linear, mu-law]
return_type='autoregressive', # [autoregressive, None]
drop_last=False,
target_sr=None,
use_text=False,
):
super().__init__(
bits=bits,
sample_len=sample_len,
quantization=quantization,
return_type=return_type,
drop_last=drop_last,
target_sr=target_sr,
path=path,
ratio_min=ratio_min,
ratio_max=ratio_max,
use_text=use_text,
)
def setup(self):
import pandas as pd
from sklearn.preprocessing import LabelEncoder
super().setup()
self.vocab_size = None
if self.use_text:
self.transcripts = {}
with open(str(self.path.parents[0] / 'metadata.csv'), 'r') as f:
for line in f:
index, raw_transcript, normalized_transcript = line.rstrip('\n').split("|")
self.transcripts[index] = normalized_transcript
# df = pd.read_csv(self.path.parents[0] / 'metadata.csv', sep="|", header=None)
# self.transcripts = dict(zip(df[0], df[2])) # use normalized transcripts
self.tok_transcripts = {}
self.vocab = set()
for file_name in self.file_names:
# Very simple tokenization, character by character
# Capitalization is ignored for simplicity
file_name = file_name.split('/')[-1].split('.')[0]
self.tok_transcripts[file_name] = list(self.transcripts[file_name].lower())
self.vocab.update(self.tok_transcripts[file_name])
# Fit a label encoder mapping characters to numbers
self.label_encoder = LabelEncoder()
self.label_encoder.fit(list(self.vocab))
# add a token for padding, no additional token for UNK (our dev/test set contain no unseen characters)
self.vocab_size = len(self.vocab) + 1
# Finalize the tokenized transcripts
for file_name in self.file_names:
file_name = file_name.split('/')[-1].split('.')[0]
self.tok_transcripts[file_name] = torch.tensor(self.label_encoder.transform(self.tok_transcripts[file_name]))
def __getitem__(self, index):
item = super().__getitem__(index)
if self.use_text:
file_name, _, _ = self.examples[index]
tok_transcript = self.tok_transcripts[file_name.split('/')[-1].split('.')[0]]
return *item, tok_transcript
return item
class LJSpeechAutoregressive(SequenceDataset):
_name_ = 'ljspeech'
@property
def d_input(self):
return 1
@property
def d_output(self):
return 1 << self.bits
@property
def l_output(self):
return self.sample_len
@property
def n_tokens(self):
return 1 << self.bits
@property
def init_defaults(self):
return {
'bits': 8,
'sample_len': None,
'quantization': 'mu-law',
'train_percentage': 0.88,
'use_text': False,
}
def setup(self):
from src.dataloaders.audio import LJSpeech
self.data_dir = self.data_dir or default_data_path / self._name_ / 'LJSpeech-1.1' / 'wavs'
self.dataset_train = LJSpeech(
path=self.data_dir,
bits=self.bits,
ratio_min=0,
ratio_max=self.train_percentage,
sample_len=self.sample_len,
quantization=self.quantization,
target_sr=16000,
use_text=self.use_text,
)
self.dataset_val = LJSpeech(
path=self.data_dir,
bits=self.bits,
ratio_min=self.train_percentage,
ratio_max=self.train_percentage + (1 - self.train_percentage) / 2,
sample_len=self.sample_len,
quantization=self.quantization,
target_sr=16000,
use_text=self.use_text,
)
self.dataset_test = LJSpeech(
path=self.data_dir,
bits=self.bits,
ratio_min=self.train_percentage + (1 - self.train_percentage) / 2,
ratio_max=1,
sample_len=self.sample_len,
quantization=self.quantization,
target_sr=16000,
use_text=self.use_text,
)
self.vocab_size = self.dataset_train.vocab_size
def _collate_fn(self, batch):
x, y, *z = zip(*batch)
if self.use_text:
tokens = z[0]
text_lengths = torch.tensor([len(e) for e in tokens])
tokens = nn.utils.rnn.pad_sequence(
tokens,
padding_value=self.vocab_size - 1,
batch_first=True,
)
else:
assert len(z) == 0
lengths = torch.tensor([len(e) for e in x])
max_length = lengths.max()
pad_length = int(min(2**max_length.log2().ceil(), self.sample_len) - max_length)
x = nn.utils.rnn.pad_sequence(
x,
padding_value=self.dataset_train.zero,
batch_first=True,
)
x = F.pad(x, (0, pad_length), value=self.dataset_train.zero)
y = nn.utils.rnn.pad_sequence(
y,
padding_value=-100, # pad with -100 to ignore these locations in cross-entropy loss
batch_first=True,
)
if self.use_text:
return x, y, {"lengths": lengths, "tokens": tokens, "text_lengths": text_lengths}
else:
return x, y, {"lengths": lengths}
class _SpeechCommands09Classification(SpeechCommands09):
def __init__(
self,
path,
bits=8,
split='train',
sample_len=16000,
quantization='linear', # [linear, mu-law]
drop_last=False,
target_sr=None,
**kwargs,
):
super().__init__(
bits=bits,
sample_len=sample_len,
quantization=quantization,
return_type=None,
split=split,
drop_last=drop_last,
target_sr=target_sr,
path=path,
**kwargs,
)
def __getitem__(self, index):
x = super().__getitem__(index)
x = mu_law_decode(x)
y = torch.tensor(self.CLASS_TO_IDX[self.file_names[index].split("/")[-2]])
return x, y
class SpeechCommands09Classification(SequenceDataset):
_name_ = 'sc09cls'
@property
def d_input(self):
return 1
@property
def d_output(self):
return 10
@property
def l_output(self):
return 0
@property
def n_tokens(self):
return 1 << self.bits
@property
def init_defaults(self):
return {
'bits': 8,
'quantization': 'mu-law',
}
def setup(self):
from src.dataloaders.audio import _SpeechCommands09Classification
self.data_dir = self.data_dir or default_data_path / 'sc09'
self.dataset_train = _SpeechCommands09Classification(
path=self.data_dir,
bits=self.bits,
split='train',
quantization=self.quantization,
)
self.dataset_val = _SpeechCommands09Classification(
path=self.data_dir,
bits=self.bits,
split='validation',
quantization=self.quantization,
)
self.dataset_test = _SpeechCommands09Classification(
path=self.data_dir,
bits=self.bits,
split='test',
quantization=self.quantization,
)
self.sample_len = self.dataset_train.sample_len
def collate_fn(self, batch):
x, y, *z = zip(*batch)
assert len(z) == 0
lengths = torch.tensor([len(e) for e in x])
max_length = lengths.max()
pad_length = int(min(2**max_length.log2().ceil(), self.sample_len) - max_length)
x = nn.utils.rnn.pad_sequence(
x,
padding_value=self.dataset_train.zero,
batch_first=True,
)
x = F.pad(x, (0, pad_length), value=0.)  # note: pads with 0. here rather than self.dataset_train.zero
y = torch.tensor(y)
return x, y, {"lengths": lengths}
@deprecated
class SpeechCommandsGeneration(SequenceDataset):
_name_ = "scg"
init_defaults = {
"mfcc": False,
"dropped_rate": 0.0,
"length": 16000,
"all_classes": False,
"discrete_input": False,
}
@property
def n_tokens(self):
return 256 if self.discrete_input else None
def init(self):
if self.mfcc:
self.d_input = 20
self.L = 161
else:
self.d_input = 1
self.L = self.length
if self.dropped_rate > 0.0:
self.d_input += 1
self.d_output = 256
self.l_output = self.length
def setup(self):
from src.dataloaders.datasets.sc import _SpeechCommandsGeneration
# TODO refactor with data_dir argument
self.dataset_train = _SpeechCommandsGeneration(
partition="train",
length=self.length, # self.L,
mfcc=self.mfcc,
sr=1,
dropped_rate=self.dropped_rate,
path=default_data_path,
all_classes=self.all_classes,
discrete_input=self.discrete_input,
)
self.dataset_val = _SpeechCommandsGeneration(
partition="val",
length=self.length, # self.L,
mfcc=self.mfcc,
sr=1,
dropped_rate=self.dropped_rate,
path=default_data_path,
all_classes=self.all_classes,
discrete_input=self.discrete_input,
)
self.dataset_test = _SpeechCommandsGeneration(
partition="test",
length=self.length, # self.L,
mfcc=self.mfcc,
sr=1,
dropped_rate=self.dropped_rate,
path=default_data_path,
all_classes=self.all_classes,
discrete_input=self.discrete_input,
)
@classmethod
def _return_callback(cls, return_value, *args, **kwargs):
x, y, *z = return_value
return x, y.long(), *z
@deprecated
class Music(SequenceDataset):
_name_ = "music"
@property
def d_input(self):
return 1
@property
def d_output(self):
return 256
@property
def l_output(self):
return self.sample_rate * self.sample_len
@property
def n_tokens(self):
return 256 if self.discrete_input else None
@property
def init_defaults(self):
return {
"sample_len": 1,
"sample_rate": 16000,
"train_percentage": 0.88,
"discrete_input": False,
}
def init(self):
return
def setup(self):
from src.dataloaders.music import _Music
self.music_class = _Music(
path=default_data_path,
sample_len=self.sample_len, # In seconds
sample_rate=self.sample_rate,
train_percentage=self.train_percentage, # Use settings from SampleRNN paper
discrete_input=self.discrete_input,
)
self.dataset_train = self.music_class.get_data("train")
self.dataset_test = self.music_class.get_data("test")
self.dataset_val = self.music_class.get_data("val")
@classmethod
def _return_callback(cls, return_value, *args, **kwargs):
x, y, *z = return_value
return x, y.long(), *z
| state-spaces-main | src/dataloaders/audio.py |
"""Core dataloader interface."""
import os
import pickle
from functools import partial
from pathlib import Path
import numpy as np
import torch
import torchaudio.functional as TF
import torchvision
from einops import rearrange
from einops.layers.torch import Rearrange
from src.utils import is_list, permutations
from torch.nn import functional as F
def deprecated(cls_or_func):
def _deprecated(*args, **kwargs):
print(f"{cls_or_func} is deprecated")
return cls_or_func(*args, **kwargs)
return _deprecated
# Default data path is environment variable or hippo/data
if (default_data_path := os.getenv("DATA_PATH")) is None:
default_data_path = Path(__file__).parent.parent.parent.absolute()
default_data_path = default_data_path / "data"
else:
default_data_path = Path(default_data_path).absolute()
class DefaultCollateMixin:
"""Controls collating in the DataLoader
The CollateMixin classes instantiate a dataloader by separating collate arguments from the rest of the
dataloader arguments. Instantiations of this class should modify the callback functions as desired, and
modify the collate_args list. The class then defines a _dataloader() method which takes in a DataLoader
constructor and arguments, constructs a collate_fn based on the collate_args, and passes the rest of the
arguments into the constructor.
"""
@classmethod
def _collate_callback(cls, x, *args, **kwargs):
"""
Modify the behavior of the default _collate method.
"""
return x
_collate_arg_names = []
@classmethod
def _return_callback(cls, return_value, *args, **kwargs):
"""
Modify the return value of the collate_fn.
Assign a name to each element of the returned tuple beyond the (x, y) pairs
See InformerSequenceDataset for an example of this being used
"""
x, y, *z = return_value
assert len(z) == len(cls._collate_arg_names), "Specify a name for each auxiliary data item returned by dataset"
return x, y, {k: v for k, v in zip(cls._collate_arg_names, z)}
@classmethod
def _collate(cls, batch, *args, **kwargs):
# From https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py
elem = batch[0]
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum(x.numel() for x in batch)
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
x = torch.stack(batch, dim=0, out=out)
# Insert custom functionality into the collate_fn
x = cls._collate_callback(x, *args, **kwargs)
return x
else:
return torch.tensor(batch)
@classmethod
def _collate_fn(cls, batch, *args, **kwargs):
"""
Default collate function.
Generally accessed by the dataloader() methods to pass into torch DataLoader
Arguments:
batch: list of (x, y) pairs
args, kwargs: extra arguments that get passed into the _collate_callback and _return_callback
"""
x, y, *z = zip(*batch)
x = cls._collate(x, *args, **kwargs)
y = cls._collate(y)
z = [cls._collate(z_) for z_ in z]
return_value = (x, y, *z)
return cls._return_callback(return_value, *args, **kwargs)
# List of loader arguments to pass into collate_fn
collate_args = []
def _dataloader(self, dataset, **loader_args):
collate_args = {k: loader_args[k] for k in loader_args if k in self.collate_args}
loader_args = {k: loader_args[k] for k in loader_args if k not in self.collate_args}
loader_cls = loader_registry[loader_args.pop("_name_", None)]
return loader_cls(
dataset=dataset,
collate_fn=partial(self._collate_fn, **collate_args),
**loader_args,
)
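# Illustrative sketch (not part of the original file): the default collate
# stacks a list of (x, y) pairs into batched tensors; auxiliary collate args
# are empty here. Shapes are assumptions.
def _example_default_collate():
    batch = [(torch.randn(16, 1), torch.tensor(0)), (torch.randn(16, 1), torch.tensor(1))]
    x, y, z = DefaultCollateMixin._collate_fn(batch)
    return x.shape, y.shape, z  # torch.Size([2, 16, 1]), torch.Size([2]), {}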
class SequenceResolutionCollateMixin(DefaultCollateMixin):
"""self.collate_fn(resolution) produces a collate function that subsamples elements of the sequence"""
@classmethod
def _collate_callback(cls, x, resolution=None):
if resolution is None:
pass
elif is_list(resolution): # Resize to first resolution, then apply resampling technique
# Sample to first resolution
x = x.squeeze(-1) # (B, L)
L = x.size(1)
x = x[:, ::resolution[0]] # assume length is first axis after batch
_L = L // resolution[0]
for r in resolution[1:]:
x = TF.resample(x, _L, L//r)
_L = L // r
x = x.unsqueeze(-1) # (B, L, 1)
else:
# Assume x is (B, L_0, L_1, ..., L_k, C) for x.ndim > 2 and (B, L) for x.ndim = 2
assert x.ndim >= 2
n_resaxes = max(1, x.ndim - 2) # [AG 22/07/02] this line looks suspicious... are there cases with 2 axes?
# rearrange: b (l_0 res_0) (l_1 res_1) ... (l_k res_k) ... -> res_0 res_1 .. res_k b l_0 l_1 ...
lhs = "b " + " ".join([f"(l{i} res{i})" for i in range(n_resaxes)]) + " ..."
rhs = " ".join([f"res{i}" for i in range(n_resaxes)]) + " b " + " ".join([f"l{i}" for i in range(n_resaxes)]) + " ..."
x = rearrange(x, lhs + " -> " + rhs, **{f'res{i}': resolution for i in range(n_resaxes)})
x = x[tuple([0] * n_resaxes)]
return x
@classmethod
def _return_callback(cls, return_value, resolution=None):
return *return_value, {"rate": resolution}
collate_args = ['resolution']
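# Illustrative sketch (not part of the original file): an integer resolution
# subsamples the length dimension with a stride.
def _example_resolution_subsample():
    x = torch.arange(8.).reshape(1, 8, 1)  # (B, L, C)
    out = SequenceResolutionCollateMixin._collate_callback(x, resolution=2)
    return out.squeeze()  # tensor([0., 2., 4., 6.])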
class ImageResolutionCollateMixin(SequenceResolutionCollateMixin):
"""self.collate_fn(resolution, img_size) produces a collate function that resizes inputs to size img_size/resolution"""
_interpolation = torchvision.transforms.InterpolationMode.BILINEAR
_antialias = True
@classmethod
def _collate_callback(cls, x, resolution=None, img_size=None, channels_last=True):
if x.ndim < 4:
return super()._collate_callback(x, resolution=resolution)
if img_size is None:
x = super()._collate_callback(x, resolution=resolution)
else:
x = rearrange(x, 'b ... c -> b c ...') if channels_last else x
_size = round(img_size/resolution)
x = torchvision.transforms.functional.resize(
x,
size=[_size, _size],
interpolation=cls._interpolation,
antialias=cls._antialias,
)
x = rearrange(x, 'b c ... -> b ... c') if channels_last else x
return x
@classmethod
def _return_callback(cls, return_value, resolution=None, img_size=None, channels_last=True):
return *return_value, {"rate": resolution}
collate_args = ['resolution', 'img_size', 'channels_last']
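# Illustrative sketch (not part of the original file): resize a channels-last
# image batch to img_size / resolution pixels per side. Shapes are assumptions.
def _example_image_resolution_collate():
    x = torch.randn(2, 32, 32, 3)  # (B, H, W, C)
    out = ImageResolutionCollateMixin._collate_callback(x, resolution=2, img_size=32)
    return out.shape  # torch.Size([2, 16, 16, 3])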
class TBPTTDataLoader(torch.utils.data.DataLoader):
"""
Adapted from https://github.com/deepsound-project/samplernn-pytorch
"""
def __init__(
self,
dataset,
batch_size,
chunk_len,
overlap_len,
*args,
**kwargs
):
super().__init__(dataset, batch_size, *args, **kwargs)
assert chunk_len is not None and overlap_len is not None, "TBPTTDataLoader: chunk_len and overlap_len must be specified."
# Zero padding value, given by the dataset
self.zero = dataset.zero if hasattr(dataset, "zero") else 0
# Size of the chunks to be fed into the model
self.chunk_len = chunk_len
# Keep `overlap_len` from the previous chunk (e.g. SampleRNN requires this)
self.overlap_len = overlap_len
def __iter__(self):
for batch in super().__iter__():
x, y, z = batch # (B, L) (B, L, 1) {'lengths': (B,)}
# Pad with self.overlap_len - 1 zeros
pad = lambda x, val: torch.cat([x.new_zeros((x.shape[0], self.overlap_len - 1, *x.shape[2:])) + val, x], dim=1)
x = pad(x, self.zero)
y = pad(y, 0)
z = { k: pad(v, 0) for k, v in z.items() if v.ndim > 1 }
_, seq_len, *_ = x.shape
reset = True
for seq_begin in list(range(self.overlap_len - 1, seq_len, self.chunk_len))[:-1]:
from_index = seq_begin - self.overlap_len + 1
to_index = seq_begin + self.chunk_len
# TODO: check this
# Ensure divisible by overlap_len
if self.overlap_len > 0:
to_index = min(to_index, seq_len - ((seq_len - self.overlap_len + 1) % self.overlap_len))
x_chunk = x[:, from_index:to_index]
if len(y.shape) == 3:
y_chunk = y[:, seq_begin:to_index]
else:
y_chunk = y
z_chunk = {k: v[:, from_index:to_index] for k, v in z.items() if len(v.shape) > 1}
yield (x_chunk, y_chunk, {**z_chunk, "reset": reset})
reset = False
def __len__(self):
raise NotImplementedError()
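# Illustrative sketch (not part of the original file): the chunk boundaries that
# __iter__ above walks over, for a toy configuration; each chunk keeps
# overlap_len - 1 frames of left context. The sizes are assumptions.
def _example_tbptt_chunks(seq_len=16, chunk_len=4, overlap_len=2):
    chunks = []
    for seq_begin in list(range(overlap_len - 1, seq_len, chunk_len))[:-1]:
        from_index = seq_begin - overlap_len + 1
        to_index = seq_begin + chunk_len
        if overlap_len > 0:
            to_index = min(to_index, seq_len - ((seq_len - overlap_len + 1) % overlap_len))
        chunks.append((from_index, to_index))
    return chunks  # [(0, 5), (4, 9), (8, 13)] for the defaults above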
# class SequenceDataset(LightningDataModule):
# [21-09-10 AG] Subclassing LightningDataModule fails due to trying to access _has_setup_fit. No idea why. So we just provide our own class with the same core methods as LightningDataModule (e.g. setup)
class SequenceDataset(DefaultCollateMixin):
registry = {}
_name_ = NotImplementedError("Dataset must have shorthand name")
# Since subclasses do not specify __init__ which is instead handled by this class
# Subclasses can provide a list of default arguments which are automatically registered as attributes
# TODO it might be possible to write this as a @dataclass, but it seems tricky to separate from the other features of this class such as the _name_ and d_input/d_output
@property
def init_defaults(self):
return {}
# https://www.python.org/dev/peps/pep-0487/#subclass-registration
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.registry[cls._name_] = cls
def __init__(self, _name_, data_dir=None, **dataset_cfg):
assert _name_ == self._name_
self.data_dir = Path(data_dir).absolute() if data_dir is not None else None
# Add all arguments to self
init_args = self.init_defaults.copy()
init_args.update(dataset_cfg)
for k, v in init_args.items():
setattr(self, k, v)
# The train, val, test datasets must be set by `setup()`
self.dataset_train = self.dataset_val = self.dataset_test = None
self.init()
def init(self):
"""Hook called at end of __init__, override this instead of __init__"""
pass
def setup(self):
"""This method should set self.dataset_train, self.dataset_val, and self.dataset_test."""
raise NotImplementedError
def split_train_val(self, val_split):
"""
Randomly split self.dataset_train into a new (self.dataset_train, self.dataset_val) pair.
"""
train_len = int(len(self.dataset_train) * (1.0 - val_split))
self.dataset_train, self.dataset_val = torch.utils.data.random_split(
self.dataset_train,
(train_len, len(self.dataset_train) - train_len),
generator=torch.Generator().manual_seed(
getattr(self, "seed", 42)
), # PL is supposed to have a way to handle seeds properly, but doesn't seem to work for us
)
def train_dataloader(self, **kwargs):
return self._train_dataloader(self.dataset_train, **kwargs)
def _train_dataloader(self, dataset, **kwargs):
if dataset is None: return
kwargs['shuffle'] = 'sampler' not in kwargs # shuffle can't be True if we have a custom sampler
return self._dataloader(dataset, **kwargs)
def val_dataloader(self, **kwargs):
return self._eval_dataloader(self.dataset_val, **kwargs)
def test_dataloader(self, **kwargs):
return self._eval_dataloader(self.dataset_test, **kwargs)
def _eval_dataloader(self, dataset, **kwargs):
if dataset is None: return
# Note that shuffle=False by default
return self._dataloader(dataset, **kwargs)
def __str__(self):
return self._name_
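# Illustrative sketch (not part of the original file): datasets register
# themselves under _name_ via __init_subclass__, so they can be constructed by
# name. The example name assumes the corresponding dataset module (e.g.
# src.dataloaders.audio) has already been imported.
def _example_build_dataset_by_name(name="sc09", **dataset_cfg):
    cls = SequenceDataset.registry[name]
    dataset = cls(_name_=name, **dataset_cfg)
    dataset.setup()  # populates dataset_train / dataset_val / dataset_test
    return dataset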
class ResolutionSequenceDataset(SequenceDataset, SequenceResolutionCollateMixin):
def _train_dataloader(self, dataset, train_resolution=None, eval_resolutions=None, **kwargs):
if train_resolution is None: train_resolution = [1]
if not is_list(train_resolution): train_resolution = [train_resolution]
assert len(train_resolution) == 1, "Only one train resolution supported for now."
return super()._train_dataloader(dataset, resolution=train_resolution[0], **kwargs)
def _eval_dataloader(self, dataset, train_resolution=None, eval_resolutions=None, **kwargs):
if dataset is None: return
if eval_resolutions is None: eval_resolutions = [1]
if not is_list(eval_resolutions): eval_resolutions = [eval_resolutions]
dataloaders = []
for resolution in eval_resolutions:
dataloaders.append(super()._eval_dataloader(dataset, resolution=resolution, **kwargs))
return (
{
None if res == 1 else str(res): dl
for res, dl in zip(eval_resolutions, dataloaders)
}
if dataloaders is not None else None
)
class ImageResolutionSequenceDataset(ResolutionSequenceDataset, ImageResolutionCollateMixin):
pass
# Registry for dataloader class
loader_registry = {
"tbptt": TBPTTDataLoader,
None: torch.utils.data.DataLoader, # default case
}
| state-spaces-main | src/dataloaders/base.py |
"""Utilities for working with .ts files.
Taken from https://github.com/ChangWeiTan/TS-Extrinsic-Regression/blob/master/utils/data_loader.py.
Required to handle the @targetlabel tag which sktime.data_io.load_from_tsfile_to_dataframe does not support.
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
name = "DataLoader"
regression_datasets = ["AustraliaRainfall",
"HouseholdPowerConsumption1",
"HouseholdPowerConsumption2",
"BeijingPM25Quality",
"BeijingPM10Quality",
"Covid3Month",
"LiveFuelMoistureContent",
"FloodModeling1",
"FloodModeling2",
"FloodModeling3",
"AppliancesEnergy",
"BenzeneConcentration",
"NewsHeadlineSentiment",
"NewsTitleSentiment",
"BIDMC32RR",
"BIDMC32HR",
"BIDMC32SpO2",
"IEEEPPG",
"PPGDalia"]
# The following code is adapted from the python package sktime to read .ts file.
class TsFileParseException(Exception):
"""
Should be raised when parsing a .ts file and the format is incorrect.
"""
pass
def load_from_tsfile_to_dataframe(full_file_path_and_name, return_separate_X_and_y=True,
replace_missing_vals_with='NaN'):
"""Loads data from a .ts file into a Pandas DataFrame.
Parameters
----------
full_file_path_and_name: str
The full pathname of the .ts file to read.
return_separate_X_and_y: bool
True if X and y values should be returned as a separate DataFrame (X) and a numpy array (y), False otherwise.
This is only relevant for data that has associated class or target values.
replace_missing_vals_with: str
The value that missing values in the text file should be replaced with prior to parsing.
Returns
-------
DataFrame, ndarray
If return_separate_X_and_y is True, a tuple containing a DataFrame with the relevant time-series and a numpy array with the corresponding class/target values.
DataFrame
If return_separate_X_and_y is False, a single DataFrame containing all time-series and (if relevant) a column "class_vals" with the associated class values.
"""
# Initialize flags and variables used when parsing the file
metadata_started = False
data_started = False
has_problem_name_tag = False
has_timestamps_tag = False
has_univariate_tag = False
has_class_labels_tag = False
has_target_labels_tag = False
has_data_tag = False
previous_timestamp_was_float = None
previous_timestamp_was_int = None
previous_timestamp_was_timestamp = None
num_dimensions = None
is_first_case = True
instance_list = []
class_val_list = []
line_num = 0
# Parse the file
# print(full_file_path_and_name)
# with open(full_file_path_and_name, 'r', encoding='utf-8') as file:
with open(full_file_path_and_name, 'r', encoding='latin1') as file:
for line in tqdm(file):
# print(".", end='')
# Strip white space from start/end of line and change to lowercase for use below
line = line.strip().lower()
# Empty lines are valid at any point in a file
if line:
# Check if this line contains metadata
# Please note that even though metadata is stored in this function it is not currently published externally
if line.startswith("@problemname"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException("problemname tag requires an associated value")
problem_name = line[len("@problemname") + 1:]
has_problem_name_tag = True
metadata_started = True
elif line.startswith("@timestamps"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len != 2:
raise TsFileParseException("timestamps tag requires an associated Boolean value")
elif tokens[1] == "true":
timestamps = True
elif tokens[1] == "false":
timestamps = False
else:
raise TsFileParseException("invalid timestamps value")
has_timestamps_tag = True
metadata_started = True
elif line.startswith("@univariate"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len != 2:
raise TsFileParseException("univariate tag requires an associated Boolean value")
elif tokens[1] == "true":
univariate = True
elif tokens[1] == "false":
univariate = False
else:
raise TsFileParseException("invalid univariate value")
has_univariate_tag = True
metadata_started = True
elif line.startswith("@classlabel"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException("classlabel tag requires an associated Boolean value")
if tokens[1] == "true":
class_labels = True
elif tokens[1] == "false":
class_labels = False
else:
raise TsFileParseException("invalid classLabel value")
# Check if we have any associated class values
if token_len == 2 and class_labels:
raise TsFileParseException("if the classlabel tag is true then class values must be supplied")
has_class_labels_tag = True
class_label_list = [token.strip() for token in tokens[2:]]
metadata_started = True
elif line.startswith("@targetlabel"):
# Check that the data has not started
if data_started:
raise TsFileParseException("metadata must come before data")
# Check that the associated value is valid
tokens = line.split(' ')
token_len = len(tokens)
if token_len == 1:
raise TsFileParseException("targetlabel tag requires an associated Boolean value")
if tokens[1] == "true":
target_labels = True
elif tokens[1] == "false":
target_labels = False
else:
raise TsFileParseException("invalid targetLabel value")
has_target_labels_tag = True
class_val_list = []
metadata_started = True
# Check if this line contains the start of data
elif line.startswith("@data"):
if line != "@data":
raise TsFileParseException("data tag should not have an associated value")
if data_started and not metadata_started:
raise TsFileParseException("metadata must come before data")
else:
has_data_tag = True
data_started = True
# If the '@data' tag has been found then metadata has been parsed and data can be loaded
elif data_started:
# Check that a full set of metadata has been provided
incomplete_regression_meta_data = not has_problem_name_tag or not has_timestamps_tag or not has_univariate_tag or not has_target_labels_tag or not has_data_tag
incomplete_classification_meta_data = not has_problem_name_tag or not has_timestamps_tag or not has_univariate_tag or not has_class_labels_tag or not has_data_tag
if incomplete_regression_meta_data and incomplete_classification_meta_data:
raise TsFileParseException("a full set of metadata has not been provided before the data")
# Replace any missing values with the value specified
line = line.replace("?", replace_missing_vals_with)
# Check if we are dealing with data that has timestamps
if timestamps:
# We're dealing with timestamps so cannot just split line on ':' as timestamps may contain one
has_another_value = False
has_another_dimension = False
timestamps_for_dimension = []
values_for_dimension = []
this_line_num_dimensions = 0
line_len = len(line)
char_num = 0
while char_num < line_len:
# Move through any spaces
while char_num < line_len and str.isspace(line[char_num]):
char_num += 1
# See if there is any more data to read in or if we should validate that read thus far
if char_num < line_len:
# See if we have an empty dimension (i.e. no values)
if line[char_num] == ":":
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series())
this_line_num_dimensions += 1
has_another_value = False
has_another_dimension = True
timestamps_for_dimension = []
values_for_dimension = []
char_num += 1
else:
# Check if we have reached a class label
if line[char_num] != "(" and target_labels:
class_val = line[char_num:].strip()
# if class_val not in class_val_list:
# raise TsFileParseException(
# "the class value '" + class_val + "' on line " + str(
# line_num + 1) + " is not valid")
class_val_list.append(float(class_val))
char_num = line_len
has_another_value = False
has_another_dimension = False
timestamps_for_dimension = []
values_for_dimension = []
else:
# Read in the data contained within the next tuple
if line[char_num] != "(" and not target_labels:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " does not start with a '('")
char_num += 1
tuple_data = ""
while char_num < line_len and line[char_num] != ")":
tuple_data += line[char_num]
char_num += 1
if char_num >= line_len or line[char_num] != ")":
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " does not end with a ')'")
# Read in any spaces immediately after the current tuple
char_num += 1
while char_num < line_len and str.isspace(line[char_num]):
char_num += 1
# Check if there is another value or dimension to process after this tuple
if char_num >= line_len:
has_another_value = False
has_another_dimension = False
elif line[char_num] == ",":
has_another_value = True
has_another_dimension = False
elif line[char_num] == ":":
has_another_value = False
has_another_dimension = True
char_num += 1
# Get the numeric value for the tuple by reading from the end of the tuple data backwards to the last comma
last_comma_index = tuple_data.rfind(',')
if last_comma_index == -1:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " contains a tuple that has no comma inside of it")
try:
value = tuple_data[last_comma_index + 1:]
value = float(value)
except ValueError:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " contains a tuple that does not have a valid numeric value")
# Check the type of timestamp that we have
timestamp = tuple_data[0: last_comma_index]
try:
timestamp = int(timestamp)
timestamp_is_int = True
timestamp_is_timestamp = False
except ValueError:
timestamp_is_int = False
if not timestamp_is_int:
try:
timestamp = float(timestamp)
timestamp_is_float = True
timestamp_is_timestamp = False
except ValueError:
timestamp_is_float = False
if not timestamp_is_int and not timestamp_is_float:
try:
timestamp = timestamp.strip()
timestamp_is_timestamp = True
except ValueError:
timestamp_is_timestamp = False
# Make sure that the timestamps in the file (not just this dimension or case) are consistent
if not timestamp_is_timestamp and not timestamp_is_int and not timestamp_is_float:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " contains a tuple that has an invalid timestamp '" + timestamp + "'")
if previous_timestamp_was_float is not None and previous_timestamp_was_float and not timestamp_is_float:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " contains tuples where the timestamp format is inconsistent")
if previous_timestamp_was_int is not None and previous_timestamp_was_int and not timestamp_is_int:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " contains tuples where the timestamp format is inconsistent")
if previous_timestamp_was_timestamp is not None and previous_timestamp_was_timestamp and not timestamp_is_timestamp:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " contains tuples where the timestamp format is inconsistent")
# Store the values
timestamps_for_dimension += [timestamp]
values_for_dimension += [value]
# If this was our first tuple then we store the type of timestamp we had
if previous_timestamp_was_timestamp is None and timestamp_is_timestamp:
previous_timestamp_was_timestamp = True
previous_timestamp_was_int = False
previous_timestamp_was_float = False
if previous_timestamp_was_int is None and timestamp_is_int:
previous_timestamp_was_timestamp = False
previous_timestamp_was_int = True
previous_timestamp_was_float = False
if previous_timestamp_was_float is None and timestamp_is_float:
previous_timestamp_was_timestamp = False
previous_timestamp_was_int = False
previous_timestamp_was_float = True
# See if we should add the data for this dimension
if not has_another_value:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
if timestamp_is_timestamp:
timestamps_for_dimension = pd.DatetimeIndex(timestamps_for_dimension)
instance_list[this_line_num_dimensions].append(
pd.Series(index=timestamps_for_dimension, data=values_for_dimension))
this_line_num_dimensions += 1
timestamps_for_dimension = []
values_for_dimension = []
elif has_another_value:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " ends with a ',' that is not followed by another tuple")
elif has_another_dimension and target_labels:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " ends with a ':' while it should list a class value")
elif has_another_dimension and not target_labels:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series(dtype=np.float32))
this_line_num_dimensions += 1
num_dimensions = this_line_num_dimensions
# If this is the 1st line of data we have seen then note the dimensions
if not has_another_value and not has_another_dimension:
if num_dimensions is None:
num_dimensions = this_line_num_dimensions
if num_dimensions != this_line_num_dimensions:
raise TsFileParseException("line " + str(
line_num + 1) + " does not have the same number of dimensions as the previous line of data")
# Check that we are not expecting any more data, and if not, store the data processed above
if has_another_value:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " ends with a ',' that is not followed by another tuple")
elif has_another_dimension and target_labels:
raise TsFileParseException(
"dimension " + str(this_line_num_dimensions + 1) + " on line " + str(
line_num + 1) + " ends with a ':' while it should list a class value")
elif has_another_dimension and not target_labels:
if len(instance_list) < (this_line_num_dimensions + 1):
instance_list.append([])
instance_list[this_line_num_dimensions].append(pd.Series())
this_line_num_dimensions += 1
num_dimensions = this_line_num_dimensions
# If this is the 1st line of data we have seen then note the dimensions
if not has_another_value and num_dimensions != this_line_num_dimensions:
raise TsFileParseException("line " + str(
line_num + 1) + " does not have the same number of dimensions as the previous line of data")
# Check if we should have class values, and if so that they are contained in those listed in the metadata
if target_labels and len(class_val_list) == 0:
raise TsFileParseException("the cases have no associated class values")
else:
dimensions = line.split(":")
# If first row then note the number of dimensions (that must be the same for all cases)
if is_first_case:
num_dimensions = len(dimensions)
if target_labels:
num_dimensions -= 1
for dim in range(0, num_dimensions):
instance_list.append([])
is_first_case = False
# See how many dimensions the case represented in this line has
this_line_num_dimensions = len(dimensions)
if target_labels:
this_line_num_dimensions -= 1
# All dimensions should be included for all series, even if they are empty
if this_line_num_dimensions != num_dimensions:
raise TsFileParseException("inconsistent number of dimensions. Expecting " + str(
num_dimensions) + " but have read " + str(this_line_num_dimensions))
# Process the data for each dimension
for dim in range(0, num_dimensions):
dimension = dimensions[dim].strip()
if dimension:
data_series = dimension.split(",")
data_series = [float(i) for i in data_series]
instance_list[dim].append(pd.Series(data_series))
else:
instance_list[dim].append(pd.Series())
if target_labels:
class_val_list.append(float(dimensions[num_dimensions].strip()))
line_num += 1
# Check that the file was not empty
if line_num:
# Check that the file contained both metadata and data
complete_regression_meta_data = has_problem_name_tag and has_timestamps_tag and has_univariate_tag and has_target_labels_tag and has_data_tag
complete_classification_meta_data = has_problem_name_tag and has_timestamps_tag and has_univariate_tag and has_class_labels_tag and has_data_tag
if metadata_started and not complete_regression_meta_data and not complete_classification_meta_data:
raise TsFileParseException("metadata incomplete")
elif metadata_started and not data_started:
raise TsFileParseException("file contained metadata but no data")
elif metadata_started and data_started and len(instance_list) == 0:
raise TsFileParseException("file contained metadata but no data")
# Create a DataFrame from the data parsed above
data = pd.DataFrame(dtype=np.float32)
for dim in range(0, num_dimensions):
data['dim_' + str(dim)] = instance_list[dim]
# Check if we should return any associated class labels separately
if target_labels:
if return_separate_X_and_y:
return data, np.asarray(class_val_list)
else:
data['class_vals'] = pd.Series(class_val_list)
return data
else:
return data
else:
raise TsFileParseException("empty file")
| state-spaces-main | src/dataloaders/prepare/bidmc/data_loader.py |
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import sktime
from sktime.datasets import load_from_tsfile_to_dataframe
import data_loader as data
DATA_PATH = "data/"
def split_data(
X_train_orig, y_train_orig, X_test_orig, y_test_orig, shuffle=True, seed=0
):
if shuffle:
X_all = pd.concat((X_train_orig, X_test_orig))
y_all = np.concatenate((y_train_orig, y_test_orig))
X_train, X_eval, y_train, y_eval = train_test_split(
X_all, y_all, test_size=0.3, random_state=seed
)
X_val, X_test, y_val, y_test = train_test_split(
X_eval, y_eval, test_size=0.5, random_state=seed + 1
)
else:
X_test, y_test = X_test_orig, y_test_orig
val_size = int(X_train_orig.shape[0] / 7.0) # .6 / .1 / .3 split
X_train, y_train = X_train_orig[:-val_size], y_train_orig[:-val_size]
X_val, y_val = X_train_orig[-val_size:], y_train_orig[-val_size:]
# X_train, X_val, y_train, y_val = train_test_split(X_train_orig, y_train_orig, test_size=0.20, random_state=seed)
return X_train, y_train, X_val, y_val, X_test, y_test
def _to_numpy(X):
""" Convert DataFrame of series into numpy array """
return np.stack([np.stack(x) for x in X.to_numpy()]).swapaxes(-1, -2)
def process_data(DATASET, shuffle=True, seed=0):
X_train_orig, y_train_orig = data.load_from_tsfile_to_dataframe(
os.path.join(f"{DATASET}/BIDMC32{DATASET}_TRAIN.ts"),
replace_missing_vals_with="NaN",
)
X_test_orig, y_test_orig = data.load_from_tsfile_to_dataframe(
os.path.join(f"{DATASET}/BIDMC32{DATASET}_TEST.ts"),
replace_missing_vals_with="NaN",
)
X_train, y_train, X_val, y_val, X_test, y_test = split_data(
X_train_orig, y_train_orig, X_test_orig, y_test_orig, shuffle=shuffle, seed=seed
)
split = "reshuffle" if shuffle else "original"
data_dir = os.path.join(DATASET, split)
os.makedirs(data_dir, exist_ok=True)
np.save(os.path.join(data_dir, "trainx.npy"), _to_numpy(X_train))
np.save(os.path.join(data_dir, "trainy.npy"), y_train)
np.save(os.path.join(data_dir, "validx.npy"), _to_numpy(X_val))
np.save(os.path.join(data_dir, "validy.npy"), y_val)
np.save(os.path.join(data_dir, "testx.npy"), _to_numpy(X_test))
np.save(os.path.join(data_dir, "testy.npy"), y_test)
for f in ["trainx", "trainy", "validx", "validy", "testx", "testy"]:
df = np.load(f"{DATASET}/{split}/{f}.npy")
print(f, df.shape, df.dtype)
if __name__ == "__main__":
for DATASET in ["RR", "HR", "SpO2"]:
process_data(DATASET, shuffle=True)
| state-spaces-main | src/dataloaders/prepare/bidmc/process_data.py |
"""Implementation of standard Copying dataset.
Originally used in Arjovsky's Unitary RNN, maybe earlier?
"""
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from src.utils import distributed
def np_copying_data(L, M, A, batch_shape=()):
seq = np.random.randint(low=1, high=A-1, size=batch_shape+(M,))
zeros_x = np.zeros(batch_shape+(L,))
markers = (A-1) * np.ones(batch_shape+(M,))
zeros_y = np.zeros(batch_shape+(M+L,))
x_ = np.concatenate([seq, zeros_x, markers], axis=-1)
y_ = np.concatenate([zeros_y, seq], axis=-1)
x = F.one_hot(torch.tensor(x_, dtype=torch.int64), A).float()
y = torch.tensor(y_, dtype=torch.int64)
return x, y
def torch_copying_data(L, M, A, variable=False, variable_length=False, batch_shape=(), one_hot=False, reverse=False):
if variable_length:
M = int(random.random() * M) + 1
tokens = torch.randint(low=1, high=A-1, size=batch_shape+(M,))
if variable:
total_batch = int(np.prod(batch_shape))
inds = torch.stack([
torch.randperm(L+M)[:M]
for _ in range(total_batch)
], 0)
inds = inds.reshape(batch_shape+(M,))
inds, _ = inds.sort()
else:
inds = torch.arange(M).repeat(batch_shape+(1,))
zeros_x = torch.zeros(batch_shape+(M+L,), dtype=torch.long)
zeros_x.scatter_(-1, inds, tokens)
markers = (A-1) * torch.ones(batch_shape+(M,), dtype=torch.long)
x_ = torch.cat([zeros_x, markers], dim=-1)
y_ = torch.cat([tokens], dim=-1)
if reverse: y_ = y_.flip(-1)
if one_hot: x = F.one_hot(x_, A).float()
else: x = x_
y = y_
return x, y
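# Hedged usage sketch (illustrative, not part of the original file): shapes produced by
# torch_copying_data for the standard copying task, using assumed values L=100 noise
# steps, M=10 memory tokens, alphabet size A=10, and a batch of 32.
#
#   x, y = torch_copying_data(L=100, M=10, A=10, batch_shape=(32,))
#   # x: (32, 120) long tensor -- M tokens, L blank positions, then M end-of-sequence markers
#   # y: (32, 10)  long tensor -- the M tokens the model must reproduce after the markers
#   # with one_hot=True, x instead has shape (32, 120, 10) and dtype float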
def torch_copying_lag_data(L, M, A, batch_shape=()):
x = torch.randint(low=1, high=A-1, size=batch_shape+(L,))
y = F.pad(x, (M, 0))[..., :L]
return x, y
class CopyingTrainDataset(torch.utils.data.Dataset):
def __init__(self, L, M, A, samples, lag=False, variable=False, variable_length=False, one_hot=False, reverse=False):
"""
L: number of noise tokens
M: number of memorization tokens
A: size of dictionary
"""
super().__init__()
self.L = L
self.M = M
self.A = A
self.samples = samples
self.variable = variable
self.variable_length = variable_length
self.one_hot = one_hot
self.lag = lag
self.reverse = reverse
def __getitem__(self, idx):
assert 0 <= idx < self.samples
if self.lag:
x, y = torch_copying_lag_data(self.L, self.M, self.A)
else:
x, y = torch_copying_data(self.L, self.M, self.A, variable=self.variable, variable_length=self.variable_length, one_hot=self.one_hot, reverse=self.reverse)
return x, y
def __len__(self):
return self.samples
class CopyingEvalDataset(torch.utils.data.TensorDataset):
def __init__(self, L, M, A, samples, lag=None, variable=False, variable_length=False, one_hot=False, reverse=False):
self.L = L
self.M = M
self.A = A
self.samples = samples
if lag:
all_x, all_y = torch_copying_lag_data(self.L, self.M, self.A, batch_shape=(self.samples,))
else:
all_x, all_y = torch_copying_data(self.L, self.M, self.A, batch_shape=(self.samples,), variable=variable, variable_length=False, one_hot=one_hot, reverse=reverse)
super().__init__(all_x, all_y)
def copying_static_dataset(L, M, A, variable, samples):
all_x, all_y = torch_copying_data(L, M, A, variable, batch_shape=(samples,))
print("Constructing Copying dataset of shape", all_x.shape)
ds = torch.utils.data.TensorDataset(all_x, all_y)
return ds
| state-spaces-main | src/dataloaders/datasets/copying.py |
"""Speech Commands dataset.
Adapted from https://github.com/dwromero/ckconv/blob/dc84dceb490cab2f2ddf609c380083367af21890/datasets/speech_commands.py
which is
adapted from https://github.com/patrick-kidger/NeuralCDE/blob/758d3a7134e3a691013e5cc6b7f68f277e9e6b69/experiments/datasets/speech_commands.py
"""
import os
import pathlib
import tarfile
import urllib.request
import sklearn.model_selection
import torch
import torch.nn.functional as F
import torchaudio
def pad(channel, maxlen):
channel = torch.tensor(channel)
out = torch.full((maxlen,), channel[-1])
out[: channel.size(0)] = channel
return out
def subsample(X, y, subsample_rate):
if subsample_rate != 1:
X = X[:, ::subsample_rate, :]
return X, y
def save_data(dir, **tensors):
for tensor_name, tensor_value in tensors.items():
torch.save(tensor_value, str(dir / tensor_name) + ".pt")
def load_data(dir):
tensors = {}
for filename in os.listdir(dir):
if filename.endswith(".pt"):
tensor_name = filename.split(".")[0]
tensor_value = torch.load(str(dir / filename))
tensors[tensor_name] = tensor_value
return tensors
def normalise_data(X, y):
train_X, _, _ = split_data(X, y)
out = []
for Xi, train_Xi in zip(X.unbind(dim=-1), train_X.unbind(dim=-1)):
train_Xi_nonan = train_Xi.masked_select(~torch.isnan(train_Xi))
mean = train_Xi_nonan.mean() # compute statistics using only training data.
std = train_Xi_nonan.std()
out.append((Xi - mean) / (std + 1e-5))
out = torch.stack(out, dim=-1)
return out
def normalize_all_data(X_train, X_val, X_test):
for i in range(X_train.shape[-1]):
mean = X_train[:, :, i].mean()
std = X_train[:, :, i].std()
X_train[:, :, i] = (X_train[:, :, i] - mean) / (std + 1e-5)
X_val[:, :, i] = (X_val[:, :, i] - mean) / (std + 1e-5)
X_test[:, :, i] = (X_test[:, :, i] - mean) / (std + 1e-5)
return X_train, X_val, X_test
def minmax_scale(tensor):
min_val = torch.amin(tensor, dim=(1, 2), keepdim=True)
max_val = torch.amax(tensor, dim=(1, 2), keepdim=True)
return (tensor - min_val) / (max_val - min_val)
def mu_law_encode(audio, bits=8):
"""
Perform mu-law companding transformation.
"""
mu = torch.tensor(2**bits - 1)
# Audio must be min-max scaled between -1 and 1
audio = 2 * minmax_scale(audio) - 1
# Perform mu-law companding transformation.
numerator = torch.log1p(mu * torch.abs(audio))
denominator = torch.log1p(mu)
encoded = torch.sign(audio) * (numerator / denominator)
# Quantize signal to the specified number of levels.
return ((encoded + 1) / 2 * mu + 0.5).to(torch.int32)
def mu_law_decode(encoded, bits=8):
"""
Perform inverse mu-law transformation.
"""
mu = 2**bits - 1
# Invert the quantization
x = (encoded / mu) * 2 - 1
# Invert the mu-law transformation
x = torch.sign(x) * ((1 + mu)**(torch.abs(x)) - 1) / mu
return x
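# Hedged usage sketch (illustrative): mu-law round trip. The 3-D shape below is an
# assumption, since minmax_scale reduces over dims (1, 2).
#
#   audio = torch.randn(4, 1, 16000)      # (batch, channels, length)
#   q = mu_law_encode(audio, bits=8)      # int32 codes in [0, 255]
#   approx = mu_law_decode(q, bits=8)     # values approximately in [-1, 1]
#   # Note: decoding recovers the min-max-scaled signal, not the original amplitude.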
def split_data(tensor, stratify):
# 0.7/0.15/0.15 train/val/test split
(
train_tensor,
testval_tensor,
train_stratify,
testval_stratify,
) = sklearn.model_selection.train_test_split(
tensor,
stratify,
train_size=0.7,
random_state=0,
shuffle=True,
stratify=stratify,
)
val_tensor, test_tensor = sklearn.model_selection.train_test_split(
testval_tensor,
train_size=0.5,
random_state=1,
shuffle=True,
stratify=testval_stratify,
)
return train_tensor, val_tensor, test_tensor
class _SpeechCommands(torch.utils.data.TensorDataset):
SUBSET_CLASSES = [
"yes",
"no",
"up",
"down",
"left",
"right",
"on",
"off",
"stop",
"go",
]
ALL_CLASSES = [
"bed",
"cat",
"down",
"five",
"forward",
"go",
"house",
"left",
"marvin",
"no",
"on",
"right",
"sheila",
"tree",
"up",
"visual",
"yes",
"backward",
"bird",
"dog",
"eight",
"follow",
"four",
"happy",
"learn",
"nine",
"off",
"one",
"seven",
"six",
"stop",
"three",
"two",
"wow",
"zero",
]
def __init__(
self,
partition: str, # `train`, `val`, `test`
length: int, # sequence length
mfcc: bool, # whether to use MFCC features (`True`) or raw features
sr: int, # subsampling rate: default should be 1 (no subsampling); keeps every kth sample
dropped_rate: float, # rate at which samples are dropped, lies in [0, 100.]
path: str,
all_classes: bool = False,
gen: bool = False, # whether we are doing speech generation
discrete_input: bool = False, # whether we are using discrete inputs
):
self.dropped_rate = dropped_rate
self.all_classes = all_classes
self.gen = gen
self.discrete_input = discrete_input
self.root = pathlib.Path(path) # pathlib.Path("./data")
base_loc = self.root / "SpeechCommands" / "processed_data"
if mfcc:
data_loc = base_loc / "mfcc"
elif gen:
data_loc = base_loc / "gen"
else:
data_loc = base_loc / "raw"
if self.dropped_rate != 0:
data_loc = pathlib.Path(
str(data_loc) + "_dropped{}".format(self.dropped_rate)
)
if self.all_classes:
data_loc = pathlib.Path(str(data_loc) + "_all_classes")
if self.discrete_input:
data_loc = pathlib.Path(str(data_loc) + "_discrete")
if os.path.exists(data_loc):
pass
else:
self.download()
if not self.all_classes:
train_X, val_X, test_X, train_y, val_y, test_y = self._process_data(mfcc)
else:
train_X, val_X, test_X, train_y, val_y, test_y = self._process_all(mfcc)
if not os.path.exists(base_loc):
os.mkdir(base_loc)
if not os.path.exists(data_loc):
os.mkdir(data_loc)
save_data(
data_loc,
train_X=train_X,
val_X=val_X,
test_X=test_X,
train_y=train_y,
val_y=val_y,
test_y=test_y,
)
X, y = self.load_data(data_loc, partition) # (batch, length, 1)
if self.gen: y = y.transpose(1, 2)
if not mfcc and not self.gen:
X = F.pad(X, (0, 0, 0, length-16000))
# Subsample
if not mfcc:
X, y = subsample(X, y, sr)
if self.discrete_input:
X = X.long().squeeze()
super(_SpeechCommands, self).__init__(X, y)
def download(self):
root = self.root
base_loc = root / "SpeechCommands"
loc = base_loc / "speech_commands.tar.gz"
if os.path.exists(loc):
return
if not os.path.exists(root):
os.mkdir(root)
if not os.path.exists(base_loc):
os.mkdir(base_loc)
urllib.request.urlretrieve(
"http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz", loc
) # TODO: Add progress bar
with tarfile.open(loc, "r") as f:
def is_within_directory(directory, target):
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
prefix = os.path.commonprefix([abs_directory, abs_target])
return prefix == abs_directory
def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
raise Exception("Attempted Path Traversal in Tar File")
tar.extractall(path, members, numeric_owner=numeric_owner)
safe_extract(f, base_loc)
def _process_all(self, mfcc):
assert self.dropped_rate == 0, "Dropped rate must be 0 for all classes"
base_loc = self.root / "SpeechCommands"
with open(base_loc / "validation_list.txt", "r") as f:
validation_list = set([line.rstrip() for line in f])
with open(base_loc / "testing_list.txt", "r") as f:
testing_list = set([line.rstrip() for line in f])
train_X, val_X, test_X = [], [], []
train_y, val_y, test_y = [], [], []
batch_index = 0
y_index = 0
for foldername in self.ALL_CLASSES:
print(foldername)
loc = base_loc / foldername
for filename in os.listdir(loc):
audio, _ = torchaudio.load(
loc / filename, channels_first=False,
)
audio = (
audio / 2 ** 15
)
# Pad: A few samples are shorter than the full length
audio = F.pad(audio, (0, 0, 0, 16000 - audio.shape[0]))
if str(foldername + '/' + filename) in validation_list:
val_X.append(audio)
val_y.append(y_index)
elif str(foldername + '/' + filename) in testing_list:
test_X.append(audio)
test_y.append(y_index)
else:
train_X.append(audio)
train_y.append(y_index)
batch_index += 1
y_index += 1
# print("Full data: {} samples".format(len(X)))
train_X = torch.stack(train_X)
val_X = torch.stack(val_X)
test_X = torch.stack(test_X)
train_y = torch.tensor(train_y, dtype=torch.long)
val_y = torch.tensor(val_y, dtype=torch.long)
test_y = torch.tensor(test_y, dtype=torch.long)
# If MFCC, then we compute these coefficients.
if mfcc:
train_X = torchaudio.transforms.MFCC(
log_mels=True, n_mfcc=20, melkwargs=dict(n_fft=200, n_mels=64)
)(train_X.squeeze(-1)).detach()
val_X = torchaudio.transforms.MFCC(
log_mels=True, n_mfcc=20, melkwargs=dict(n_fft=200, n_mels=64)
)(val_X.squeeze(-1)).detach()
test_X = torchaudio.transforms.MFCC(
log_mels=True, n_mfcc=20, melkwargs=dict(n_fft=200, n_mels=64)
)(test_X.squeeze(-1)).detach()
# X is of shape (batch, channels=20, length=161)
else:
train_X = train_X.unsqueeze(1).squeeze(-1)
val_X = val_X.unsqueeze(1).squeeze(-1)
test_X = test_X.unsqueeze(1).squeeze(-1)
# X is of shape (batch, channels=1, length=16000)
# Normalize data
if mfcc:
train_X, val_X, test_X = normalize_all_data(train_X.transpose(1, 2), val_X.transpose(1, 2), test_X.transpose(1, 2))
train_X = train_X.transpose(1, 2)
val_X = val_X.transpose(1, 2)
test_X = test_X.transpose(1, 2)
else:
train_X, val_X, test_X = normalize_all_data(train_X, val_X, test_X)
# Print the shape of all tensors in one line
print(
"Train: {}, Val: {}, Test: {}".format(
train_X.shape, val_X.shape, test_X.shape
)
)
return (
train_X,
val_X,
test_X,
train_y,
val_y,
test_y,
)
def _process_data(self, mfcc):
base_loc = self.root / "SpeechCommands"
if self.gen:
X = torch.empty(35628, 16000, 1)
y = torch.empty(35628, dtype=torch.long)
else:
X = torch.empty(34975, 16000, 1)
y = torch.empty(34975, dtype=torch.long)
batch_index = 0
y_index = 0
for foldername in self.SUBSET_CLASSES:
loc = base_loc / foldername
for filename in os.listdir(loc):
audio, _ = torchaudio.load(
loc / filename, channels_first=False,
)
# audio, _ = torchaudio.load_wav(
# loc / filename, channels_first=False, normalization=False
# ) # for forward compatibility if they fix it
audio = (
audio / 2 ** 15
) # Normalization argument doesn't seem to work so we do it manually.
# A few samples are shorter than the full length; for simplicity we discard them.
if len(audio) != 16000:
continue
X[batch_index] = audio
y[batch_index] = y_index
batch_index += 1
y_index += 1
if self.gen:
assert batch_index == 35628, "batch_index is {}".format(batch_index)
else:
assert batch_index == 34975, "batch_index is {}".format(batch_index)
# If MFCC, then we compute these coefficients.
if mfcc:
X = torchaudio.transforms.MFCC(
log_mels=True, n_mfcc=20, melkwargs=dict(n_fft=200, n_mels=64)
)(X.squeeze(-1)).detach()
# X is of shape (batch=34975, channels=20, length=161)
else:
X = X.unsqueeze(1).squeeze(-1)
# X is of shape (batch=34975, channels=1, length=16000)
# If dropped is different than zero, randomly drop that quantity of data from the dataset.
if self.dropped_rate != 0:
generator = torch.Generator().manual_seed(56789)
X_removed = []
for Xi in X:
removed_points = (
torch.randperm(X.shape[-1], generator=generator)[
: int(X.shape[-1] * float(self.dropped_rate) / 100.0)
]
.sort()
.values
)
Xi_removed = Xi.clone()
Xi_removed[:, removed_points] = float("nan")
X_removed.append(Xi_removed)
X = torch.stack(X_removed, dim=0)
# Normalize data
if mfcc:
X = normalise_data(X.transpose(1, 2), y).transpose(1, 2)
else:
X = normalise_data(X, y)
# Once the data is normalized append times and mask values if required.
if self.dropped_rate != 0:
# Get mask of positions that are deleted
mask_exists = (~torch.isnan(X[:, :1, :])).float()
X = torch.where(~torch.isnan(X), X, torch.Tensor([0.0]))
X = torch.cat([X, mask_exists], dim=1)
train_X, val_X, test_X = split_data(X, y)
train_y, val_y, test_y = split_data(y, y)
if self.gen:
train_y, val_y, test_y = train_X, val_X, test_X
train_y, val_y, test_y = mu_law_encode(train_y), mu_law_encode(val_y), mu_law_encode(test_y)
# train_X, val_X, test_X = train_X[..., :-1], val_X[..., :-1], test_X[..., :-1]
# # Prepend zero to train_X, val_X, test_X
# train_X = torch.cat([torch.zeros(train_X.shape[0], 1, train_X.shape[2]), train_X], dim=1)
# train_X, val_X, test_X = torch.roll(train_X, 1, 2), torch.roll(val_X, 1, 2), torch.roll(test_X, 1, 2)
if not self.discrete_input:
train_X, val_X, test_X = torch.roll(mu_law_decode(train_y), 1, 2), torch.roll(mu_law_decode(val_y), 1, 2), torch.roll(mu_law_decode(test_y), 1, 2)
else:
train_X, val_X, test_X = torch.roll(train_y, 1, 2), torch.roll(val_y, 1, 2), torch.roll(test_y, 1, 2)
train_X[..., 0], val_X[..., 0], test_X[..., 0] = 0, 0, 0
assert(train_y.shape == train_X.shape)
return (
train_X,
val_X,
test_X,
train_y,
val_y,
test_y,
)
@staticmethod
def load_data(data_loc, partition):
tensors = load_data(data_loc)
if partition == "train":
X = tensors["train_X"]
y = tensors["train_y"]
elif partition == "val":
X = tensors["val_X"]
y = tensors["val_y"]
elif partition == "test":
X = tensors["test_X"]
y = tensors["test_y"]
else:
raise NotImplementedError("the set {} is not implemented.".format(set))
return X.transpose(1, 2), y
class _SpeechCommandsGeneration(_SpeechCommands):
SUBSET_CLASSES = [
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
]
def __init__(
self,
partition: str, # `train`, `val`, `test`
length: int, # sequence length
mfcc: bool, # whether to use MFCC features (`True`) or raw features
sr: int, # subsampling rate: default should be 1 (no subsampling); keeps every kth sample
dropped_rate: float, # rate at which samples are dropped, lies in [0, 100.]
path: str,
all_classes: bool = False,
discrete_input: bool = False,
):
super(_SpeechCommandsGeneration, self).__init__(
partition = partition,
length = length,
mfcc = mfcc,
sr = sr,
dropped_rate = dropped_rate,
path = path,
all_classes = all_classes,
gen = True,
discrete_input = discrete_input,
)
| state-spaces-main | src/dataloaders/datasets/sc.py |
"""RNN Vocal Generation Model.
Blizzard, Music, and Huckleberry Finn data feeders.
"""
import numpy as np
#import scikits.audiolab
import random
import time
import os
import glob
import torch
import sklearn
from scipy.io import wavfile
def normalize01(data):
"""To range [0., 1.]"""
data -= np.min(data)
data /= np.max(data)
return data
def mu_law_encode(audio, bits=8):
"""
Perform mu-law companding transformation.
"""
mu = torch.tensor(2**bits - 1)
# Audio must be min-max scaled between -1 and 1
audio = 2 * minmax_scale(audio) - 1
# Perform mu-law companding transformation.
numerator = torch.log1p(mu * torch.abs(audio + 1e-8))
denominator = torch.log1p(mu)
encoded = torch.sign(audio) * (numerator / denominator)
# Quantize signal to the specified number of levels.
return ((encoded + 1) / 2 * mu + 0.5).long()
def mu_law_decode(encoded, bits=8):
"""
Perform inverse mu-law transformation.
"""
mu = 2**bits - 1
# Invert the quantization
x = (encoded.float() / mu) * 2 - 1
# Invert the mu-law transformation
x = torch.sign(x) * ((1 + mu)**(torch.abs(x)) - 1) / mu
return x
def minmax_scale(tensor):
min_val = torch.amin(tensor, dim=(1, 2), keepdim=True)
max_val = torch.amax(tensor, dim=(1, 2), keepdim=True)
return (tensor - min_val) / (max_val - min_val + 1e-6)
EPSILON = 1e-2
def linear_quantize(samples, q_levels):
samples = samples.clone()
# samples -= samples.min(dim=-2)[0].unsqueeze(1).expand_as(samples)
# samples /= samples.max(dim=-2)[0].unsqueeze(1).expand_as(samples)
samples = minmax_scale(samples)
samples *= q_levels - EPSILON
samples += EPSILON / 2
return samples.long()
def linear_dequantize(samples, q_levels):
return samples.float() / (q_levels / 2) - 1
def q_zero(q_levels):
return q_levels // 2
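# Hedged usage sketch (illustrative): linear quantization round trip. The 3-D shape is an
# assumption, since minmax_scale reduces over dims (1, 2); numbers are for illustration only.
#
#   x = torch.randn(4, 16000, 1)                # (batch, length, channels)
#   q = linear_quantize(x, q_levels=256)        # long codes in [0, 255]
#   x_rec = linear_dequantize(q, q_levels=256)  # roughly in [-1, 1)
#   mid = q_zero(256)                           # 128, the "zero" code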
ITEM_LIST = [
"BeethovenPianoSonataNo.1",
"BeethovenPianoSonataNo.2",
"BeethovenPianoSonataNo.3",
"BeethovenPianoSonataNo.4",
"BeethovenPianoSonataNo.5",
"BeethovenPianoSonataNo.6",
"BeethovenPianoSonataNo.7",
"BeethovenPianoSonataNo.8",
"BeethovenPianoSonataNo.9",
"BeethovenPianoSonataNo.10",
"BeethovenPianoSonataNo.11",
"BeethovenPianoSonataNo.12",
"BeethovenPianoSonata13",
"BeethovenPianoSonataNo.14moonlight",
"BeethovenPianoSonata15",
"BeethovenPianoSonata16",
"BeethovenPianoSonata17",
"BeethovenPianoSonataNo.18",
"BeethovenPianoSonataNo.19",
"BeethovenPianoSonataNo.20",
"BeethovenPianoSonataNo.21Waldstein",
"BeethovenPianoSonata22",
"BeethovenPianoSonataNo.23",
"BeethovenPianoSonataNo.24",
"BeethovenPianoSonataNo.25",
"BeethovenPianoSonataNo.26",
"BeethovenPianoSonataNo.27",
"BeethovenPianoSonataNo.28",
"BeethovenPianoSonataNo.29",
"BeethovenPianoSonataNo.30",
"BeethovenPianoSonataNo.31",
"BeethovenPianoSonataNo.32",
]
def download_all_data(path):
print('Downloading data to ' + path)
if not os.path.exists(path):
os.system('mkdir ' + path)
for item in ITEM_LIST:
os.system("wget -r -H -nc -nH --cut-dir=1 -A .ogg -R *_vbr.mp3 -e robots=off -P " + path + " -l1 'http://archive.org/download/" + item + "'")
os.system("mv " + os.path.join(path, item, '*.ogg') + " " + path)
os.system("rm -rf " + os.path.join(path, item))
for f in os.listdir(path):
filepath = os.path.join(path, f)
os.system("ffmpeg -y -i " + filepath + " -ar 16000 -ac 1 " + filepath[:-4] + ".wav")
os.system("rm " + filepath)
print('Data download done')
class _Music():
def __init__(
self,
path,
sample_len = 1, # in seconds
sample_rate = 16000,
train_percentage = 0.9,
discrete_input=False,
samplernn_proc=True,
):
self.sample_len = sample_len
self.sample_rate = sample_rate
self.discrete_input = discrete_input
self.samplernn_proc = samplernn_proc
self.music_data_path = os.path.join(path, 'music_data')
if not os.path.exists(self.music_data_path):
download_all_data(self.music_data_path)
self.all_data = self.get_all_data()
self.tensor = self.build_slices(self.all_data)
self.train, self.val, self.test = self.split_data(self.tensor, train_percentage)
self.train_X, self.val_X, self.test_X, self.train_y, self.val_y, self.test_y = self.make_x_y(self.train, self.val, self.test)
def get_all_data(self):
from librosa.core import load
# TODO: There are going to be boundary errors here!
all_data = np.array([])
for f in os.listdir(self.music_data_path):
# sr, data = wavfile.read(os.path.join(self.music_data_path, f))
data, _ = load(os.path.join(self.music_data_path, f), sr=None, mono=True)
# assert(sr == self.sample_rate)
all_data = np.append(all_data, data)
# # if not self.samplernn_proc:
# # Convert all data to range [-1, 1]
# all_data = all_data.astype('float64')
# all_data = normalize01(all_data)
# all_data = 2. * all_data - 1.
return all_data
def build_slices(self, data):
num_samples_per_slice = self.sample_rate * self.sample_len
truncated_len = len(data) - len(data) % num_samples_per_slice
return torch.tensor(data[:truncated_len].reshape(-1, num_samples_per_slice), dtype=torch.float32)
# tensor = torch.zeros([len(data) // num_samples_per_slice, num_samples_per_slice], dtype=torch.float32)
# for i in range(len(data) // num_samples_per_slice):
# tensor[i] = torch.tensor(data[i * num_samples_per_slice : (i + 1) * num_samples_per_slice])
# return tensor
def split_data(self, tensor, train_percentage):
train, test = sklearn.model_selection.train_test_split(
tensor,
train_size=train_percentage,
random_state=0,
shuffle=True
)
val, test = sklearn.model_selection.train_test_split(
test,
train_size=0.5,
random_state=0,
shuffle=True
)
train = torch.swapaxes(train.unsqueeze(1).squeeze(-1), 1, 2)
val = torch.swapaxes(val.unsqueeze(1).squeeze(-1), 1, 2)
test = torch.swapaxes(test.unsqueeze(1).squeeze(-1), 1, 2)
return train, val, test
def make_x_y(self, train, val, test):
if not self.samplernn_proc:
train_y, val_y, test_y = mu_law_encode(train), mu_law_encode(val), mu_law_encode(test)
if not self.discrete_input:
train_X, val_X, test_X = torch.roll(mu_law_decode(train_y), 1, 1), torch.roll(mu_law_decode(val_y), 1, 1), torch.roll(mu_law_decode(test_y), 1, 1)
train_X[:, 0, :], val_X[:, 0, :], test_X[:, 0, :] = 0, 0, 0
else:
train_X, val_X, test_X = torch.roll(train_y, 1, 1), torch.roll(val_y, 1, 1), torch.roll(test_y, 1, 1)
train_X[:, 0, :], val_X[:, 0, :], test_X[:, 0, :] = 128, 128, 128
else:
train_y, val_y, test_y = linear_quantize(train, 256), linear_quantize(val, 256), linear_quantize(test, 256)
# train_y, val_y, test_y = mu_law_encode(train), mu_law_encode(val), mu_law_encode(test)
if not self.discrete_input:
raise NotImplementedError
else:
train_X, val_X, test_X = torch.roll(train_y, 1, 1), torch.roll(val_y, 1, 1), torch.roll(test_y, 1, 1)
train_X[:, 0, :], val_X[:, 0, :], test_X[:, 0, :] = 128, 128, 128
return train_X, val_X, test_X, train_y, val_y, test_y
def get_data(self, partition):
if partition == 'train':
return MusicTensorDataset(self.train_X, self.train_y)
elif partition == 'val':
return MusicTensorDataset(self.val_X, self.val_y)
elif partition == 'test':
return MusicTensorDataset(self.test_X, self.test_y)
class MusicTensorDataset(torch.utils.data.TensorDataset):
def __getitem__(self, index):
data = self.tensors[0][index]
target = self.tensors[1][index]
if data.dtype == torch.float32:
return data, target
else:
return data.squeeze(-1), target
# Rejection sampling to remove "bad samples" that are essentially constant audio
# if data.dtype == torch.float32:
# if torch.std(data[1:]) < 1e-5:
# return self.__getitem__(np.random.randint(0, len(self.tensors[0])))
# return data, target
# else:
# if (data[1:] - data[1]).abs().sum() < 1e-5:
# return self.__getitem__(np.random.randint(0, len(self.tensors[0])))
# return data.squeeze(-1), target
| state-spaces-main | src/dataloaders/datasets/music.py |
"""Implementation of Celeb-A dataset."""
from functools import partial
import torch
import os
import PIL
from typing import Any, Callable, List, Optional, Union, Tuple
from torchvision.datasets import VisionDataset
try:
import gdown
DOWNLOAD = True
except ImportError:
DOWNLOAD = False
import numpy as np
class _CelebA(VisionDataset):
"""`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
split (string): One of {'train', 'valid', 'test', 'all', 'hq'}.
Accordingly dataset is selected.
target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``,
or ``landmarks``. Can also be a list to output a tuple with all specified target types.
The targets represent:
- ``attr`` (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes
- ``identity`` (int): label for each person (data points with the same identity are the same person)
- ``bbox`` (np.array shape=(4,) dtype=int): bounding box (x, y, width, height)
- ``landmarks`` (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,
righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)
Defaults to ``attr``. If empty, ``None`` will be returned as target.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
base_folder = "celeba"
file_list = [
# File ID MD5 Hash Filename
("1cNIac61PSA_LqDFYFUeyaQYekYPc75NH", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
]
def __init__(
self,
root: str,
task: str = None,
split: str = "train",
target_type: Union[List[str], str] = "attr",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
import pandas
super(_CelebA, self).__init__(root, transform=transform,
target_transform=target_transform)
self.split = split
if isinstance(target_type, list):
self.target_type = target_type
else:
self.target_type = [target_type]
if not self.target_type and self.target_transform is not None:
raise RuntimeError('target_transform is specified but target_type is empty')
if download:
self.download()
split_map = {
"train": 0,
"valid": 1,
"test": 2,
"all": None,
"hq": None,
}
split_ = split_map[split]
if split == 'hq':
fn = partial(os.path.join, self.root)
else:
fn = partial(os.path.join, self.root, self.base_folder)
splits = pandas.read_csv(fn("list_eval_partition.csv"), header=0, index_col=0)
attr = pandas.read_csv(fn("list_attr_celeba.csv"), header=0, index_col=0)
mask = slice(None) if split_ is None else (splits['partition'] == split_)
if split == 'hq':
filenames = os.listdir(fn('train')) + os.listdir(fn('val'))
self.filename = [fn('train', f) for f in os.listdir(fn('train'))] + [fn('val', f) for f in os.listdir(fn('val'))]
self.attr = torch.as_tensor(attr.loc[filenames].values)
else:
self.filename = splits[mask].index.values
self.attr = torch.as_tensor(attr[mask].values)
self.attr = (self.attr + 1) // 2 # map from {-1, 1} to {0, 1}
self.attr_names = list(attr.columns)
self.task = task
if task:
self.task_idx = int(np.where(np.array(self.attr_names) == task)[0])
def download(self) -> None:
import zipfile
if not DOWNLOAD:
raise ImportError("Must install gdown.")
if os.path.exists(os.path.join(self.root, self.base_folder, 'img_align_celeba')):
print('Files already downloaded and verified')
return
for (file_id, md5, filename) in self.file_list:
gdown.download(f'https://drive.google.com/uc?id={file_id}', os.path.join(self.root, self.base_folder, filename), quiet=False)
with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f:
f.extractall(os.path.join(self.root, self.base_folder))
def __getitem__(self, index: int) -> Tuple[Any, Any]:
if self.split == 'hq':
X = PIL.Image.open(self.filename[index])
else:
X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index]))
target: Any = []
for t in self.target_type:
if t == "attr":
target.append(self.attr[index, :])
elif t == "identity":
target.append(self.identity[index, 0])
elif t == "bbox":
target.append(self.bbox[index, :])
elif t == "landmarks":
target.append(self.landmarks_align[index, :])
else:
# TODO: refactor with utils.verify_str_arg
raise ValueError("Target type \"{}\" is not recognized.".format(t))
if self.transform is not None:
X = self.transform(X)
if target:
target = tuple(target) if len(target) > 1 else target[0]
if self.target_transform is not None:
target = self.target_transform(target)
else:
target = None
if self.task:
return X, torch.eye(2, dtype=int)[target[self.task_idx]]
return X, target # torch.eye(2, dtype=int)[target]
def __len__(self) -> int:
return len(self.attr)
def extra_repr(self) -> str:
lines = ["Target type: {target_type}", "Split: {split}"]
return '\n'.join(lines).format(**self.__dict__)
| state-spaces-main | src/dataloaders/datasets/celeba.py |
"""Implementation of standard Adding dataset.
Originally used in Arjovsky's Unitary RNN, maybe earlier?
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def torch_adding_data(L, batch_shape=()):
assert L >= 2
mid = L//2
idx0 = torch.randint(low=0, high=mid, size=batch_shape)
idx1 = torch.randint(low=0, high=L-mid, size=batch_shape)
idx = torch.cat((F.one_hot(idx0, mid), F.one_hot(idx1, L-mid)), dim=-1).float() # (batch_shape, L)
unif = torch.empty(batch_shape+(L,))
unif.uniform_(0., 1.)
x = torch.stack((unif, idx), dim=-1) # (batch_shape, L, 2)
y = torch.sum(unif*idx, dim=-1, keepdim=True) # (batch_shape, 1)
return x, y
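# Hedged usage sketch (illustrative): the adding task pairs a uniform-noise channel with a
# two-hot indicator channel; the target is the sum of the two marked values. L=200 and the
# batch size are assumptions for illustration.
#
#   x, y = torch_adding_data(L=200, batch_shape=(32,))
#   # x: (32, 200, 2) float -- channel 0 is U(0,1) noise, channel 1 marks two positions
#   # y: (32, 1) float      -- sum of the two marked noise values, so y lies in [0, 2]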
def adding_static_dataset(L, samples):
all_x, all_y = torch_adding_data(L, batch_shape=(samples,))
print("Constructing Adding dataset of shape", all_x.shape)
ds = torch.utils.data.TensorDataset(all_x, all_y)
return ds
| state-spaces-main | src/dataloaders/datasets/adding.py |
"""Implementation of "Continuous Delay" dataset from How to Train Your HIPPO."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.dataloaders.utils.signal import whitesignal
class DelayTrainDataset(torch.utils.data.Dataset):
def __init__(self, samples, l_seq=1024, n_lag=1, l_lag=None, dt=1e-3, freq=1.0):
"""
"""
super().__init__()
self.L = l_seq
self.dt = dt
self.freq = freq
self.samples = samples
self.l_lag = l_lag or l_seq // n_lag
self.n_lag = n_lag
def __getitem__(self, idx):
assert 0 <= idx < self.samples
x = torch.FloatTensor(whitesignal(self.L*self.dt, self.dt, self.freq)) # (l_seq)
y = torch.stack([
F.pad(x[:self.L-i*self.l_lag], (i*self.l_lag, 0))
for i in range(1, self.n_lag+1)
], dim=-1) # (l_seq, n_lag)
x = x.unsqueeze(-1)
return x, y
def __len__(self):
return self.samples
class DelayEvalDataset(torch.utils.data.TensorDataset):
def __init__(self, samples, l_seq=1024, n_lag=1, l_lag=None, dt=1e-3, freq=1.0):
self.L = l_seq
self.dt = dt
self.freq = freq
self.samples = samples
self.l_lag = l_lag or l_seq // n_lag
self.n_lag = n_lag
X = torch.FloatTensor(whitesignal(self.L*self.dt, self.dt, self.freq, batch_shape=(self.samples,))) # (samples, l_seq, 1)
Y = torch.stack([
F.pad(X[:, :self.L-i*self.l_lag], (i*self.l_lag, 0)) # manually subtract from self.L otherwise error in i=0 case
for i in range(1, self.n_lag+1)
], dim=-1) # (batch, l_seq, n_lag)
X = X.unsqueeze(-1) # (batch, l_seq, 1)
super().__init__(X, Y)
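# Hedged usage sketch (illustrative): per-sample shapes produced by the datasets above,
# with assumed arguments chosen for illustration.
#
#   train_ds = DelayTrainDataset(samples=100, l_seq=1024, n_lag=4, l_lag=128)
#   x, y = train_ds[0]
#   # x: (1024, 1) -- white-noise input signal
#   # y: (1024, 4) -- the same signal delayed by 128, 256, 384, and 512 steps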
| state-spaces-main | src/dataloaders/datasets/delay.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.dataloaders.utils.signal import whitesignal
class ReconstructTrainDataset(torch.utils.data.Dataset):
def __init__(self, samples, l_seq=1024, l_mem=1024, dt=1e-3, freq=1.0, seed=0):
"""
"""
super().__init__()
self.L = l_seq
self.l_mem = l_mem
self.dt = dt
self.freq = freq
self.samples = samples
def __getitem__(self, idx):
assert 0 <= idx < self.samples
x = torch.FloatTensor(whitesignal(self.L*self.dt, self.dt, self.freq))
x = x.unsqueeze(-1)
y = x[-self.l_mem:, 0]
return x, y
def __len__(self):
return self.samples
class ReconstructEvalDataset(torch.utils.data.TensorDataset):
def __init__(self, samples, l_seq=1024, l_mem=1024, dt=1e-3, freq=1.0, seed=0):
self.L = l_seq
self.l_mem = l_mem
self.dt = dt
self.freq = freq
self.samples = samples
X = []
X = torch.FloatTensor(whitesignal(self.L*self.dt, self.dt, self.freq, batch_shape=(self.samples,)))
X = X[..., None]
Y = X[:, -self.l_mem:, 0]
super().__init__(X, Y)
| state-spaces-main | src/dataloaders/datasets/reconstruct.py |
"""Data utilities for generating signals."""
import numpy as np
def whitesignal(period, dt, freq, rms=0.5, batch_shape=()):
"""
Produces output signal of length period / dt, band-limited to frequency freq
Output shape (*batch_shape, period/dt)
Adapted from the nengo library
"""
if freq is not None and freq < 1. / period:
raise ValueError(f"Make ``{freq=} >= 1. / {period=}`` to produce a non-zero signal",)
nyquist_cutoff = 0.5 / dt
if freq > nyquist_cutoff:
raise ValueError(f"{freq} must not exceed the Nyquist frequency for the given dt ({nyquist_cutoff:0.3f})")
n_coefficients = int(np.ceil(period / dt / 2.))
shape = batch_shape + (n_coefficients + 1,)
sigma = rms * np.sqrt(0.5)
coefficients = 1j * np.random.normal(0., sigma, size=shape)
coefficients[..., -1] = 0.
coefficients += np.random.normal(0., sigma, size=shape)
coefficients[..., 0] = 0.
set_to_zero = np.fft.rfftfreq(2 * n_coefficients, d=dt) > freq
coefficients *= (1-set_to_zero)
power_correction = np.sqrt(1. - np.sum(set_to_zero, dtype=float) / n_coefficients)
if power_correction > 0.: coefficients /= power_correction
coefficients *= np.sqrt(2 * n_coefficients)
signal = np.fft.irfft(coefficients, axis=-1)
signal = signal - signal[..., :1] # Start from 0
return signal
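# Hedged usage sketch (illustrative): generate a batch of 1-second signals sampled at
# 1 kHz and band-limited to 3 Hz; the numbers are assumptions for illustration only.
#
#   sig = whitesignal(period=1.0, dt=1e-3, freq=3.0, batch_shape=(8,))
#   # sig: numpy array of shape (8, 1000), each row starting at 0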
| state-spaces-main | src/dataloaders/utils/signal.py |