python_code | repo_name | file_path
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
number_datapoints = 50000
number_timesteps = 20
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
b_array_dataset[i,0] = 1.
reset_counter = 0
for t in range(number_timesteps-1):
# GET B
if t>0:
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
# If 3,4,5 timesteps have passed, terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
y_array_dataset[i,t] = np.random.randint(0,high=4) # was random_integers(0,high=3); randint's high bound is exclusive
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
# embed()
np.save("X_array_continuous.npy",x_array_dataset)
np.save("Y_array_continuous.npy",y_array_dataset)
np.save("B_array_continuous.npy",b_array_dataset)
np.save("A_array_continuous.npy",a_array_dataset) | CausalSkillLearning-main | DataGenerator/ContinuousTrajs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
number_datapoints = 50000
number_timesteps = 20
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
b_array_dataset[i,0] = 1.
x_array_dataset[i,0] = 5*(np.random.random((2))-0.5)
reset_counter = 0
for t in range(number_timesteps-1):
# GET B
if t>0:
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
# If 3,4,5 timesteps have passed, terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
y_array_dataset[i,t] = np.random.randint(0,high=4) # was random_integers(0,high=3); randint's high bound is exclusive
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
# Noise is uniform in [0, 0.1], so subtract 0.05 to make it zero-mean.
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
# embed()
np.save("X_array_continuous_nonzero.npy",x_array_dataset)
np.save("Y_array_continuous_nonzero.npy",y_array_dataset)
np.save("B_array_continuous_nonzero.npy",b_array_dataset)
np.save("A_array_continuous_nonzero.npy",a_array_dataset) | CausalSkillLearning-main | DataGenerator/ContinuousNonZero.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
import matplotlib.pyplot as plt
#number_datapoints = 20
number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*5
valid_options = np.array([[2,3],[3,0],[1,2],[0,1]])
lim = 25
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
# b_array_dataset[i,0] = 1.
goal_array_dataset[i] = np.random.randint(0,high=4) # was random_integers(0,high=3); randint's high bound is exclusive
# Adding random noise to the final state (the trajectory is generated backwards in time).
x_array_dataset[i,-1] = goal_states[goal_array_dataset[i]] + 0.1*(np.random.random(2)-0.5)
goal = goal_states[goal_array_dataset[i]]
reset_counter = 0
# for t in range(number_timesteps-1):
for t in reversed(range(number_timesteps-1)):
# GET B # Must end on b==0.
if t<(number_timesteps-2):
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
# If 3,4,5 timesteps have passed, terminate.
if t<3:
b_array_dataset[i,t] = 0
elif reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
elif t==(number_timesteps-2):
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
current_state = x_array_dataset[i,t+1]
unnorm_directions = current_state-goal.squeeze(0)
directions = unnorm_directions/abs(unnorm_directions)
# Set valid options.
dot_product = np.dot(action_map, directions)
# valid_options = np.where(dot_product>=0)[0]
# Since we're going backwards in time, flip the sign of the dot-product test.
valid_options = np.where(dot_product<=0)[0]
# Compare states. If x-g_x>y_g_y, choose to go along...
# embed()
# y_array_dataset[i,t] = np.random.choice(valid_options)
y_array_dataset[i,t] = valid_options[np.argmax(np.dot(action_map,unnorm_directions)[valid_options])]
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t+1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
# x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
x_array_dataset[i,t] = x_array_dataset[i,t+1]-a_array_dataset[i,t]
plt.scatter(goal_states[:,0],goal_states[:,1],s=50)
plt.scatter(x_array_dataset[i,:,0],x_array_dataset[i,:,1],cmap='jet',c=range(25))
plt.xlim(-lim, lim)
plt.ylim(-lim, lim)
plt.show()
# Roll over b's.
b_array_dataset = np.roll(b_array_dataset,1,axis=1)
np.save("X_deter_goal_directed.npy",x_array_dataset)
np.save("Y_deter_goal_directed.npy",y_array_dataset)
np.save("B_deter_goal_directed.npy",b_array_dataset)
np.save("A_deter_goal_directed.npy",a_array_dataset)
np.save("G_deter_goal_directed.npy",goal_array_dataset)
| CausalSkillLearning-main | DataGenerator/DeterministicGoalDirectedTraj.py |
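# Sanity-check sketch for the backward-generated data above (hypothetical,
# not from the original script): the final state of every trajectory was
# seeded at its goal plus noise in [-0.05, 0.05), so it must lie in that band.
import numpy as np
x = np.load("X_deter_goal_directed.npy")
g = np.load("G_deter_goal_directed.npy")
goal_states = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]]) * 5
assert (np.abs(x[:, -1] - goal_states[g[:, 0]]) <= 0.05).all()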
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
import matplotlib.pyplot as plt
number_datapoints = 1
# number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*5
valid_options = np.array([[2,3],[3,0],[1,2],[0,1]])
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
# b_array_dataset[i,0] = 1.
goal_array_dataset[i] = np.random.randint(0,high=4) # was random_integers(0,high=3); randint's high bound is exclusive
# Adding random noise to the final state (the trajectory is generated backwards in time).
x_array_dataset[i,-1] = goal_states[goal_array_dataset[i]] + 0.1*(np.random.random(2)-0.5)
goal = goal_states[goal_array_dataset[i]]
reset_counter = 0
# for t in range(number_timesteps-1):
for t in reversed(range(number_timesteps-1)):
# GET B # Must end on b==0.
if t<(number_timesteps-2):
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
# If 3,4,5 timesteps have passed, terminate.
if t<3:
b_array_dataset[i,t] = 0
elif reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
elif t==(number_timesteps-2):
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
current_state = x_array_dataset[i,t+1]
# directions = current_state-goal.squeeze(0)
directions = goal.squeeze(0)-current_state
norm_directions = directions/abs(directions)
# # Set valid options.
dot_product = np.dot(action_map, norm_directions)
# valid_options = np.where(dot_product>=0)[0]
# # Since we're going backwards in time,
valid_options = np.where(dot_product<=0)[0]
# # axes = -goal/abs(goal)
# # step1 = 30*np.ones((2))-axes*np.abs(x_array_dataset[i,t]-x_array_dataset[i,0])
# # # baseline = t*20*np.sqrt(2)/20
# # baseline = t
# # step2 = step1-baseline
# # step3 = step2/step2.sum()
# # y_array_dataset[i,t] = np.random.choice(valid_options[goal_array_dataset[i][0]])
# embed()
dot_product = np.dot(action_map,directions)
y_array_dataset[i,t] = np.argmax(dot_product)
# y_array_dataset[i,t] = np.random.choice(valid_options)
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t+1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
# x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
x_array_dataset[i,t] = x_array_dataset[i,t+1]-a_array_dataset[i,t]
plt.scatter(goal_states[:,0],goal_states[:,1],s=50)
plt.scatter(x_array_dataset[i,:,0],x_array_dataset[i,:,1],cmap='jet',c=range(25))
plt.xlim(-25,25)
plt.ylim(-25,25)
plt.show()
# Roll over b's.
b_array_dataset = np.roll(b_array_dataset,1,axis=1)
np.save("X_goal_directed.npy",x_array_dataset)
np.save("Y_goal_directed.npy",y_array_dataset)
np.save("B_goal_directed.npy",b_array_dataset)
np.save("A_goal_directed.npy",a_array_dataset)
np.save("G_goal_directed.npy",goal_array_dataset)
| CausalSkillLearning-main | DataGenerator/GoalDirectedTrajs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np, copy
from IPython import embed
import matplotlib.pyplot as plt
number_datapoints = 20
# number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
# action_map = np.array([[-1,0],[0,-1],[1,0],[0,1]])
# start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*5
# Creating a policy map.
size = 9
scale = 5
policy_map = np.zeros((size,size),dtype=int)
# Row wise assignment:
policy_map[0,:] = 2
policy_map[1,:7] = 2
policy_map[1,7:] = 1
policy_map[2:4,0] = 2
policy_map[2:4,1:4] = 3
policy_map[2:4,4:7] = 2
policy_map[2:4,7:] = 1
policy_map[4,:4] = 3
policy_map[4,4] = 3
policy_map[4,5:] = 1
policy_map[5,:3] = 3
policy_map[5,3:5] = 0
policy_map[5,5:] = 1
policy_map[6,:2] = 3
policy_map[6,2:7] = 0
policy_map[6,7:] = 1
policy_map[7:,0] = 3
policy_map[7:,1:7] = 0
policy_map[7:,7:] = 1
policy_map = np.transpose(policy_map)
# x = np.meshgrid(range(9),range(9))
x = np.meshgrid(np.arange(9),np.arange(9))
dxdy = action_map[policy_map[x[0],x[1]]]
traj = np.zeros((10,2))
traj[0] = [0,8]
for t in range(9):
# embed()
action_index = policy_map[int(traj[t,0]),int(traj[t,1])]
action = action_map[action_index]
traj[t+1] = traj[t] + action
print(action_index, action)
plt.ylim(9,-1)
plt.plot(traj[:,0],traj[:,1],'or')
plt.plot(traj[:,0],traj[:,1],'r')
plt.scatter(x[0],x[1])
for i in range(9):
for j in range(9):
plt.arrow(x[0][i,j],x[1][i,j],0.1*dxdy[i,j,0],0.1*dxdy[i,j,1],width=0.01)
plt.show()
# embed()
# Transformed vis.
size = 9
scale = 5
scaled_size = scale*size
# policy_map = np.flipud(np.transpose(policy_map))
policy_map = np.transpose(policy_map)
# goal_based_policy_maps = np.zeros((4,size,size),dtype=int)
# goal_based_policy_maps[0] = copy.deepcopy(policy_map)
# goal_based_policy_maps[1] = np.rot90(policy_map)
# goal_based_policy_maps[2] = np.rot90(policy_map,k=2)
# goal_based_policy_maps[3] = np.rot90(policy_map,k=3)
def get_bucket(state, reference_state):
# baseline = 4*np.ones(2)
baseline = np.zeros(2)
compensated_state = state - reference_state
# compensated_state = (np.round(state - reference_state) + baseline).astype(int)
scaled_size = scale*size
x = (np.arange(-(size-1)/2,(size-1)/2+1)-0.5)*scale
bucket = np.zeros((2))
bucket[0] = min(np.searchsorted(x,compensated_state[0]),size-1)
bucket[1] = min(np.searchsorted(x,compensated_state[1]),size-1)
return bucket.astype(int)
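# Worked example for get_bucket above: with size=9 and scale=5 the bin edges
# are (np.arange(-4, 5) - 0.5) * 5 = [-22.5, -17.5, ..., 17.5], so a state
# 12 units right of and 3 units below the reference lands in bucket [7, 4].
print(get_bucket(np.array([12., -3.]), np.zeros(2))) # -> [7 4]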
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*10
# goal_index = 1
# # meshrange = np.arange(-scaled_size/2,scaled_size/2+1,5)
# meshrange = (np.arange(-(size-1)/2,(size-1)/2+1)-0.5)*scale
# evalrange = (np.arange(-(size-1)/2,(size-1)/2+1)-1)*scale
# x = np.meshgrid(goal_states[goal_index,0]+meshrange,goal_states[goal_index,1]+meshrange)
# dxdy = np.zeros((9,9,2))
# # dxdy = action_map[policy_map[x[0],x[1]]]
# plt.scatter(x[0],x[1])
# plt.ylim(50,-50)
# arr = np.zeros((9,9,2))
# for i in range(9):
# for j in range(9):
# a = goal_states[goal_index,0]+evalrange[i]
# b = goal_states[goal_index,1]+evalrange[j]
# bucket = get_bucket(np.array([a,b]), goal_states[goal_index])
# arr[i,j,0] = i
# arr[i,j,1] = j
# dxdy[bucket[0],bucket[1]] = action_map[policy_map[bucket[0],bucket[1]]]
# plt.arrow(x[0][i,j],x[1][i,j],0.1*dxdy[i,j,0],0.1*dxdy[i,j,1],width=0.01*scale)
# plt.show()
for goal_index in range(4):
# embed()
# meshrange = np.arange(-scaled_size/2,scaled_size/2+1,5)
meshrange = (np.arange(-(size-1)/2,(size-1)/2+1)-0.5)*scale
evalrange = (np.arange(-(size-1)/2,(size-1)/2+1)-1)*scale
x = np.meshgrid(goal_states[goal_index,0]+meshrange,goal_states[goal_index,1]+meshrange)
dxdy = np.zeros((9,9,2))
# dxdy = action_map[policy_map[x[0],x[1]]]
plt.scatter(x[0],x[1])
plt.ylim(50,-50)
plt.xlim(-50,50)
arr = np.zeros((9,9,2))
for i in range(9):
for j in range(9):
a = goal_states[goal_index,0]+evalrange[i]
b = goal_states[goal_index,1]+evalrange[j]
bucket = get_bucket(np.array([a,b]), goal_states[goal_index])
arr[i,j,0] = i
arr[i,j,1] = j
# dxdy[bucket[0],bucket[1]] = action_map[goal_based_policy_maps[goal_index,bucket[0],bucket[1]]]
dxdy[bucket[0],bucket[1]] = action_map[policy_map[bucket[0],bucket[1]]]
# plt.arrow(x[0][i,j],x[1][i,j],0.1*dxdy[i,j,0],0.1*dxdy[i,j,1],width=0.01*scale)
# plt.quiver(x[0],x[1],0.1*dxdy[:,:,1],0.1*dxdy[:,:,0],width=0.0001,headwidth=4,headlength=2)
plt.quiver(x[0],x[1],0.1*dxdy[:,:,1],0.1*dxdy[:,:,0])
traj_len = 20
traj = np.zeros((20,2))
traj[0] = np.random.randint(-25,high=25,size=2)
for t in range(traj_len-1):
bucket = get_bucket(traj[t], goal_states[goal_index])
action_index = policy_map[bucket[0],bucket[1]]
action = action_map[action_index]
traj[t+1] = traj[t] + action
plt.plot(traj[:,0],traj[:,1],'r')
plt.plot(traj[:,0],traj[:,1],'or')
plt.show()
| CausalSkillLearning-main | DataGenerator/PolicyVisualizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
valid_options = np.array([[2,3],[3,0],[1,2],[0,1]])
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
b_array_dataset[i,0] = 1.
# Select one of the four starting corners (start_states above, i.e. (+/-10, +/-10)).
goal_array_dataset[i] = np.random.randint(0,high=4) # was random_integers(0,high=3); randint's high bound is exclusive
# Adding random noise to start state.
x_array_dataset[i,0] = start_states[goal_array_dataset[i]] + 0.2*(np.random.random(2)-0.5)
goal = -start_states[goal_array_dataset[i]]
reset_counter = 0
for t in range(number_timesteps-1):
# GET B
if t>0:
# b_array[t] = np.random.binomial(1,prob_b_given_x)
# b_array_dataset[i,t] = np.random.binomial(1,pb_x[0,x_array_dataset[i,t]])
# If 3,4,5 timesteps have passed, terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
axes = -goal/abs(goal)
step1 = 30*np.ones((2))-axes*np.abs(x_array_dataset[i,t]-x_array_dataset[i,0])
# baseline = t*20*np.sqrt(2)/20
baseline = t
step2 = step1-baseline
step3 = step2/step2.sum()
y_array_dataset[i,t] = np.random.choice(valid_options[goal_array_dataset[i][0]])
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.05+0.1*np.random.random((2))
# GET X
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
np.save("X_dir_cont_nonzero.npy",x_array_dataset)
np.save("Y_dir_cont_nonzero.npy",y_array_dataset)
np.save("B_dir_cont_nonzero.npy",b_array_dataset)
np.save("A_dir_cont_nonzero.npy",a_array_dataset)
np.save("G_dir_cont_nonzero.npy",goal_array_dataset)
| CausalSkillLearning-main | DataGenerator/DirectedContinuousNonZero.py |
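# Hypothetical load-and-check sketch for the directed dataset above: every
# trajectory should start within the noise band of its sampled corner.
import numpy as np
x = np.load("X_dir_cont_nonzero.npy")
g = np.load("G_dir_cont_nonzero.npy")
start_states = np.array([[-2, -2], [-2, 2], [2, -2], [2, 2]]) * 5
assert (np.abs(x[:, 0] - start_states[g[:, 0]]) <= 0.1).all()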
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np, copy
from IPython import embed
import matplotlib.pyplot as plt
number_datapoints = 20
# number_datapoints = 50000
number_timesteps = 25
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
# start_states = np.array([[-2,-2],[-2,2],[2,-2],[2,2]])*5
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*10
# Creating a policy map.
lim = 50
size = 9
scale = 5
policy_map = np.zeros((size,size),dtype=int)
# Row wise assignment:
policy_map[0,:] = 2
policy_map[1,:7] = 2
policy_map[1,7:] = 1
policy_map[2:4,0] = 2
policy_map[2:4,1:4] = 3
policy_map[2:4,4:7] = 2
policy_map[2:4,7:] = 1
policy_map[4,:4] = 3
policy_map[4,4] = 3
policy_map[4,5:] = 1
policy_map[5,:3] = 3
policy_map[5,3:5] = 0
policy_map[5,5:] = 1
policy_map[6,:2] = 3
policy_map[6,2:7] = 0
policy_map[6,7:] = 1
policy_map[7:,0] = 3
policy_map[7:,1:7] = 0
policy_map[7:,7:] = 1
# policy_map = np.transpose(policy_map)
goal_based_policy_maps = np.zeros((4,size,size))
goal_based_policy_maps[0] = copy.deepcopy(policy_map)
goal_based_policy_maps[1] = np.flipud(policy_map)
goal_based_policy_maps[2] = np.fliplr(policy_map)
goal_based_policy_maps[3] = np.flipud(np.fliplr(policy_map))
def get_bucket(state, reference_state):
# baseline = 4*np.ones(2)
baseline = np.zeros(2)
compensated_state = state - reference_state
# compensated_state = (np.round(state - reference_state) + baseline).astype(int)
x = (np.arange(-(size-1)/2,(size-1)/2+1)-0.5)*scale
bucket = np.zeros((2))
bucket[0] = min(np.searchsorted(x,compensated_state[0]),size-1)
bucket[1] = min(np.searchsorted(x,compensated_state[1]),size-1)
return bucket.astype(int)
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
# b_array_dataset[i,0] = 1.
goal_array_dataset[i] = np.random.randint(0,high=4) # was random_integers(0,high=3); randint's high bound is exclusive
# Adding random noise to start state.
# x_array_dataset[i,0] = goal_states[goal_array_dataset[i]] + 0.1*(np.random.random(2)-0.5)
scale = 25 # NB: this rebinds the module-level scale (5) that get_bucket reads, widening its bins to 25 units.
x_array_dataset[i,0] = goal_states[goal_array_dataset[i]] + scale*(np.random.random(2)-0.5)
goal = goal_states[goal_array_dataset[i]]
reset_counter = 0
for t in range(number_timesteps-1):
# GET B
if t>0:
# If 3,4,5 timesteps have passed, terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
current_state = x_array_dataset[i,t]
# Select options from policy map, based on the bucket the current state falls in.
bucket = get_bucket(current_state, goal_states[goal_array_dataset[i]][0])
# Now that we've the bucket, pick the option we should be executing given the bucket.
if (bucket==0).all():
y_array_dataset[i,t] = np.random.randint(0,high=4)
else:
# The goal-specific lookup below was immediately overwritten (dead store); kept commented out for reference.
# y_array_dataset[i,t] = goal_based_policy_maps[goal_array_dataset[i], bucket[0], bucket[1]]
y_array_dataset[i,t] = policy_map[bucket[0], bucket[1]]
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]-0.1*(np.random.random((2))-0.5)
# GET X
# Already taking care of backwards generation here, no need to use action_compliments.
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
plt.scatter(goal_states[:,0],goal_states[:,1],s=50)
# plt.scatter()
plt.scatter(x_array_dataset[i,:,0],x_array_dataset[i,:,1],cmap='jet',c=range(25))
plt.xlim(-lim,lim)
plt.ylim(-lim,lim)
plt.show()
# Roll over b's.
b_array_dataset = np.roll(b_array_dataset,1,axis=1)
np.save("X_goal_directed.npy",x_array_dataset)
np.save("Y_goal_directed.npy",y_array_dataset)
np.save("B_goal_directed.npy",b_array_dataset)
np.save("A_goal_directed.npy",a_array_dataset)
np.save("G_goal_directed.npy",goal_array_dataset)
| CausalSkillLearning-main | DataGenerator/NewGoalDirectedTraj.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from IPython import embed
import matplotlib.pyplot as plt
# number_datapoints = 20
number_datapoints = 50000
number_timesteps = 20
x_array_dataset = np.zeros((number_datapoints, number_timesteps, 2))
a_array_dataset = np.zeros((number_datapoints, number_timesteps-1, 2))
y_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
b_array_dataset = np.zeros((number_datapoints, number_timesteps-1),dtype=int)
goal_array_dataset = np.zeros((number_datapoints, 1),dtype=int)
start_config_dataset = np.zeros((number_datapoints, 1),dtype=int)
action_map = np.array([[0,-1],[-1,0],[0,1],[1,0]])
start_scale = 15
start_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*start_scale
goal_states = np.array([[-1,-1],[-1,1],[1,-1],[1,1]])*5
scale = 5
start_configs = np.zeros((4,5,2),dtype=int)
start_configs[[0,3]] = np.array([[-2,2],[-1,1],[0,0],[1,-1],[2,-2]])*scale
start_configs[[1,2]] = np.array([[-2,-2],[-1,-1],[0,0],[1,1],[2,2]])*scale
# valid_options = np.array([[2,3],[3,0],[1,2],[0,1]])
valid_options = np.array([[3,2],[3,0],[2,1],[0,1]])
lim = 50
progression_of_options = np.zeros((5,4),dtype=int)
progression_of_options[1,0] = 1
progression_of_options[2,:2] = 1
progression_of_options[3,1:] = 1
progression_of_options[4,:] = 1
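# The resulting option-progression table (rows = start configs, columns =
# successive option slots):
# [[0 0 0 0]
#  [1 0 0 0]
#  [1 1 0 0]
#  [0 1 1 1]
#  [1 1 1 1]]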
for i in range(number_datapoints):
if i%1000==0:
print("Processing Datapoint: ",i)
goal_array_dataset[i] = np.random.randint(0,high=4) # was random_integers(0,high=3); randint's high bound is exclusive
start_config_dataset[i] = np.random.randint(0,high=5) # was random_integers(0,high=4)
# start_config_dataset[i] = 4
# Adding random noise to start state.
x_array_dataset[i,0] = start_states[goal_array_dataset[i]] + start_configs[goal_array_dataset[i],start_config_dataset[i]] + 0.1*(np.random.random(2)-0.5)
reset_counter = 0
option_counter = 0
for t in range(number_timesteps-1):
# GET B
if t==0:
b_array_dataset[i,t] = 1
if t>0:
# If 3,4,5 timesteps have passed, terminate.
if reset_counter>=3 and reset_counter<5:
b_array_dataset[i,t] = np.random.binomial(1,0.33)
elif reset_counter==5:
b_array_dataset[i,t] = 1
# GET Y
if b_array_dataset[i,t]:
current_state = x_array_dataset[i,t]
# select new y_array_dataset[i,t]
y_array_dataset[i,t] = valid_options[goal_array_dataset[i]][0][progression_of_options[start_config_dataset[i],min(option_counter,3)]]
option_counter+=1
reset_counter = 0
else:
reset_counter+=1
y_array_dataset[i,t] = y_array_dataset[i,t-1]
# GET A
a_array_dataset[i,t] = action_map[y_array_dataset[i,t]]+0.1*(np.random.random((2))-0.5)
# GET X
# Already taking care of backwards generation here, no need to use action_compliments.
x_array_dataset[i,t+1] = x_array_dataset[i,t]+a_array_dataset[i,t]
# plt.scatter(goal_states[:,0],goal_states[:,1],s=50)
# # plt.scatter()
# plt.scatter(x_array_dataset[i,:,0],x_array_dataset[i,:,1],cmap='jet',c=range(number_timesteps))
# plt.xlim(-lim,lim)
# plt.ylim(-lim,lim)
# plt.show()
# Roll over b's.
b_array_dataset = np.roll(b_array_dataset,1,axis=1)
np.save("X_separable.npy",x_array_dataset)
np.save("Y_separable.npy",y_array_dataset)
np.save("B_separable.npy",b_array_dataset)
np.save("A_separable.npy",a_array_dataset)
np.save("G_separable.npy",goal_array_dataset)
np.save("StartConfig_separable.npy",start_config_dataset)
| CausalSkillLearning-main | DataGenerator/SeparableTrajs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Build import cythonize
import numpy
extensions = [
Extension(
"cpc.eval.ABX.dtw",
["cpc/eval/ABX/dtw.pyx"],
include_dirs=[numpy.get_include()],
),
]
setup(
name='CPC_audio',
version='1.0',
description='An implementation of the contrastive predictive coding (CPC) '
'training method for audio data.',
author='Facebook AI Research',
packages=find_packages(),
classifiers=["License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Programming Language :: Python"],
ext_modules=cythonize(extensions, language_level="3")
)
| CPC_audio-main | setup.py |
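# Assumed build workflow for the setup.py above (standard setuptools/Cython
# usage, not stated in the file itself):
# pip install -e . # builds the cpc.eval.ABX.dtw extension in place
# python setup.py build_ext --inplace # alternative: compile only the .pyx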
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
from cpc.model import CPCModel as cpcmodel
from cpc.cpc_default_config import get_default_cpc_config
from cpc.feature_loader import getEncoder, getAR, loadArgs
dependencies = ['torch', 'torchaudio']
def CPC_audio(pretrained=False,
**kwargs):
"""
Contrastive predictive coding model for audio data
pretrained: if True, load a model trained on libri-light 60k
(https://arxiv.org/abs/1912.07875)
**kwargs : see cpc/cpc_default_config to get the list of possible arguments
"""
locArgs = get_default_cpc_config()
if pretrained:
checkpoint_url = 'https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/60k_epoch4-d0f474de.pt'
checkpoint = torch.hub.load_state_dict_from_url(checkpoint_url,
progress=False)
loadArgs(locArgs, argparse.Namespace(**checkpoint["config"]))
else:
args = argparse.Namespace(**kwargs)
loadArgs(locArgs, args)
encoderNet = getEncoder(locArgs)
arNet = getAR(locArgs)
model = cpcmodel(encoderNet, arNet)
if pretrained:
model.load_state_dict(checkpoint["weights"], strict=False)
return model
| CPC_audio-main | hubconf.py |
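# Usage sketch for the hub entry point above (assumes the repo is published
# on GitHub as facebookresearch/CPC_audio):
import torch
model = torch.hub.load('facebookresearch/CPC_audio', 'CPC_audio', pretrained=True)
wav = torch.randn(1, 1, 16000) # one second of fake 16 kHz audio
cFeature, encoded, _ = model(wav, None) # ~100 feature frames/s (downsampling factor 160)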
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torchaudio
import os
import json
import argparse
from .cpc_default_config import get_default_cpc_config
from .dataset import parseSeqLabels
from .model import CPCModel, ConcatenatedModel
class FeatureModule(torch.nn.Module):
r"""
A simpler interface to handle CPC models. Useful for a smooth workflow when
working with CPC trained features.
"""
def __init__(self, featureMaker, get_encoded, collapse=False):
super(FeatureModule, self).__init__()
self.get_encoded = get_encoded
self.featureMaker = featureMaker
self.collapse = collapse
def getDownsamplingFactor(self):
return self.featureMaker.gEncoder.DOWNSAMPLING
def forward(self, data):
batchAudio, label = data
cFeature, encoded, _ = self.featureMaker(batchAudio.cuda(), label)
if self.get_encoded:
cFeature = encoded
if self.collapse:
cFeature = cFeature.contiguous().view(-1, cFeature.size(2))
return cFeature
class ModelPhoneCombined(torch.nn.Module):
r"""
Concatenates a CPC feature maker and a phone predictor.
"""
def __init__(self, model, criterion, oneHot):
r"""
Arguments:
model (FeatureModule): feature maker
criterion (PhoneCriterion): phone predictor
oneHot (bool): set to True to get a one hot output
"""
super(ModelPhoneCombined, self).__init__()
self.model = model
self.criterion = criterion
self.oneHot = oneHot
def getDownsamplingFactor(self):
return self.model.getDownsamplingFactor()
def forward(self, data):
c_feature = self.model(data)
pred = self.criterion.getPrediction(c_feature)
P = pred.size(2)
if self.oneHot:
pred = pred.argmax(dim=2)
pred = toOneHot(pred, P)
else:
pred = torch.nn.functional.softmax(pred, dim=2)
return pred
def loadArgs(args, locArgs, forbiddenAttr=None):
for k, v in vars(locArgs).items():
if forbiddenAttr is not None:
if k not in forbiddenAttr:
setattr(args, k, v)
else:
setattr(args, k, v)
def loadSupervisedCriterion(pathCheckpoint):
from .criterion import CTCPhoneCriterion, PhoneCriterion
*_, args = getCheckpointData(os.path.dirname(pathCheckpoint))
_, nPhones = parseSeqLabels(args.pathPhone)
if args.CTC:
criterion = CTCPhoneCriterion(args.hiddenGar if not args.onEncoder
else args.hiddenEncoder,
nPhones, args.onEncoder)
else:
criterion = PhoneCriterion(args.hiddenGar, nPhones, args.onEncoder)
state_dict = torch.load(pathCheckpoint)
criterion.load_state_dict(state_dict["cpcCriterion"])
return criterion, nPhones
def getCheckpointData(pathDir):
if not os.path.isdir(pathDir):
return None
checkpoints = [x for x in os.listdir(pathDir)
if os.path.splitext(x)[1] == '.pt'
and os.path.splitext(x[11:])[0].isdigit()]
if len(checkpoints) == 0:
print("No checkpoints found at " + pathDir)
return None
checkpoints.sort(key=lambda x: int(os.path.splitext(x[11:])[0]))
data = os.path.join(pathDir, checkpoints[-1])
with open(os.path.join(pathDir, 'checkpoint_logs.json'), 'rb') as file:
logs = json.load(file)
with open(os.path.join(pathDir, 'checkpoint_args.json'), 'rb') as file:
args = json.load(file)
args = argparse.Namespace(**args)
defaultArgs = get_default_cpc_config()
loadArgs(defaultArgs, args)
return os.path.abspath(data), logs, defaultArgs
def getEncoder(args):
if args.encoder_type == 'mfcc':
from .model import MFCCEncoder
return MFCCEncoder(args.hiddenEncoder)
elif args.encoder_type == 'lfb':
from .model import LFBEnconder
return LFBEnconder(args.hiddenEncoder)
else:
from .model import CPCEncoder
return CPCEncoder(args.hiddenEncoder, args.normMode)
def getAR(args):
if args.arMode == 'transformer':
from .transformers import buildTransformerAR
arNet = buildTransformerAR(args.hiddenEncoder, 1,
args.sizeWindow // 160, args.abspos)
args.hiddenGar = args.hiddenEncoder
elif args.arMode == 'no_ar':
from .model import NoAr
arNet = NoAr()
else:
from .model import CPCAR
arNet = CPCAR(args.hiddenEncoder, args.hiddenGar,
args.samplingType == "sequential",
args.nLevelsGRU,
mode=args.arMode,
reverse=args.cpc_mode == "reverse")
return arNet
def loadModel(pathCheckpoints, loadStateDict=True):
models = []
hiddenGar, hiddenEncoder = 0, 0
for path in pathCheckpoints:
print(f"Loading checkpoint {path}")
_, _, locArgs = getCheckpointData(os.path.dirname(path))
doLoad = locArgs.load is not None and \
(len(locArgs.load) > 1 or
os.path.dirname(locArgs.load[0]) != os.path.dirname(path))
if doLoad:
m_, hg, he = loadModel(locArgs.load, loadStateDict=False)
hiddenGar += hg
hiddenEncoder += he
else:
encoderNet = getEncoder(locArgs)
arNet = getAR(locArgs)
m_ = CPCModel(encoderNet, arNet)
if loadStateDict:
print(f"Loading the state dict at {path}")
state_dict = torch.load(path, 'cpu')
m_.load_state_dict(state_dict["gEncoder"], strict=False)
if not doLoad:
hiddenGar += locArgs.hiddenGar
hiddenEncoder += locArgs.hiddenEncoder
models.append(m_)
if len(models) == 1:
return models[0], hiddenGar, hiddenEncoder
return ConcatenatedModel(models), hiddenGar, hiddenEncoder
def get_module(i_module):
if isinstance(i_module, torch.nn.DataParallel):
return get_module(i_module.module)
if isinstance(i_module, FeatureModule):
return get_module(i_module.featureMaker) # FeatureModule stores its wrapped model as featureMaker, not module
return i_module
def save_checkpoint(model_state, criterion_state, optimizer_state, best_state,
path_checkpoint):
state_dict = {"gEncoder": model_state,
"cpcCriterion": criterion_state,
"optimizer": optimizer_state,
"best": best_state}
torch.save(state_dict, path_checkpoint)
def toOneHot(inputVector, nItems):
batchSize, seqSize = inputVector.size()
out = torch.zeros((batchSize, seqSize, nItems),
device=inputVector.device, dtype=torch.long)
out.scatter_(2, inputVector.view(batchSize, seqSize, 1), 1)
return out
def seqNormalization(out):
# out.size() = Batch x Seq x Channels
mean = out.mean(dim=1, keepdim=True)
var = out.var(dim=1, keepdim=True)
return (out - mean) / torch.sqrt(var + 1e-08)
def buildFeature(featureMaker, seqPath, strict=False,
maxSizeSeq=64000, seqNorm=False):
r"""
Apply the featureMaker to the given file.
Arguments:
- featureMaker (FeatureModule): model to apply
- seqPath (string): path of the sequence to load
- strict (bool): if True, always work with chunks of the size
maxSizeSeq
- maxSizeSeq (int): maximal size of a chunk
- seqNorm (bool): if True, normalize the output along the time
dimension to get chunks of mean zero and var 1
Return:
a torch vector of size 1 x Seq_size x Feature_dim
"""
seq = torchaudio.load(seqPath)[0]
sizeSeq = seq.size(1)
start = 0
out = []
while start < sizeSeq:
if strict and start + maxSizeSeq > sizeSeq:
break
end = min(sizeSeq, start + maxSizeSeq)
subseq = (seq[:, start:end]).view(1, 1, -1).cuda(device=0)
with torch.no_grad():
features = featureMaker((subseq, None))
if seqNorm:
features = seqNormalization(features)
out.append(features.detach().cpu())
start += maxSizeSeq
if strict and start < sizeSeq:
subseq = (seq[:, -maxSizeSeq:]).view(1, 1, -1).cuda(device=0)
with torch.no_grad():
features = featureMaker((subseq, None))
if seqNorm:
features = seqNormalization(features)
delta = (sizeSeq - start) // featureMaker.getDownsamplingFactor()
out.append(features[:, -delta:].detach().cpu())
out = torch.cat(out, dim=1)
return out
| CPC_audio-main | cpc/feature_loader.py |
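# End-to-end sketch tying the helpers above together (checkpoint and audio
# paths are hypothetical): load a trained CPC model, wrap it in a
# FeatureModule, and extract features for a single file with buildFeature.
from cpc.feature_loader import loadModel, FeatureModule, buildFeature
model, hiddenGar, hiddenEncoder = loadModel(["/path/to/checkpoint_60.pt"])
feature_maker = FeatureModule(model, get_encoded=False).cuda().eval()
features = buildFeature(feature_maker, "/path/to/audio.flac", seqNorm=True)
print(features.size()) # 1 x Seq_size x hiddenGar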
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import math
class ScaledDotProductAttention(nn.Module):
def __init__(self,
sizeSeq, # Size of the input sequence
dk, # Dimension of the input sequence
dropout, # Dropout parameter
relpos=False): # Do we retrieve positional information ?
super(ScaledDotProductAttention, self).__init__()
self.drop = nn.Dropout(dropout)
self.softmax = nn.Softmax(dim=2)
self.relpos = relpos
self.sizeSeq = sizeSeq
if relpos:
self.Krelpos = nn.Parameter(torch.Tensor(dk, sizeSeq))
self.initmat_(self.Krelpos)
self.register_buffer('z', torch.zeros(1, sizeSeq, 1))
# A mask is set so that a node never queries data in the future
mask = torch.tril(torch.ones(sizeSeq, sizeSeq), diagonal=0)
mask = 1 - mask
mask[mask == 1] = -float('inf')
self.register_buffer('mask', mask.unsqueeze(0))
def initmat_(self, mat, dim=0):
stdv = 1. / math.sqrt(mat.size(dim))
mat.data.uniform_(-stdv, stdv)
def forward(self, Q, K, V):
# Input dim : N x sizeSeq x dk
QK = torch.bmm(Q, K.transpose(-2, -1))
if self.relpos:
bsz = Q.size(0)
QP = Q.matmul(self.Krelpos)
# This trick with z fills QP's diagonal with zeros
QP = torch.cat((self.z.expand(bsz, -1, -1), QP), 2)
QK += QP.view(bsz, self.sizeSeq + 1, self.sizeSeq)[:, 1:, :]
A = self.softmax(QK / math.sqrt(K.size(-1)) + self.mask)
return torch.bmm(self.drop(A), V)
class MultiHeadAttention(nn.Module):
def __init__(self,
sizeSeq, # Size of a sequence
dropout, # Dropout parameter
dmodel, # Model's dimension
nheads, # Number of heads in the model
abspos): # Is positional information encoded in the input ?
super(MultiHeadAttention, self).__init__()
self.Wo = nn.Linear(dmodel, dmodel, bias=False)
self.Wk = nn.Linear(dmodel, dmodel, bias=False)
self.Wq = nn.Linear(dmodel, dmodel, bias=False)
self.Wv = nn.Linear(dmodel, dmodel, bias=False)
self.nheads = nheads
self.dk = dmodel // nheads
self.Att = ScaledDotProductAttention(sizeSeq, self.dk,
dropout, not abspos)
def trans_(self, x):
bsz, bptt, h, dk = x.size(0), x.size(1), self.nheads, self.dk
return x.view(bsz, bptt, h, dk).transpose(1, 2).contiguous().view(bsz * h, bptt, dk)
def reverse_trans_(self, x):
bsz, bptt, h, dk = x.size(
0) // self.nheads, x.size(1), self.nheads, self.dk
return x.view(bsz, h, bptt, dk).transpose(1, 2).contiguous().view(bsz, bptt, h * dk)
def forward(self, Q, K, V):
q = self.trans_(self.Wq(Q))
k = self.trans_(self.Wk(K))
v = self.trans_(self.Wv(V))
y = self.reverse_trans_(self.Att(q, k, v))
return self.Wo(y)
class FFNetwork(nn.Module):
def __init__(self, din, dout, dff, dropout):
super(FFNetwork, self).__init__()
self.lin1 = nn.Linear(din, dff, bias=True)
self.lin2 = nn.Linear(dff, dout, bias=True)
self.relu = nn.ReLU()
self.drop = nn.Dropout(dropout)
def forward(self, x):
return self.lin2(self.drop(self.relu(self.lin1(x))))
class TransformerLayer(nn.Module):
def __init__(self, sizeSeq=32, dmodel=512, dff=2048,
dropout=0.1, nheads=8,
abspos=False):
super(TransformerLayer, self).__init__()
self.multihead = MultiHeadAttention(sizeSeq, dropout,
dmodel, nheads, abspos)
self.ln_multihead = nn.LayerNorm(dmodel)
self.ffnetwork = FFNetwork(dmodel, dmodel, dff, dropout)
self.ln_ffnetwork = nn.LayerNorm(dmodel)
def forward(self, x):
y = self.ln_multihead(x + self.multihead(Q=x, K=x, V=x))
return self.ln_ffnetwork(y + self.ffnetwork(y))
class StaticPositionEmbedding(nn.Module):
def __init__(self, seqlen, dmodel):
super(StaticPositionEmbedding, self).__init__()
pos = torch.arange(0., seqlen).unsqueeze(1).repeat(1, dmodel)
dim = torch.arange(0., dmodel).unsqueeze(0).repeat(seqlen, 1)
div = torch.exp(- math.log(10000) * (2*(dim//2)/dmodel))
pos *= div
pos[:, 0::2] = torch.sin(pos[:, 0::2])
pos[:, 1::2] = torch.cos(pos[:, 1::2])
self.register_buffer('pe', pos.unsqueeze(0))
def forward(self, x):
return x + self.pe[:, :x.size(1), :]
def buildTransformerAR(dimEncoded, # Output dimension of the encoder
nLayers, # Number of transformer layers
sizeSeq, # Expected size of the input sequence
abspos):
layerSequence = []
if abspos:
layerSequence += [StaticPositionEmbedding(sizeSeq, dimEncoded)]
layerSequence += [TransformerLayer(sizeSeq=sizeSeq,
dmodel=dimEncoded, abspos=abspos)
for i in range(nLayers)]
return nn.Sequential(*layerSequence)
| CPC_audio-main | cpc/transformers.py |
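# Minimal instantiation sketch for buildTransformerAR above (illustrative
# sizes; the input length must equal sizeSeq because of the causal mask):
import torch
ar = buildTransformerAR(dimEncoded=512, nLayers=2, sizeSeq=128, abspos=True)
x = torch.randn(4, 128, 512) # batch x sequence x features
print(ar(x).shape) # torch.Size([4, 128, 512])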
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| CPC_audio-main | cpc/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
import torchaudio
import torch
###########################################
# Networks
###########################################
class IDModule(nn.Module):
def __init__(self, *args, **kwargs):
super(IDModule, self).__init__()
def forward(self, x):
return x
class ChannelNorm(nn.Module):
def __init__(self,
numFeatures,
epsilon=1e-05,
affine=True):
super(ChannelNorm, self).__init__()
if affine:
self.weight = nn.parameter.Parameter(torch.Tensor(1,
numFeatures, 1))
self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
else:
self.weight = None
self.bias = None
self.epsilon = epsilon
self.p = 0
self.affine = affine
self.reset_parameters()
def reset_parameters(self):
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, x):
cumMean = x.mean(dim=1, keepdim=True)
cumVar = x.var(dim=1, keepdim=True)
x = (x - cumMean)*torch.rsqrt(cumVar + self.epsilon)
if self.weight is not None:
x = x * self.weight + self.bias
return x
class CPCEncoder(nn.Module):
def __init__(self,
sizeHidden=512,
normMode="layerNorm"):
super(CPCEncoder, self).__init__()
validModes = ["batchNorm", "instanceNorm", "ID", "layerNorm"]
if normMode not in validModes:
raise ValueError(f"Norm mode must be in {validModes}")
if normMode == "instanceNorm":
def normLayer(x): return nn.InstanceNorm1d(x, affine=True)
elif normMode == "ID":
normLayer = IDModule
elif normMode == "layerNorm":
normLayer = ChannelNorm
else:
normLayer = nn.BatchNorm1d
self.dimEncoded = sizeHidden
self.conv0 = nn.Conv1d(1, sizeHidden, 10, stride=5, padding=3)
self.batchNorm0 = normLayer(sizeHidden)
self.conv1 = nn.Conv1d(sizeHidden, sizeHidden, 8, stride=4, padding=2)
self.batchNorm1 = normLayer(sizeHidden)
self.conv2 = nn.Conv1d(sizeHidden, sizeHidden, 4,
stride=2, padding=1)
self.batchNorm2 = normLayer(sizeHidden)
self.conv3 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)
self.batchNorm3 = normLayer(sizeHidden)
self.conv4 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)
self.batchNorm4 = normLayer(sizeHidden)
self.DOWNSAMPLING = 160
def getDimOutput(self):
return self.conv4.out_channels
def forward(self, x):
x = F.relu(self.batchNorm0(self.conv0(x)))
x = F.relu(self.batchNorm1(self.conv1(x)))
x = F.relu(self.batchNorm2(self.conv2(x)))
x = F.relu(self.batchNorm3(self.conv3(x)))
x = F.relu(self.batchNorm4(self.conv4(x)))
return x
class MFCCEncoder(nn.Module):
def __init__(self,
dimEncoded):
super(MFCCEncoder, self).__init__()
melkwargs = {"n_mels": max(128, dimEncoded), "n_fft": 321}
self.dimEncoded = dimEncoded
self.MFCC = torchaudio.transforms.MFCC(n_mfcc=dimEncoded,
melkwargs=melkwargs)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.MFCC(x)
return x.permute(0, 2, 1)
class LFBEnconder(nn.Module):
def __init__(self, dimEncoded, normalize=True):
super(LFBEnconder, self).__init__()
self.dimEncoded = dimEncoded
self.conv = nn.Conv1d(1, 2 * dimEncoded,
400, stride=1)
self.register_buffer('han', torch.hann_window(400).view(1, 1, 400))
self.instancenorm = nn.InstanceNorm1d(dimEncoded, momentum=1) \
if normalize else None
def forward(self, x):
N, C, L = x.size()
x = self.conv(x)
x = x.view(N, self.dimEncoded, 2, -1)
x = x[:, :, 0, :]**2 + x[:, :, 1, :]**2
x = x.view(N * self.dimEncoded, 1, -1)
x = torch.nn.functional.conv1d(x, self.han, bias=None,
stride=160, padding=350)
x = x.view(N, self.dimEncoded, -1)
x = torch.log(1 + torch.abs(x))
# Normalization
if self.instancenorm is not None:
x = self.instancenorm(x)
return x
class CPCAR(nn.Module):
def __init__(self,
dimEncoded,
dimOutput,
keepHidden,
nLevelsGRU,
mode="GRU",
reverse=False):
super(CPCAR, self).__init__()
self.RESIDUAL_STD = 0.1
if mode == "LSTM":
self.baseNet = nn.LSTM(dimEncoded, dimOutput,
num_layers=nLevelsGRU, batch_first=True)
elif mode == "RNN":
self.baseNet = nn.RNN(dimEncoded, dimOutput,
num_layers=nLevelsGRU, batch_first=True)
else:
self.baseNet = nn.GRU(dimEncoded, dimOutput,
num_layers=nLevelsGRU, batch_first=True)
self.hidden = None
self.keepHidden = keepHidden
self.reverse = reverse
def getDimOutput(self):
return self.baseNet.hidden_size
def forward(self, x):
if self.reverse:
x = torch.flip(x, [1])
try:
self.baseNet.flatten_parameters()
except RuntimeError:
pass
x, h = self.baseNet(x, self.hidden)
if self.keepHidden:
if isinstance(h, tuple):
self.hidden = tuple(x.detach() for x in h)
else:
self.hidden = h.detach()
# For better modularity, a sequence's order should be preserved
# by each module
if self.reverse:
x = torch.flip(x, [1])
return x
class NoAr(nn.Module):
def __init__(self, *args):
super(NoAr, self).__init__()
def forward(self, x):
return x
class BiDIRARTangled(nn.Module):
r"""
Research: bidirectional model for BERT training.
"""
def __init__(self,
dimEncoded,
dimOutput,
nLevelsGRU):
super(BiDIRARTangled, self).__init__()
assert(dimOutput % 2 == 0)
self.ARNet = nn.GRU(dimEncoded, dimOutput // 2,
num_layers=nLevelsGRU, batch_first=True,
bidirectional=True)
def getDimOutput(self):
return self.ARNet.hidden_size * 2
def forward(self, x):
self.ARNet.flatten_parameters()
xf, _ = self.ARNet(x)
return xf
class BiDIRAR(nn.Module):
r"""
Research: bidirectional model for BERT training.
"""
def __init__(self,
dimEncoded,
dimOutput,
nLevelsGRU):
super(BiDIRAR, self).__init__()
assert(dimOutput % 2 == 0)
self.netForward = nn.GRU(dimEncoded, dimOutput // 2,
num_layers=nLevelsGRU, batch_first=True)
self.netBackward = nn.GRU(dimEncoded, dimOutput // 2,
num_layers=nLevelsGRU, batch_first=True)
def getDimOutput(self):
return self.netForward.hidden_size * 2
def forward(self, x):
self.netForward.flatten_parameters()
self.netBackward.flatten_parameters()
xf, _ = self.netForward(x)
xb, _ = self.netBackward(torch.flip(x, [1]))
return torch.cat([xf, torch.flip(xb, [1])], dim=2)
###########################################
# Model
###########################################
class CPCModel(nn.Module):
def __init__(self,
encoder,
AR):
super(CPCModel, self).__init__()
self.gEncoder = encoder
self.gAR = AR
def forward(self, batchData, label):
encodedData = self.gEncoder(batchData).permute(0, 2, 1)
cFeature = self.gAR(encodedData)
return cFeature, encodedData, label
class ConcatenatedModel(nn.Module):
def __init__(self, model_list):
super(ConcatenatedModel, self).__init__()
self.models = torch.nn.ModuleList(model_list)
def forward(self, batchData, label):
outFeatures = []
outEncoded = []
for model in self.models:
cFeature, encodedData, label = model(batchData, label)
outFeatures.append(cFeature)
outEncoded.append(encodedData)
return torch.cat(outFeatures, dim=2), \
torch.cat(outEncoded, dim=2), label
| CPC_audio-main | cpc/model.py |
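# Assembly sketch for the classes above (illustrative hyper-parameters):
# a 512-d convolutional encoder feeding a single-layer GRU context network.
import torch
encoder = CPCEncoder(sizeHidden=512, normMode="layerNorm")
ar = CPCAR(dimEncoded=512, dimOutput=256, keepHidden=False, nLevelsGRU=1)
model = CPCModel(encoder, ar)
wav = torch.randn(8, 1, 20480) # 1.28 s of fake 16 kHz audio per batch item
cFeature, encoded, _ = model(wav, None)
print(cFeature.shape, encoded.shape) # (8, 128, 256) and (8, 128, 512)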
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import time
import tqdm
import torch
import soundfile as sf
from pathlib import Path
from copy import deepcopy
from torch.multiprocessing import Pool
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import Sampler, BatchSampler
import torchaudio
class AudioBatchData(Dataset):
def __init__(self,
path,
sizeWindow,
seqNames,
phoneLabelsDict,
nSpeakers,
nProcessLoader=50,
MAX_SIZE_LOADED=4000000000):
"""
Args:
- path (string): path to the training dataset
- sizeWindow (int): size of the sliding window
- seqNames (list): sequences to load
- phoneLabelsDict (dictionary): if not None, a dictionary with the
following entries
"step": size of a labelled window
"$SEQ_NAME": list of phoneme labels for
the sequence $SEQ_NAME
- nSpeakers (int): number of speakers to expect.
- nProcessLoader (int): number of processes to call when loading the
data from the disk
- MAX_SIZE_LOADED (int): target maximal size of the floating array
containing all loaded data.
"""
self.MAX_SIZE_LOADED = MAX_SIZE_LOADED
self.nProcessLoader = nProcessLoader
self.dbPath = Path(path)
self.sizeWindow = sizeWindow
self.seqNames = [(s, self.dbPath / x) for s, x in seqNames]
self.reload_pool = Pool(nProcessLoader)
self.prepare()
self.speakers = list(range(nSpeakers))
self.data = []
self.phoneSize = 0 if phoneLabelsDict is None else \
phoneLabelsDict["step"]
self.phoneStep = 0 if phoneLabelsDict is None else \
self.sizeWindow // self.phoneSize
self.phoneLabelsDict = deepcopy(phoneLabelsDict)
self.loadNextPack(first=True)
self.loadNextPack()
self.doubleLabels = False
def resetPhoneLabels(self, newPhoneLabels, step):
self.phoneSize = step
self.phoneStep = self.sizeWindow // self.phoneSize
self.phoneLabelsDict = deepcopy(newPhoneLabels)
self.loadNextPack()
def splitSeqTags(seqName):
path = os.path.normpath(seqName)
return path.split(os.sep)
def getSeqNames(self):
return [str(x[1]) for x in self.seqNames]
def clear(self):
if 'data' in self.__dict__:
del self.data
if 'speakerLabel' in self.__dict__:
del self.speakerLabel
if 'phoneLabels' in self.__dict__:
del self.phoneLabels
if 'seqLabel' in self.__dict__:
del self.seqLabel
def prepare(self):
random.shuffle(self.seqNames)
start_time = time.time()
print("Checking length...")
allLength = self.reload_pool.map(extractLength, self.seqNames)
self.packageIndex, self.totSize = [], 0
start, packageSize = 0, 0
for index, length in tqdm.tqdm(enumerate(allLength)):
packageSize += length
if packageSize > self.MAX_SIZE_LOADED:
self.packageIndex.append([start, index])
self.totSize += packageSize
start, packageSize = index, 0
if packageSize > 0:
self.packageIndex.append([start, len(self.seqNames)])
self.totSize += packageSize
print(f"Done, elapsed: {time.time() - start_time:.3f} seconds")
print(f'Scanned {len(self.seqNames)} sequences '
f'in {time.time() - start_time:.2f} seconds')
print(f"{len(self.packageIndex)} chunks computed")
self.currentPack = -1
self.nextPack = 0
def getNPacks(self):
return len(self.packageIndex)
def loadNextPack(self, first=False):
self.clear()
if not first:
self.currentPack = self.nextPack
start_time = time.time()
print('Joining pool')
self.r.wait()
print(f'Joined process, elapsed={time.time()-start_time:.3f} secs')
self.nextData = self.r.get()
self.parseNextDataBlock()
del self.nextData
self.nextPack = (self.currentPack + 1) % len(self.packageIndex)
seqStart, seqEnd = self.packageIndex[self.nextPack]
if self.nextPack == 0 and len(self.packageIndex) > 1:
self.prepare()
self.r = self.reload_pool.map_async(loadFile,
self.seqNames[seqStart:seqEnd])
def parseNextDataBlock(self):
# Labels
self.speakerLabel = [0]
self.seqLabel = [0]
self.phoneLabels = []
speakerSize = 0
indexSpeaker = 0
# To accelerate the process a bit
self.nextData.sort(key=lambda x: (x[0], x[1]))
tmpData = []
for speaker, seqName, seq in self.nextData:
while self.speakers[indexSpeaker] < speaker:
indexSpeaker += 1
self.speakerLabel.append(speakerSize)
if self.speakers[indexSpeaker] != speaker:
raise ValueError(f'{speaker} invalid speaker')
if self.phoneLabelsDict is not None:
self.phoneLabels += self.phoneLabelsDict[seqName]
newSize = len(self.phoneLabelsDict[seqName]) * self.phoneSize
seq = seq[:newSize]
sizeSeq = seq.size(0)
tmpData.append(seq)
self.seqLabel.append(self.seqLabel[-1] + sizeSeq)
speakerSize += sizeSeq
del seq
self.speakerLabel.append(speakerSize)
self.data = torch.cat(tmpData, dim=0)
def getPhonem(self, idx):
idPhone = idx // self.phoneSize
return self.phoneLabels[idPhone:(idPhone + self.phoneStep)]
def getSpeakerLabel(self, idx):
idSpeaker = next(x[0] for x in enumerate(
self.speakerLabel) if x[1] > idx) - 1
return idSpeaker
def __len__(self):
return self.totSize // self.sizeWindow
def __getitem__(self, idx):
if idx < 0 or idx >= len(self.data) - self.sizeWindow - 1:
print(idx)
outData = self.data[idx:(self.sizeWindow + idx)].view(1, -1)
label = torch.tensor(self.getSpeakerLabel(idx), dtype=torch.long)
if self.phoneSize > 0:
label_phone = torch.tensor(self.getPhonem(idx), dtype=torch.long)
if not self.doubleLabels:
label = label_phone
else:
label_phone = torch.zeros(1)
if self.doubleLabels:
return outData, label, label_phone
return outData, label
def getNSpeakers(self):
return len(self.speakers)
def getNSeqs(self):
return len(self.seqLabel) - 1
def getNLoadsPerEpoch(self):
return len(self.packageIndex)
def getBaseSampler(self, type, batchSize, offset):
if type == "samespeaker":
return SameSpeakerSampler(batchSize, self.speakerLabel,
self.sizeWindow, offset)
if type == "samesequence":
return SameSpeakerSampler(batchSize, self.seqLabel,
self.sizeWindow, offset)
if type == "sequential":
return SequentialSampler(len(self.data), self.sizeWindow,
offset, batchSize)
sampler = UniformAudioSampler(len(self.data), self.sizeWindow,
offset)
return BatchSampler(sampler, batchSize, True)
def getDataLoader(self, batchSize, type, randomOffset, numWorkers=0,
onLoop=-1):
r"""
Get a batch sampler for the current dataset.
Args:
- batchSize (int): batch size
- groupSize (int): in the case of type in ["speaker", "sequence"]
number of items sharing a same label in the group
(see AudioBatchSampler)
- type (string):
type == "speaker": grouped sampler speaker-wise
type == "sequence": grouped sampler sequence-wise
type == "sequential": sequential sampling
else: uniform random sampling of the full audio
vector
- randomOffset (bool): if True add a random offset to the sampler
at the beginning of each iteration
"""
nLoops = len(self.packageIndex)
totSize = self.totSize // (self.sizeWindow * batchSize)
if onLoop >= 0:
self.currentPack = onLoop - 1
self.loadNextPack()
nLoops = 1
def samplerCall():
offset = random.randint(0, self.sizeWindow // 2) \
if randomOffset else 0
return self.getBaseSampler(type, batchSize, offset)
return AudioLoader(self, samplerCall, nLoops, self.loadNextPack,
totSize, numWorkers)
def loadFile(data):
speaker, fullPath = data
seqName = fullPath.stem
# Due to some issues happening when combining torchaudio.load
# with torch.multiprocessing we use soundfile to load the data
seq = torch.tensor(sf.read(fullPath)[0]).float()
if len(seq.size()) == 2:
seq = seq.mean(dim=1)
return speaker, seqName, seq
class AudioLoader(object):
r"""
A DataLoader meant to handle an AudioBatchData object.
In order to handle big datasets AudioBatchData works with big chunks of
audio it loads sequentially in memory: once all batches have been sampled
on a chunk, the AudioBatchData loads the next one.
"""
def __init__(self,
dataset,
samplerCall,
nLoop,
updateCall,
size,
numWorkers):
r"""
Args:
- dataset (AudioBatchData): target dataset
- samplerCall (function): batch-sampler to call
- nLoop (int): number of chunks to load
- updateCall (function): function loading the next chunk
- size (int): total number of batches
- numWorkers (int): see torch.utils.data.DataLoader
"""
self.samplerCall = samplerCall
self.updateCall = updateCall
self.nLoop = nLoop
self.size = size
self.dataset = dataset
self.numWorkers = numWorkers
def __len__(self):
return self.size
def __iter__(self):
for i in range(self.nLoop):
sampler = self.samplerCall()
dataloader = DataLoader(self.dataset,
batch_sampler=sampler,
num_workers=self.numWorkers)
for x in dataloader:
yield x
if i < self.nLoop - 1:
self.updateCall()
class UniformAudioSampler(Sampler):
def __init__(self,
dataSize,
sizeWindow,
offset):
self.len = dataSize // sizeWindow
self.sizeWindow = sizeWindow
self.offset = offset
if self.offset > 0:
self.len -= 1
def __iter__(self):
return iter((self.offset
+ self.sizeWindow * torch.randperm(self.len)).tolist())
def __len__(self):
return self.len
class SequentialSampler(Sampler):
def __init__(self, dataSize, sizeWindow, offset, batchSize):
self.len = (dataSize // sizeWindow) // batchSize
self.sizeWindow = sizeWindow
self.offset = offset
self.startBatches = [x * (dataSize // batchSize)
for x in range(batchSize)]
self.batchSize = batchSize
if self.offset > 0:
self.len -= 1
def __iter__(self):
for idx in range(self.len):
yield [self.offset + self.sizeWindow * idx
+ start for start in self.startBatches]
def __len__(self):
return self.len
class SameSpeakerSampler(Sampler):
def __init__(self,
batchSize,
samplingIntervals,
sizeWindow,
offset):
self.samplingIntervals = samplingIntervals
self.sizeWindow = sizeWindow
self.batchSize = batchSize
self.offset = offset
if self.samplingIntervals[0] != 0:
raise AttributeError("Sampling intervals should start at zero")
nWindows = len(self.samplingIntervals) - 1
self.sizeSamplers = [(self.samplingIntervals[i+1] -
self.samplingIntervals[i]) // self.sizeWindow
for i in range(nWindows)]
if self.offset > 0:
self.sizeSamplers = [max(0, x - 1) for x in self.sizeSamplers]
order = [(x, torch.randperm(val).tolist())
for x, val in enumerate(self.sizeSamplers) if val > 0]
# Build Batches
self.batches = []
for indexSampler, randperm in order:
indexStart, sizeSampler = 0, self.sizeSamplers[indexSampler]
while indexStart < sizeSampler:
indexEnd = min(sizeSampler, indexStart + self.batchSize)
locBatch = [self.getIndex(x, indexSampler)
for x in randperm[indexStart:indexEnd]]
indexStart = indexEnd
self.batches.append(locBatch)
def __len__(self):
return len(self.batches)
def getIndex(self, x, iInterval):
return self.offset + x * self.sizeWindow \
+ self.samplingIntervals[iInterval]
def __iter__(self):
random.shuffle(self.batches)
return iter(self.batches)
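# Illustration (added): samplingIntervals holds the cumulative boundaries of
# each label's region in the concatenated audio vector, so every batch only
# draws window offsets from a single region, e.g.
#
#   sampler = SameSpeakerSampler(3, [0, 40, 100], 10, 0)
#   list(sampler)  # e.g. [[30, 0, 20], [10], [90, 40, 70], [60, 50, 80]]
#
# (batch order and contents are shuffled, but the two regions never mix).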
def extractLength(couple):
speaker, locPath = couple
info = torchaudio.info(str(locPath))[0]
return info.length
def findAllSeqs(dirName,
extension='.flac',
loadCache=False,
speaker_level=1):
r"""
Lists all the sequences with the given extension in the dirName directory.
    Output:
        outSequences, outSpeakers
        outSequences
            A list of tuples (speaker, seq_path) where:
            - speaker is the index of the corresponding speaker
            - seq_path is the path of the sequence relative to the
              parent directory
        outSpeakers
            The speaker labels (in order)
The speaker labels are organized the following way
\dirName
\speaker_label
\..
...
seqName.extension
    Adjust the value of speaker_level if you want to choose which level of
    directory defines the speaker label. E.g. if speaker_level == 2 then the
    dataset should be organized in the following fashion
\dirName
\crappy_label
\speaker_label
\..
...
seqName.extension
    Set speaker_level == 0 if you want no speaker label to be retrieved,
    regardless of how the dataset is organized.
"""
cache_path = os.path.join(dirName, '_seqs_cache.txt')
if loadCache:
try:
outSequences, speakers = torch.load(cache_path)
print(f'Loaded from cache {cache_path} successfully')
return outSequences, speakers
except OSError as err:
            print(f'Ran into an error while loading {cache_path}: {err}')
print('Could not load cache, rebuilding')
if dirName[-1] != os.sep:
dirName += os.sep
prefixSize = len(dirName)
speakersTarget = {}
outSequences = []
for root, dirs, filenames in tqdm.tqdm(os.walk(dirName)):
filtered_files = [f for f in filenames if f.endswith(extension)]
if len(filtered_files) > 0:
speakerStr = (os.sep).join(
root[prefixSize:].split(os.sep)[:speaker_level])
if speakerStr not in speakersTarget:
speakersTarget[speakerStr] = len(speakersTarget)
speaker = speakersTarget[speakerStr]
for filename in filtered_files:
full_path = os.path.join(root[prefixSize:], filename)
outSequences.append((speaker, full_path))
outSpeakers = [None for x in speakersTarget]
for key, index in speakersTarget.items():
outSpeakers[index] = key
try:
torch.save((outSequences, outSpeakers), cache_path)
print(f'Saved cache file at {cache_path}')
except OSError as err:
        print(f'Ran into an error while saving {cache_path}: {err}')
return outSequences, outSpeakers
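# Example (added for illustration), for a LibriSpeech-style layout with the
# default speaker_level=1:
#
#   seqs, speakers = findAllSeqs('/data/LibriSpeech/', extension='.flac')
#   speakers   # e.g. ['2911', '4051', ...]
#   seqs[0]    # e.g. (0, '2911/12359/2911-12359-0007.flac')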
def parseSeqLabels(pathLabels):
with open(pathLabels, 'r') as f:
lines = f.readlines()
output = {"step": 160} # Step in librispeech dataset is 160bits
maxPhone = 0
for line in lines:
data = line.split()
output[data[0]] = [int(x) for x in data[1:]]
maxPhone = max(maxPhone, max(output[data[0]]))
return output, maxPhone + 1
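# Sketch of the label file format this parser assumes (inferred from the
# code above, not from separate documentation): one sequence per line, the
# sequence name followed by one integer phone label per 160-sample step:
#
#   4051-11218-0044 14 14 14 18 18 0 ...
#
# parseSeqLabels returns ({'step': 160, '4051-11218-0044': [14, ...], ...},
# maxPhone + 1).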
def filterSeqs(pathTxt, seqCouples):
with open(pathTxt, 'r') as f:
inSeqs = [p.replace('\n', '') for p in f.readlines()]
inSeqs.sort()
seqCouples.sort(key=lambda x: os.path.basename(os.path.splitext(x[1])[0]))
output, index = [], 0
for x in seqCouples:
seq = os.path.basename(os.path.splitext(x[1])[0])
while index < len(inSeqs) and seq > inSeqs[index]:
index += 1
if index == len(inSeqs):
break
if seq == inSeqs[index]:
output.append(x)
return output
| CPC_audio-main | cpc/dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import numpy as np
import torch
import time
from copy import deepcopy
import random
import psutil
import sys
import cpc.criterion as cr
import cpc.model as model
import cpc.utils.misc as utils
import cpc.feature_loader as fl
from cpc.cpc_default_config import set_default_cpc_config
from cpc.dataset import AudioBatchData, findAllSeqs, filterSeqs, parseSeqLabels
def getCriterion(args, downsampling, nSpeakers, nPhones):
dimFeatures = args.hiddenGar if not args.onEncoder else args.hiddenEncoder
if not args.supervised:
if args.cpc_mode == 'none':
cpcCriterion = cr.NoneCriterion()
else:
sizeInputSeq = (args.sizeWindow // downsampling)
cpcCriterion = cr.CPCUnsupersivedCriterion(args.nPredicts,
args.hiddenGar,
args.hiddenEncoder,
args.negativeSamplingExt,
mode=args.cpc_mode,
rnnMode=args.rnnMode,
dropout=args.dropout,
nSpeakers=nSpeakers,
speakerEmbedding=args.speakerEmbedding,
sizeInputSeq=sizeInputSeq)
elif args.pathPhone is not None:
if not args.CTC:
cpcCriterion = cr.PhoneCriterion(dimFeatures,
nPhones, args.onEncoder,
nLayers=args.nLevelsPhone)
else:
cpcCriterion = cr.CTCPhoneCriterion(dimFeatures,
nPhones, args.onEncoder)
else:
cpcCriterion = cr.SpeakerCriterion(dimFeatures, nSpeakers)
return cpcCriterion
def loadCriterion(pathCheckpoint, downsampling, nSpeakers, nPhones):
_, _, locArgs = fl.getCheckpointData(os.path.dirname(pathCheckpoint))
criterion = getCriterion(locArgs, downsampling, nSpeakers, nPhones)
state_dict = torch.load(pathCheckpoint, 'cpu')
criterion.load_state_dict(state_dict["cpcCriterion"])
return criterion
def trainStep(dataLoader,
cpcModel,
cpcCriterion,
optimizer,
scheduler,
loggingStep):
cpcModel.train()
cpcCriterion.train()
start_time = time.perf_counter()
n_examples = 0
logs, lastlogs = {}, None
iter = 0
for step, fulldata in enumerate(dataLoader):
batchData, label = fulldata
n_examples += batchData.size(0)
batchData = batchData.cuda(non_blocking=True)
label = label.cuda(non_blocking=True)
c_feature, encoded_data, label = cpcModel(batchData, label)
allLosses, allAcc = cpcCriterion(c_feature, encoded_data, label)
totLoss = allLosses.sum()
totLoss.backward()
# Show grads ?
optimizer.step()
optimizer.zero_grad()
if "locLoss_train" not in logs:
logs["locLoss_train"] = np.zeros(allLosses.size(1))
logs["locAcc_train"] = np.zeros(allLosses.size(1))
iter += 1
logs["locLoss_train"] += (allLosses.mean(dim=0)).detach().cpu().numpy()
logs["locAcc_train"] += (allAcc.mean(dim=0)).cpu().numpy()
if (step + 1) % loggingStep == 0:
new_time = time.perf_counter()
elapsed = new_time - start_time
print(f"Update {step + 1}")
print(f"elapsed: {elapsed:.1f} s")
print(
f"{1000.0 * elapsed / loggingStep:.1f} ms per batch, {1000.0 * elapsed / n_examples:.1f} ms / example")
locLogs = utils.update_logs(logs, loggingStep, lastlogs)
lastlogs = deepcopy(logs)
utils.show_logs("Training loss", locLogs)
start_time, n_examples = new_time, 0
if scheduler is not None:
scheduler.step()
logs = utils.update_logs(logs, iter)
logs["iter"] = iter
utils.show_logs("Average training loss on epoch", logs)
return logs
def valStep(dataLoader,
cpcModel,
cpcCriterion):
cpcCriterion.eval()
cpcModel.eval()
logs = {}
cpcCriterion.eval()
cpcModel.eval()
iter = 0
for step, fulldata in enumerate(dataLoader):
batchData, label = fulldata
batchData = batchData.cuda(non_blocking=True)
label = label.cuda(non_blocking=True)
with torch.no_grad():
c_feature, encoded_data, label = cpcModel(batchData, label)
allLosses, allAcc = cpcCriterion(c_feature, encoded_data, label)
if "locLoss_val" not in logs:
logs["locLoss_val"] = np.zeros(allLosses.size(1))
logs["locAcc_val"] = np.zeros(allLosses.size(1))
iter += 1
logs["locLoss_val"] += allLosses.mean(dim=0).cpu().numpy()
logs["locAcc_val"] += allAcc.mean(dim=0).cpu().numpy()
logs = utils.update_logs(logs, iter)
logs["iter"] = iter
utils.show_logs("Validation loss:", logs)
return logs
def run(trainDataset,
valDataset,
batchSize,
samplingMode,
cpcModel,
cpcCriterion,
nEpoch,
pathCheckpoint,
optimizer,
scheduler,
logs):
print(f"Running {nEpoch} epochs")
startEpoch = len(logs["epoch"])
bestAcc = 0
bestStateDict = None
start_time = time.time()
for epoch in range(startEpoch, nEpoch):
print(f"Starting epoch {epoch}")
utils.cpu_stats()
trainLoader = trainDataset.getDataLoader(batchSize, samplingMode,
True, numWorkers=0)
valLoader = valDataset.getDataLoader(batchSize, 'sequential', False,
numWorkers=0)
print("Training dataset %d batches, Validation dataset %d batches, batch size %d" %
(len(trainLoader), len(valLoader), batchSize))
locLogsTrain = trainStep(trainLoader, cpcModel, cpcCriterion,
optimizer, scheduler, logs["logging_step"])
locLogsVal = valStep(valLoader, cpcModel, cpcCriterion)
print(f'Ran {epoch + 1} epochs '
f'in {time.time() - start_time:.2f} seconds')
torch.cuda.empty_cache()
currentAccuracy = float(locLogsVal["locAcc_val"].mean())
        if currentAccuracy > bestAcc:
            bestAcc = currentAccuracy
            bestStateDict = fl.get_module(cpcModel).state_dict()
for key, value in dict(locLogsTrain, **locLogsVal).items():
if key not in logs:
logs[key] = [None for x in range(epoch)]
if isinstance(value, np.ndarray):
value = value.tolist()
logs[key].append(value)
logs["epoch"].append(epoch)
if pathCheckpoint is not None \
and (epoch % logs["saveStep"] == 0 or epoch == nEpoch-1):
modelStateDict = fl.get_module(cpcModel).state_dict()
criterionStateDict = fl.get_module(cpcCriterion).state_dict()
fl.save_checkpoint(modelStateDict, criterionStateDict,
optimizer.state_dict(), bestStateDict,
f"{pathCheckpoint}_{epoch}.pt")
utils.save_logs(logs, pathCheckpoint + "_logs.json")
def main(args):
args = parseArgs(args)
utils.set_seed(args.random_seed)
logs = {"epoch": [], "iter": [], "saveStep": args.save_step}
loadOptimizer = False
if args.pathCheckpoint is not None and not args.restart:
cdata = fl.getCheckpointData(args.pathCheckpoint)
if cdata is not None:
data, logs, locArgs = cdata
print(f"Checkpoint detected at {data}")
fl.loadArgs(args, locArgs,
forbiddenAttr={"nGPU", "pathCheckpoint",
"debug", "restart", "world_size",
"n_nodes", "node_id", "n_gpu_per_node",
"max_size_loaded"})
args.load, loadOptimizer = [data], True
args.loadCriterion = True
logs["logging_step"] = args.logging_step
print(f'CONFIG:\n{json.dumps(vars(args), indent=4, sort_keys=True)}')
print('-' * 50)
seqNames, speakers = findAllSeqs(args.pathDB,
extension=args.file_extension,
loadCache=not args.ignore_cache)
print(f'Found files: {len(seqNames)} seqs, {len(speakers)} speakers')
# Datasets
if args.pathTrain is not None:
seqTrain = filterSeqs(args.pathTrain, seqNames)
else:
seqTrain = seqNames
if args.pathVal is None:
random.shuffle(seqTrain)
sizeTrain = int(0.99 * len(seqTrain))
seqTrain, seqVal = seqTrain[:sizeTrain], seqTrain[sizeTrain:]
print(f'Found files: {len(seqTrain)} train, {len(seqVal)} val')
else:
seqVal = filterSeqs(args.pathVal, seqNames)
if args.debug:
seqTrain = seqTrain[-1000:]
seqVal = seqVal[-100:]
phoneLabels, nPhones = None, None
if args.supervised and args.pathPhone is not None:
print("Loading the phone labels at " + args.pathPhone)
phoneLabels, nPhones = parseSeqLabels(args.pathPhone)
print(f"{nPhones} phones found")
print("")
print(f'Loading audio data at {args.pathDB}')
print("Loading the training dataset")
trainDataset = AudioBatchData(args.pathDB,
args.sizeWindow,
seqTrain,
phoneLabels,
len(speakers),
nProcessLoader=args.n_process_loader,
MAX_SIZE_LOADED=args.max_size_loaded)
print("Training dataset loaded")
print("")
print("Loading the validation dataset")
valDataset = AudioBatchData(args.pathDB,
args.sizeWindow,
seqVal,
phoneLabels,
len(speakers),
nProcessLoader=args.n_process_loader)
print("Validation dataset loaded")
print("")
if args.load is not None:
cpcModel, args.hiddenGar, args.hiddenEncoder = \
fl.loadModel(args.load)
else:
# Encoder network
encoderNet = fl.getEncoder(args)
# AR Network
arNet = fl.getAR(args)
cpcModel = model.CPCModel(encoderNet, arNet)
batchSize = args.nGPU * args.batchSizeGPU
cpcModel.supervised = args.supervised
# Training criterion
if args.load is not None and args.loadCriterion:
cpcCriterion = loadCriterion(args.load[0], cpcModel.gEncoder.DOWNSAMPLING,
len(speakers), nPhones)
else:
cpcCriterion = getCriterion(args, cpcModel.gEncoder.DOWNSAMPLING,
len(speakers), nPhones)
if loadOptimizer:
state_dict = torch.load(args.load[0], 'cpu')
cpcCriterion.load_state_dict(state_dict["cpcCriterion"])
cpcCriterion.cuda()
cpcModel.cuda()
# Optimizer
g_params = list(cpcCriterion.parameters()) + list(cpcModel.parameters())
lr = args.learningRate
optimizer = torch.optim.Adam(g_params, lr=lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
if loadOptimizer:
print("Loading optimizer " + args.load[0])
state_dict = torch.load(args.load[0], 'cpu')
if "optimizer" in state_dict:
optimizer.load_state_dict(state_dict["optimizer"])
# Checkpoint
if args.pathCheckpoint is not None:
if not os.path.isdir(args.pathCheckpoint):
os.mkdir(args.pathCheckpoint)
args.pathCheckpoint = os.path.join(args.pathCheckpoint, "checkpoint")
scheduler = None
if args.schedulerStep > 0:
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
args.schedulerStep,
gamma=0.5)
if args.schedulerRamp is not None:
n_epoch = args.schedulerRamp
print(f"Ramp activated. n_e = {n_epoch}")
scheduler_ramp = torch.optim.lr_scheduler.LambdaLR(optimizer,
lr_lambda=lambda epoch: utils.ramp_scheduling_function(
n_epoch, epoch),
last_epoch=-1)
if scheduler is None:
scheduler = scheduler_ramp
else:
scheduler = utils.SchedulerCombiner([scheduler_ramp, scheduler],
[0, args.schedulerRamp])
if scheduler is not None:
for i in range(len(logs["epoch"])):
scheduler.step()
cpcModel = torch.nn.DataParallel(cpcModel,
device_ids=range(args.nGPU)).cuda()
cpcCriterion = torch.nn.DataParallel(cpcCriterion,
device_ids=range(args.nGPU)).cuda()
run(trainDataset,
valDataset,
batchSize,
args.samplingType,
cpcModel,
cpcCriterion,
args.nEpoch,
args.pathCheckpoint,
optimizer,
scheduler,
logs)
def parseArgs(argv):
# Run parameters
parser = argparse.ArgumentParser(description='Trainer')
# Default arguments:
parser = set_default_cpc_config(parser)
group_db = parser.add_argument_group('Dataset')
group_db.add_argument('--pathDB', type=str, default=None,
help='Path to the directory containing the '
'data.')
group_db.add_argument('--file_extension', type=str, default=".flac",
help="Extension of the audio files in the dataset.")
group_db.add_argument('--pathTrain', type=str, default=None,
help='Path to a .txt file containing the list of the '
'training sequences.')
group_db.add_argument('--pathVal', type=str, default=None,
help='Path to a .txt file containing the list of the '
'validation sequences.')
group_db.add_argument('--n_process_loader', type=int, default=8,
help='Number of processes to call to load the '
'dataset')
group_db.add_argument('--ignore_cache', action='store_true',
help='Activate if the dataset has been modified '
'since the last training session.')
group_db.add_argument('--max_size_loaded', type=int, default=4000000000,
                          help='Maximal amount of data (in bytes) a dataset '
'can hold in memory at any given time')
    group_supervised = parser.add_argument_group(
        'Supervised mode (deprecated)')
    group_supervised.add_argument('--supervised', action='store_true',
                                  help='(Deprecated) Disable the CPC loss and activate '
                                  'the supervised mode. By default, the supervised '
                                  'training method is speaker classification.')
group_supervised.add_argument('--pathPhone', type=str, default=None,
help='(Supervised mode only) Path to a .txt '
'containing the phone labels of the dataset. If given '
'and --supervised, will train the model using a '
'phone classification task.')
group_supervised.add_argument('--CTC', action='store_true')
group_save = parser.add_argument_group('Save')
group_save.add_argument('--pathCheckpoint', type=str, default=None,
help="Path of the output directory.")
group_save.add_argument('--logging_step', type=int, default=1000)
group_save.add_argument('--save_step', type=int, default=5,
help="Frequency (in epochs) at which a checkpoint "
"should be saved")
group_load = parser.add_argument_group('Load')
group_load.add_argument('--load', type=str, default=None, nargs='*',
help="Load an exsiting checkpoint. Should give a path "
"to a .pt file. The directory containing the file to "
"load should also have a 'checkpoint.logs' and a "
"'checkpoint.args'")
group_load.add_argument('--loadCriterion', action='store_true',
help="If --load is activated, load the state of the "
"training criterion as well as the state of the "
"feature network (encoder + AR)")
group_load.add_argument('--restart', action='store_true',
help="If any checkpoint is found, ignore it and "
"restart the training from scratch.")
group_gpu = parser.add_argument_group('GPUs')
    group_gpu.add_argument('--nGPU', type=int, default=-1,
                           help="Number of GPUs to use (default: use all "
                           "available GPUs)")
    group_gpu.add_argument('--batchSizeGPU', type=int, default=8,
                           help='Batch size per GPU.')
parser.add_argument('--debug', action='store_true',
help="Load only a very small amount of files for "
"debugging purposes.")
args = parser.parse_args(argv)
if args.pathDB is None and (args.pathCheckpoint is None or args.restart):
parser.print_help()
print("Either provides an input dataset or a checkpoint to load")
sys.exit()
if args.pathCheckpoint is not None:
args.pathCheckpoint = os.path.abspath(args.pathCheckpoint)
if args.load is not None:
args.load = [os.path.abspath(x) for x in args.load]
# set it up if needed, so that it is dumped along with other args
if args.random_seed is None:
args.random_seed = random.randint(0, 2**31)
if args.nGPU < 0:
args.nGPU = torch.cuda.device_count()
    assert args.nGPU <= torch.cuda.device_count(),\
        f"number of GPUs requested: {args.nGPU}, " \
        f"number of GPUs detected: {torch.cuda.device_count()}"
print(f"Let's use {args.nGPU} GPUs!")
if args.arMode == 'no_ar':
args.hiddenGar = args.hiddenEncoder
return args
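# Example invocation (illustrative; paths are placeholders):
#
#   python cpc/train.py --pathDB /data/LibriSpeech/train-clean-100 \
#       --pathCheckpoint /checkpoints/cpc_base \
#       --file_extension .flac --nEpoch 200 --batchSizeGPU 8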
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
args = sys.argv[1:]
main(args)
| CPC_audio-main | cpc/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import os
import cpc.feature_loader as fl
from .dataset import AudioBatchData, findAllSeqs, filterSeqs
from nose.tools import eq_, ok_
from math import log
from pathlib import Path
class TestDataLoader(unittest.TestCase):
def setUp(self):
self.seq_names = ['6476/57446/6476-57446-0019.flac',
'5678/43303/5678-43303-0032.flac',
'5678/43303/5678-43303-0024.flac',
'5678/43301/5678-43301-0021.flac',
'5393/19218/5393-19218-0024.flac',
'4397/15668/4397-15668-0007.flac',
'4397/15668/4397-15668-0003.flac']
self.test_data_dir = Path(__file__).parent / 'test_data'
self.path_db = self.test_data_dir / 'test_db'
self.seq_list = self.test_data_dir / 'seq_list.txt'
self.size_window = 20480
def testFindAllSeqs(self):
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac")
expected_output = [(0, '2911/12359/2911-12359-0007.flac'),
(1, '4051/11218/4051-11218-0044.flac'),
(2, '4397/15668/4397-15668-0003.flac'),
(2, '4397/15668/4397-15668-0007.flac'),
(3, '5393/19218/5393-19218-0024.flac'),
(4, '5678/43301/5678-43301-0021.flac'),
(4, '5678/43303/5678-43303-0024.flac'),
(4, '5678/43303/5678-43303-0032.flac'),
(5, '6476/57446/6476-57446-0019.flac')]
# We do not expect the findAllSeqs function to retrieve all sequences
# in a specific order. However, it should retrieve them all correctly
# Check the number of speakers
eq_(len(speakers), 6)
# Check the speakers names
eq_(set(speakers), {'2911', '4051', '4397', '5393', '5678', '6476'})
# Check that all speakers from 0 to 5 are represented
speaker_set = {x[0] for x in seq_names}
eq_(speaker_set, {x[0] for x in expected_output})
# Check the number of sequences
eq_(len(seq_names), len(expected_output))
# Check that the sequences are correct
sequence_set = {x[1] for x in seq_names}
eq_(sequence_set, {x[1] for x in expected_output})
# Check that the speakers are properly matched
for index_speaker, seq_name in seq_names:
speaker_name = str(Path(seq_name).stem).split('-')[0]
eq_(speakers[index_speaker], speaker_name)
def testFindAllSeqsCustomSpeakers(self):
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac",
speaker_level=2)
expected_speakers = {'2911/12359', '4051/11218', '4397/15668',
'5393/19218', '5678/43301', '5678/43303',
'6476/57446'}
eq_(set(speakers), expected_speakers)
for index_speaker, seq_name in seq_names:
speaker_name = '/'.join(str(Path(seq_name).stem).split('-')[:2])
eq_(speakers[index_speaker], speaker_name)
expected_output = [(0, '2911/12359/2911-12359-0007.flac'),
(1, '4051/11218/4051-11218-0044.flac'),
(2, '4397/15668/4397-15668-0003.flac'),
(2, '4397/15668/4397-15668-0007.flac'),
(3, '5393/19218/5393-19218-0024.flac'),
(4, '5678/43301/5678-43301-0021.flac'),
(5, '5678/43303/5678-43303-0024.flac'),
(5, '5678/43303/5678-43303-0032.flac'),
(6, '6476/57446/6476-57446-0019.flac')]
# Check that the sequences are correct
sequence_set = {x[1] for x in seq_names}
eq_(sequence_set, {x[1] for x in expected_output})
def testFindAllSeqs0Speakers(self):
seq_names, speakers = findAllSeqs(str(self.path_db / '2911/12359/'),
extension=".flac")
eq_(speakers, [''])
def testFindAllSeqs0SpeakersForced(self):
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac", speaker_level=0)
eq_(speakers, [''])
def testLoadData(self):
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac")
seq_names = filterSeqs(self.seq_list, seq_names)
expected_output = [(2, '4397/15668/4397-15668-0003.flac'),
(2, '4397/15668/4397-15668-0007.flac'),
(3, '5393/19218/5393-19218-0024.flac'),
(4, '5678/43301/5678-43301-0021.flac'),
(4, '5678/43303/5678-43303-0024.flac'),
(4, '5678/43303/5678-43303-0032.flac'),
(5, '6476/57446/6476-57446-0019.flac')]
eq_(len(seq_names), len(expected_output))
eq_({x[1] for x in seq_names}, {x[1] for x in expected_output})
phone_labels_dict = None
n_speakers = 9
test_data = AudioBatchData(self.path_db, self.size_window,
seq_names, phone_labels_dict, n_speakers)
assert(test_data.getNSpeakers() == 9)
assert(test_data.getNSeqs() == 7)
def testDataLoader(self):
batch_size = 2
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac")
seq_names = filterSeqs(self.seq_list, seq_names)
test_data = AudioBatchData(self.path_db, self.size_window, seq_names,
None, len(speakers))
test_data_loader = test_data.getDataLoader(batch_size, "samespeaker",
True, numWorkers=2)
visted_labels = set()
for index, item in enumerate(test_data_loader):
_, labels = item
p = labels[0].item()
visted_labels.add(p)
eq_(torch.sum(labels == p), labels.size(0))
eq_(len(visted_labels), 4)
def testPartialLoader(self):
batch_size = 16
seq_names, speakers = findAllSeqs(str(self.path_db),
extension=".flac")
seq_names = filterSeqs(self.seq_list, seq_names)
test_data = AudioBatchData(self.path_db, self.size_window,
seq_names, None, len(speakers),
MAX_SIZE_LOADED=1000000)
eq_(test_data.getNPacks(), 2)
test_data_loader = test_data.getDataLoader(batch_size, "samespeaker",
True, numWorkers=2)
visted_labels = set()
for index, item in enumerate(test_data_loader):
_, labels = item
p = labels[0].item()
eq_(torch.sum(labels == p), labels.size(0))
visted_labels.add(p)
eq_(len(visted_labels), 4)
class TestPhonemParser(unittest.TestCase):
def setUp(self):
from .train import parseSeqLabels
self.seqLoader = parseSeqLabels
self.test_data_dir = Path(__file__).parent / 'test_data'
self.pathPhone = self.test_data_dir / 'phone_labels.txt'
self.path_db = self.test_data_dir / 'test_db'
def testSeqLoader(self):
phone_data, nPhones = self.seqLoader(self.pathPhone)
eq_(len(phone_data), 7)
eq_(phone_data['step'], 160)
eq_(phone_data['4051-11218-0044'][43], 14)
eq_(len(phone_data['4051-11218-0044']), 1119)
eq_(nPhones, 41)
def testSeqLabels(self):
size_window = 640
seq_names = [(0, '2911/12359/2911-12359-0007.flac'),
(1, '4051/11218/4051-11218-0044.flac')]
speakers = list(set([x[0] for x in seq_names]))
phone_data, _ = self.seqLoader(self.pathPhone)
test_data = AudioBatchData(
self.path_db, size_window, seq_names, phone_data, len(speakers))
eq_(test_data.getPhonem(81280), [0, 0, 0, 0])
eq_(test_data.getPhonem(84841), [0, 0, 0, 18])
eq_(test_data.getPhonem(88201), [14, 14, 14, 14])
class TestLabelProcess(unittest.TestCase):
def setUp(self):
pass
def testLabelCollapse(self):
from .criterion.seq_alignment import collapseLabelChain
input_chain = torch.tensor([[0, 0, 0, 1, 1, 2, 0, 2, 2],
[1, 1, 1, 1, 1, 2, 2, 2, 0]],
dtype=torch.int64)
out_chain, sizes = collapseLabelChain(input_chain)
target = torch.tensor([[0, 1, 2, 0, 2],
[1, 2, 0, 0, 0]],
dtype=torch.int64)
target_size = torch.tensor([5, 3], dtype=torch.int64)
eq_((out_chain - target).sum().item(), 0)
eq_((target_size - sizes).sum().item(), 0)
def test_beam_search(self):
from .criterion.seq_alignment import beam_search
import numpy as np
blank_label = 2
n_keep = 10
data = np.array([[0.1, 0.2, 0.],
[0.4, 0.2, 0.6],
[0.01, 0.3, 0.]])
output = beam_search(data, n_keep, blank_label)
        expected_pos_output = [(0.036, [1, 1]), (0.0004, [0]), (0.012, [1]),
                               (0.024, [1, 0, 1]), (0.0002, [0, 1, 0]),
                               (0.0, [1, 1, 1]), (0.0, [1, 1, 0]),
                               (0.0006, [0, 0]), (0.036, [0, 1]),
                               (0.0024, [1, 0])]
expected_pos_output.sort(reverse=True)
for index, item in enumerate(expected_pos_output):
eq_(item[1], output[index][1])
ok_(abs(item[0] - output[index][0]) < 1e-08)
def test_big_beam_search(self):
from .criterion.seq_alignment import beam_search
import numpy as np
blank_label = 11
n_keep = 10
data = np.array([[0.1, 0.2, 0., 0., 0., 0., 0., 0.01, 0., 0.1, 0.99, 0.1],
[0.1, 0.2, 0.6, 0.1, 0.9, 0., 0., 0.01, 0., 0.9, 1., 0.]])
output = beam_search(data, n_keep, blank_label)[0]
expected_output = (1.09, [10])
eq_(output[0], expected_output[0])
eq_(output[1], expected_output[1])
class TestPER(unittest.TestCase):
def setUp(self):
pass
def testPER(self):
from .criterion.seq_alignment import get_seq_PER
ref_seq = [0, 1, 1, 2, 0, 2, 2]
pred_seq = [1, 1, 2, 2, 0, 0]
expected_PER = 4. / 7.
eq_(get_seq_PER(ref_seq, pred_seq), expected_PER)
class TestEncoderBuilder(unittest.TestCase):
def setUp(self):
from cpc.cpc_default_config import get_default_cpc_config
self.default_args = get_default_cpc_config()
def testBuildMFCCEncoder(self):
from cpc.model import MFCCEncoder
self.default_args.encoder_type = 'mfcc'
self.default_args.hiddenEncoder = 30
test_encoder = fl.getEncoder(self.default_args)
ok_(isinstance(test_encoder, MFCCEncoder))
eq_(test_encoder.dimEncoded, 30)
def testBuildLFBEnconder(self):
from cpc.model import LFBEnconder
self.default_args.encoder_type = 'lfb'
self.default_args.hiddenEncoder = 12
test_encoder = fl.getEncoder(self.default_args)
ok_(isinstance(test_encoder, LFBEnconder))
eq_(test_encoder.dimEncoded, 12)
def testBuildCPCEncoder(self):
from cpc.model import CPCEncoder
test_encoder = fl.getEncoder(self.default_args)
ok_(isinstance(test_encoder, CPCEncoder))
eq_(test_encoder.dimEncoded, 256)
class TestARBuilder(unittest.TestCase):
def setUp(self):
from cpc.cpc_default_config import get_default_cpc_config
self.default_args = get_default_cpc_config()
    def testBuildNoAR(self):
from cpc.model import NoAr
self.default_args.arMode = 'no_ar'
test_ar = fl.getAR(self.default_args)
ok_(isinstance(test_ar, NoAr))
    def testBuildLSTMAR(self):
from cpc.model import CPCAR
self.default_args.arMode = 'LSTM'
test_ar = fl.getAR(self.default_args)
ok_(isinstance(test_ar, CPCAR))
ok_(isinstance(test_ar.baseNet, torch.nn.LSTM))
    def testBuildGRUAR(self):
from cpc.model import CPCAR
self.default_args.arMode = 'GRU'
test_ar = fl.getAR(self.default_args)
ok_(isinstance(test_ar, CPCAR))
ok_(isinstance(test_ar.baseNet, torch.nn.GRU))
    def testBuildRNNAR(self):
from cpc.model import CPCAR
self.default_args.arMode = 'RNN'
test_ar = fl.getAR(self.default_args)
ok_(isinstance(test_ar, CPCAR))
ok_(isinstance(test_ar.baseNet, torch.nn.RNN))
| CPC_audio-main | cpc/unit_tests.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
def get_default_cpc_config():
parser = set_default_cpc_config(argparse.ArgumentParser())
return parser.parse_args([])
def set_default_cpc_config(parser):
# Run parameters
group = parser.add_argument_group('Architecture configuration',
description="The arguments defining the "
"model's architecture.")
group.add_argument('--hiddenEncoder', type=int, default=256,
help='Hidden dimension of the encoder network.')
group.add_argument('--hiddenGar', type=int, default=256,
help='Hidden dimension of the auto-regressive network')
group.add_argument('--nPredicts', type=int, default=12,
help='Number of steps to predict.')
group.add_argument('--negativeSamplingExt', type=int, default=128,
help='Number of negative samples to take.')
group.add_argument('--learningRate', type=float, default=2e-4)
group.add_argument('--schedulerStep', type=int, default=-1,
help='Step of the learning rate scheduler: at each '
'step the learning rate is divided by 2. Default: '
'no scheduler.')
group.add_argument('--schedulerRamp', type=int, default=None,
help='Enable a warm up phase for the learning rate: '
'adds a linear ramp of the given size.')
group.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer')
group.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer')
group.add_argument('--epsilon', type=float, default=1e-08,
help='Value of epsilon for the Adam optimizer')
    group.add_argument('--sizeWindow', type=int, default=20480,
                       help='Number of audio samples (frames) in each '
                       'training window.')
group.add_argument('--nEpoch', type=int, default=200,
help='Number of epoch to run')
group.add_argument('--samplingType', type=str, default='samespeaker',
choices=['samespeaker', 'uniform',
'samesequence', 'sequential'],
help='How to sample the negative examples in the '
'CPC loss.')
group.add_argument('--nLevelsPhone', type=int, default=1,
help='(Supervised mode only). Number of layers in '
'the phone classification network.')
group.add_argument('--cpc_mode', type=str, default=None,
choices=['reverse', 'none'],
help='Some variations on CPC.')
    group.add_argument('--encoder_type', type=str,
                       choices=['cpc', 'mfcc', 'lfb'],
                       default='cpc',
                       help='Replace the encoder network with MFCC features '
                       'or learned filter banks.')
group.add_argument('--normMode', type=str, default='layerNorm',
choices=['instanceNorm', 'ID', 'layerNorm',
'batchNorm'],
help="Type of normalization to use in the encoder "
"network (default is layerNorm).")
group.add_argument('--onEncoder', action='store_true',
help="(Supervised mode only) Perform the "
"classification on the encoder's output.")
group.add_argument('--random_seed', type=int, default=None,
help="Set a specific random seed.")
group.add_argument('--speakerEmbedding', type=int, default=0,
help="(Depreciated) Feed the prediction network with "
"speaker embeddings along with the usual sequence.")
group.add_argument('--arMode', default='LSTM',
choices=['GRU', 'LSTM', 'RNN', 'no_ar', 'transformer'],
help="Architecture to use for the auto-regressive "
"network (default is lstm).")
group.add_argument('--nLevelsGRU', type=int, default=1,
help='Number of layers in the autoregressive network.')
group.add_argument('--rnnMode', type=str, default='transformer',
choices=['transformer', 'RNN', 'LSTM', 'linear',
'ffd', 'conv4', 'conv8', 'conv12'],
help="Architecture to use for the prediction network")
group.add_argument('--dropout', action='store_true',
help="Add a dropout layer at the output of the "
"prediction network.")
group.add_argument('--abspos', action='store_true',
help='If the prediction network is a transformer, '
                       'activate to use absolute coordinates.')
return parser
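# Usage sketch (added for illustration): get_default_cpc_config() returns an
# argparse.Namespace pre-filled with the defaults above, which can then be
# overridden field by field:
#
#   args = get_default_cpc_config()
#   args.hiddenEncoder = 512
#   args.rnnMode = 'LSTM'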
| CPC_audio-main | cpc/cpc_default_config.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import random
import torch
import sys
import psutil
from copy import deepcopy
from bisect import bisect_left
def untensor(d):
if isinstance(d, list):
return [untensor(v) for v in d]
if isinstance(d, dict):
return dict((k, untensor(v)) for k, v in d.items())
if hasattr(d, 'tolist'):
return d.tolist()
return d
def save_logs(data, pathLogs):
with open(pathLogs, 'w') as file:
json.dump(data, file, indent=2)
def update_logs(logs, logStep, prevlogs=None):
out = {}
for key in logs:
out[key] = deepcopy(logs[key])
if prevlogs is not None:
out[key] -= prevlogs[key]
out[key] /= logStep
return out
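# Worked example (added): update_logs turns accumulated per-step sums into
# averages over logStep updates, optionally relative to a previous snapshot:
#
#   logs = {'locLoss_train': np.array([8.0, 6.0])}
#   update_logs(logs, logStep=4)
#   # -> {'locLoss_train': array([2. , 1.5])}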
def show_logs(text, logs):
print("")
print('-'*50)
print(text)
for key in logs:
if key == "iter":
continue
nPredicts = logs[key].shape[0]
strSteps = ['Step'] + [str(s) for s in range(1, nPredicts + 1)]
formatCommand = ' '.join(['{:>16}' for x in range(nPredicts + 1)])
print(formatCommand.format(*strSteps))
strLog = [key] + ["{:10.6f}".format(s) for s in logs[key]]
print(formatCommand.format(*strLog))
print('-'*50)
def set_seed(seed):
random.seed(seed)
torch.manual_seed(seed)
np.random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def cpu_stats():
print(sys.version)
print(psutil.cpu_percent())
print(psutil.virtual_memory())
def ramp_scheduling_function(n_epoch_ramp, epoch):
if epoch >= n_epoch_ramp:
return 1
else:
return (epoch + 1) / n_epoch_ramp
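# Worked example (added): with n_epoch_ramp=3 the returned learning-rate
# multiplier ramps up linearly over the first three epochs, then stays at 1:
#
#   [ramp_scheduling_function(3, e) for e in range(5)]
#   # -> [0.333..., 0.666..., 1, 1, 1]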
class SchedulerCombiner:
r"""
An object which applies a list of learning rate schedulers sequentially.
"""
def __init__(self, scheduler_list, activation_step, curr_step=0):
r"""
Args:
- scheduler_list (list): a list of learning rate schedulers
- activation_step (list): a list of int. activation_step[i]
indicates at which step scheduler_list[i] should be activated
- curr_step (int): the starting step. Must be lower than
activation_step[0]
"""
if len(scheduler_list) != len(activation_step):
raise ValueError("The number of scheduler must be the same as "
"the number of activation step")
if activation_step[0] > curr_step:
raise ValueError("The first activation step cannot be higher than "
"the current step.")
self.scheduler_list = scheduler_list
self.activation_step = deepcopy(activation_step)
self.curr_step = curr_step
def step(self):
self.curr_step += 1
index = bisect_left(self.activation_step, self.curr_step) - 1
for i in reversed(range(index, len(self.scheduler_list))):
self.scheduler_list[i].step()
def __str__(self):
out = "SchedulerCombiner \n"
out += "(\n"
for index, scheduler in enumerate(self.scheduler_list):
out += f"({index}) {scheduler.__str__()} \n"
out += ")\n"
return out
| CPC_audio-main | cpc/utils/misc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| CPC_audio-main | cpc/utils/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
import os
from nose.tools import eq_, ok_
from .misc import SchedulerCombiner, ramp_scheduling_function
class TestCombineSchedulers(unittest.TestCase):
def setUp(self):
self.baseLR = 1
self.module = torch.nn.Linear(1, 1)
self.optimizer = torch.optim.SGD(
list(self.module.parameters()), lr=self.baseLR)
def testCombineRamp(self):
scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer,
lr_lambda=lambda epoch: ramp_scheduling_function(
3, epoch))
self.optimizer.step()
eq_(self.optimizer.param_groups[0]['lr'], self.baseLR / 3)
scheduler.step()
eq_(self.optimizer.param_groups[0]['lr'], 2 * self.baseLR / 3)
scheduler.step()
eq_(self.optimizer.param_groups[0]['lr'], 1)
for i in range(12):
scheduler.step()
eq_(self.optimizer.param_groups[0]['lr'], 1)
def testCombineRampStep(self):
scheduler_step = torch.optim.lr_scheduler.StepLR(
self.optimizer, 6, gamma=0.5)
scheduler_ramp = torch.optim.lr_scheduler.LambdaLR(self.optimizer,
lr_lambda=lambda epoch: ramp_scheduling_function(
3, epoch))
scheduler = SchedulerCombiner([scheduler_ramp, scheduler_step], [0, 3])
self.optimizer.step()
# Epoch 0
eq_(self.optimizer.param_groups[0]['lr'], self.baseLR / 3)
scheduler.step()
# Epoch 1
eq_(self.optimizer.param_groups[0]['lr'], 2 * self.baseLR / 3)
scheduler.step()
# Epoch 2
eq_(self.optimizer.param_groups[0]['lr'], 1)
scheduler.step()
# Epoch 3, 4, 5
for i in range(3):
eq_(self.optimizer.param_groups[0]['lr'], 1)
scheduler.step()
# Epoch 6
eq_(self.optimizer.param_groups[0]['lr'], 0.5)
| CPC_audio-main | cpc/utils/unit_tests.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
import torch.nn as nn
from numpy import prod
class NormalizationLayer(nn.Module):
def __init__(self):
super(NormalizationLayer, self).__init__()
def forward(self, x, epsilon=1e-8):
return x * (((x**2).mean(dim=1, keepdim=True) + epsilon).rsqrt())
def Upscale2d(x, factor=2):
assert isinstance(factor, int) and factor >= 1
if factor == 1:
return x
s = x.size()
x = x.view(-1, s[1], s[2], 1, s[3], 1)
x = x.expand(-1, s[1], s[2], factor, s[3], factor)
x = x.contiguous().view(-1, s[1], s[2] * factor, s[3] * factor)
return x
def getLayerNormalizationFactor(x):
r"""
Get He's constant for the given layer
https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
"""
size = x.weight.size()
fan_in = prod(size[1:])
return math.sqrt(2.0 / fan_in)
class ConstrainedLayer(nn.Module):
r"""
A handy refactor that allows the user to:
- initialize one layer's bias to zero
- apply He's initialization at runtime
"""
def __init__(self,
module,
equalized=True,
lrMul=1.0,
initBiasToZero=True):
r"""
equalized (bool): if true, the layer's weight should evolve within
the range (-1, 1)
initBiasToZero (bool): if true, bias will be initialized to zero
"""
super(ConstrainedLayer, self).__init__()
self.module = module
self.equalized = equalized
if initBiasToZero and module.bias is not None:
self.module.bias.data.fill_(0)
if self.equalized:
self.module.weight.data.normal_(0, 1)
self.weight = getLayerNormalizationFactor(self.module) * lrMul
def forward(self, x):
x = self.module(x)
if self.equalized:
x *= self.weight
return x
class EqualizedConv1d(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
kernelSize,
padding=0,
bias=True,
stride=1,
**kwargs):
r"""
        A nn.Conv1d module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
kernelSize (int): size of the convolutional kernel
padding (int): convolution's padding
bias (bool): with bias ?
"""
ConstrainedLayer.__init__(self,
nn.Conv1d(nChannelsPrevious, nChannels,
kernelSize, padding=padding,
bias=bias, stride=stride),
**kwargs)
class EqualizedConv2d(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
kernelSize,
padding=0,
bias=True,
**kwargs):
r"""
A nn.Conv2d module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
kernelSize (int): size of the convolutional kernel
padding (int): convolution's padding
bias (bool): with bias ?
"""
ConstrainedLayer.__init__(self,
nn.Conv2d(nChannelsPrevious, nChannels,
kernelSize, padding=padding,
bias=bias),
**kwargs)
class EqualizedLinear(ConstrainedLayer):
def __init__(self,
nChannelsPrevious,
nChannels,
bias=True,
**kwargs):
r"""
A nn.Linear module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
bias (bool): with bias ?
"""
ConstrainedLayer.__init__(self,
nn.Linear(nChannelsPrevious, nChannels,
bias=bias), **kwargs)
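# Minimal sketch (added for illustration): an equalized layer keeps its
# weights normally initialised and rescales them by He's constant at runtime,
# so the effective learning rate is comparable across layers of different
# fan-in.
#
#   import torch
#   conv = EqualizedConv1d(4, 8, kernelSize=3, padding=1)
#   x = torch.randn(2, 4, 100)   # (batch, channels, time)
#   y = conv(x)                  # (2, 8, 100); weights scaled by sqrt(2/12)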
| CPC_audio-main | cpc/criterion/custom_layers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .criterion import CPCUnsupersivedCriterion, SpeakerCriterion, \
PhoneCriterion, NoneCriterion, CTCPhoneCriterion
| CPC_audio-main | cpc/criterion/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import progressbar
import torch
from multiprocessing import Lock, Manager, Process
from copy import deepcopy
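# (Comment added for clarity.) CTC-style prefix beam search: beams are label
# prefixes encoded as comma-separated strings; pb_t / pnb_t hold, for every
# prefix, the total probability of the paths that end at time t in a blank /
# non-blank frame respectively.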
def beam_search(score_preds, nKeep, blankLabel):
T, P = score_preds.shape
beams = set([''])
pb_t_1 = {"": 1}
pnb_t_1 = {"": 0}
def getLastNumber(b):
return int(b.split(',')[-1])
for t in range(T):
nextBeams = set()
pb_t = {}
pnb_t = {}
for i_beam, b in enumerate(beams):
if b not in pb_t:
pb_t[b] = 0
pnb_t[b] = 0
if len(b) > 0:
pnb_t[b] += pnb_t_1[b] * score_preds[t, getLastNumber(b)]
pb_t[b] = (pnb_t_1[b] + pb_t_1[b]) * score_preds[t, blankLabel]
nextBeams.add(b)
for c in range(P):
if c == blankLabel:
continue
b_ = b + "," + str(c)
if b_ not in pb_t:
pb_t[b_] = 0
pnb_t[b_] = 0
if b != "" and getLastNumber(b) == c:
pnb_t[b_] += pb_t_1[b] * score_preds[t, c]
else:
pnb_t[b_] += (pb_t_1[b] + pnb_t_1[b]) * score_preds[t, c]
nextBeams.add(b_)
allPreds = [(pb_t[b] + pnb_t[b], b) for b in nextBeams]
allPreds.sort(reverse=True)
beams = [x[1] for x in allPreds[:nKeep]]
pb_t_1 = deepcopy(pb_t)
pnb_t_1 = deepcopy(pnb_t)
output = []
for score, x in allPreds[:nKeep]:
output.append((score, [int(y) for y in x.split(',') if len(y) > 0]))
return output
def collapseLabelChain(inputLabels):
# Shape N,T
N, T = inputLabels.size()
outSizes = torch.zeros(N, device=inputLabels.device, dtype=torch.int64)
output = []
for l in range(N):
status = inputLabels[l, :-1] - inputLabels[l, 1:]
status = torch.cat([torch.ones(1, device=status.device,
dtype=status.dtype),
status], dim=0)
outSizes[l] = (status != 0).sum()
output.append(inputLabels[l][status != 0])
maxSize = int(outSizes.max().item())
paddedOutput = torch.zeros(N, maxSize,
device=inputLabels.device,
dtype=torch.int64)
for l in range(N):
S = int(outSizes[l])
paddedOutput[l, :S] = output[l]
return paddedOutput, outSizes
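def _demo_collapse_label_chain():
    # Illustrative example (added; not in the original file): merging runs of
    # repeated labels, row by row, with per-row collapsed lengths.
    chains = torch.tensor([[0, 0, 1, 1, 2],
                           [3, 3, 3, 0, 0]], dtype=torch.int64)
    collapsed, sizes = collapseLabelChain(chains)
    # collapsed -> [[0, 1, 2], [3, 0, 0]] (second row zero-padded)
    # sizes     -> [3, 2]
    return collapsed, sizes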
def NeedlemanWunschAlignScore(seq1, seq2, d, m, r, normalize=True):
N1, N2 = len(seq1), len(seq2)
# Fill up the errors
tmpRes_ = [[None for x in range(N2 + 1)] for y in range(N1 + 1)]
for i in range(N1 + 1):
tmpRes_[i][0] = i * d
for j in range(N2 + 1):
tmpRes_[0][j] = j * d
for i in range(N1):
for j in range(N2):
match = r if seq1[i] == seq2[j] else m
v1 = tmpRes_[i][j] + match
v2 = tmpRes_[i + 1][j] + d
v3 = tmpRes_[i][j + 1] + d
tmpRes_[i + 1][j + 1] = max(v1, max(v2, v3))
i = j = 0
res = -tmpRes_[N1][N2]
if normalize:
res /= float(N1)
return res
def get_seq_PER(seqLabels, detectedLabels):
return NeedlemanWunschAlignScore(seqLabels, detectedLabels, -1, -1, 0,
normalize=True)
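def _demo_seq_per():
    # Illustrative example (added): the phone error rate is the normalised
    # Needleman-Wunsch alignment cost with unit insertion / deletion /
    # substitution penalties; these sequences give 4/7 (see the unit tests).
    ref = [0, 1, 1, 2, 0, 2, 2]
    pred = [1, 1, 2, 2, 0, 0]
    return get_seq_PER(ref, pred)  # -> 0.5714...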
def getPER(dataLoader, featureMaker, blankLabel):
bar = progressbar.ProgressBar(len(dataLoader))
bar.start()
out = 0
n_items = 0
n_keep_beam_search = 100
for index, data in enumerate(dataLoader):
bar.update(index)
with torch.no_grad():
output = featureMaker(data).cpu().numpy()
labels = data[1]
labels, targetSize = collapseLabelChain(labels)
lock = Lock()
def per(rank, outScore):
S = int(targetSize[rank])
seqLabels = labels[rank, :S]
preds = beam_search(output[rank],
n_keep_beam_search, blankLabel)[0][1]
value = get_seq_PER(seqLabels, preds)
with lock:
outScore.value += value
manager = Manager()
outScore = manager.Value('f', 0.)
N, S, D = output.shape
processes = []
for rank in range(N):
p = Process(
target=per, args=(rank, outScore))
p.start()
processes.append(p)
for p in processes:
p.join()
out += outScore.value
n_items += N
bar.finish()
return (out / n_items)
| CPC_audio-main | cpc/criterion/seq_alignment.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from .seq_alignment import collapseLabelChain
from .custom_layers import EqualizedLinear, EqualizedConv1d
class FFNetwork(nn.Module):
def __init__(self, din, dout, dff, dropout):
super(FFNetwork, self).__init__()
self.lin1 = EqualizedLinear(din, dff, bias=True, equalized=True)
self.lin2 = EqualizedLinear(dff, dout, bias=True, equalized=True)
self.relu = nn.ReLU()
self.drop = nn.Dropout(dropout)
def forward(self, x):
return self.lin2(self.drop(self.relu(self.lin1(x))))
class ShiftedConv(nn.Module):
def __init__(self, dimOutputAR, dimOutputEncoder, kernelSize):
super(ShiftedConv, self).__init__()
self.module = EqualizedConv1d(dimOutputAR, dimOutputEncoder,
kernelSize, equalized=True,
padding=0)
self.kernelSize = kernelSize
def forward(self, x):
# Input format: N, S, C -> need to move to N, C, S
N, S, C = x.size()
x = x.permute(0, 2, 1)
padding = torch.zeros(N, C, self.kernelSize - 1, device=x.device)
x = torch.cat([padding, x], dim=2)
x = self.module(x)
x = x.permute(0, 2, 1)
return x
class PredictionNetwork(nn.Module):
def __init__(self,
nPredicts,
dimOutputAR,
dimOutputEncoder,
rnnMode=None,
dropout=False,
sizeInputSeq=116):
super(PredictionNetwork, self).__init__()
self.predictors = nn.ModuleList()
self.RESIDUAL_STD = 0.01
self.dimOutputAR = dimOutputAR
self.dropout = nn.Dropout(p=0.5) if dropout else None
for i in range(nPredicts):
if rnnMode == 'RNN':
self.predictors.append(
nn.RNN(dimOutputAR, dimOutputEncoder))
self.predictors[-1].flatten_parameters()
elif rnnMode == 'LSTM':
self.predictors.append(
nn.LSTM(dimOutputAR, dimOutputEncoder, batch_first=True))
self.predictors[-1].flatten_parameters()
elif rnnMode == 'ffd':
self.predictors.append(
FFNetwork(dimOutputAR, dimOutputEncoder,
dimOutputEncoder, 0))
elif rnnMode == 'conv4':
self.predictors.append(
ShiftedConv(dimOutputAR, dimOutputEncoder, 4))
elif rnnMode == 'conv8':
self.predictors.append(
ShiftedConv(dimOutputAR, dimOutputEncoder, 8))
elif rnnMode == 'conv12':
self.predictors.append(
ShiftedConv(dimOutputAR, dimOutputEncoder, 12))
elif rnnMode == 'transformer':
                from cpc.transformers import buildTransformerAR
self.predictors.append(
buildTransformerAR(dimOutputEncoder,
1,
sizeInputSeq,
False))
else:
self.predictors.append(
nn.Linear(dimOutputAR, dimOutputEncoder, bias=False))
if dimOutputEncoder > dimOutputAR:
residual = dimOutputEncoder - dimOutputAR
                self.predictors[-1].weight.data.copy_(torch.cat(
                    [torch.randn(dimOutputAR, dimOutputAR),
                     self.RESIDUAL_STD * torch.randn(residual, dimOutputAR)],
                    dim=0))
def forward(self, c, candidates):
assert(len(candidates) == len(self.predictors))
out = []
# UGLY
if isinstance(self.predictors[0], EqualizedConv1d):
c = c.permute(0, 2, 1)
for k in range(len(self.predictors)):
locC = self.predictors[k](c)
if isinstance(locC, tuple):
locC = locC[0]
if isinstance(self.predictors[k], EqualizedConv1d):
locC = locC.permute(0, 2, 1)
if self.dropout is not None:
locC = self.dropout(locC)
locC = locC.view(locC.size(0), 1, locC.size(1), locC.size(2))
outK = (locC*candidates[k]).mean(dim=3)
out.append(outK)
return out
class BaseCriterion(nn.Module):
def warmUp(self):
return False
def update(self):
return
class NoneCriterion(BaseCriterion):
def __init__(self):
super(NoneCriterion, self).__init__()
def forward(self, cFeature, encodedData, label):
return torch.zeros(1, 1, device=cFeature.device), \
torch.zeros(1, 1, device=cFeature.device)
class CPCUnsupersivedCriterion(BaseCriterion):
def __init__(self,
nPredicts, # Number of steps
dimOutputAR, # Dimension of G_ar
dimOutputEncoder, # Dimension of the convolutional net
negativeSamplingExt, # Number of negative samples to draw
mode=None,
rnnMode=False,
dropout=False,
speakerEmbedding=0,
nSpeakers=0,
sizeInputSeq=128):
super(CPCUnsupersivedCriterion, self).__init__()
if speakerEmbedding > 0:
print(
f"Using {speakerEmbedding} speaker embeddings for {nSpeakers} speakers")
self.speakerEmb = torch.nn.Embedding(nSpeakers, speakerEmbedding)
dimOutputAR += speakerEmbedding
else:
self.speakerEmb = None
self.wPrediction = PredictionNetwork(
nPredicts, dimOutputAR, dimOutputEncoder, rnnMode=rnnMode,
dropout=dropout, sizeInputSeq=sizeInputSeq - nPredicts)
self.nPredicts = nPredicts
self.negativeSamplingExt = negativeSamplingExt
self.lossCriterion = nn.CrossEntropyLoss()
if mode not in [None, "reverse"]:
raise ValueError("Invalid mode")
self.mode = mode
def sampleClean(self, encodedData, windowSize):
batchSize, nNegativeExt, dimEncoded = encodedData.size()
outputs = []
negExt = encodedData.contiguous().view(-1, dimEncoded)
# Draw nNegativeExt * batchSize negative samples anywhere in the batch
batchIdx = torch.randint(low=0, high=batchSize,
size=(self.negativeSamplingExt
* windowSize * batchSize, ),
device=encodedData.device)
seqIdx = torch.randint(low=1, high=nNegativeExt,
size=(self.negativeSamplingExt
* windowSize * batchSize, ),
device=encodedData.device)
baseIdx = torch.arange(0, windowSize, device=encodedData.device)
baseIdx = baseIdx.view(1, 1,
windowSize).expand(1,
self.negativeSamplingExt,
windowSize).expand(batchSize, self.negativeSamplingExt, windowSize)
seqIdx += baseIdx.contiguous().view(-1)
seqIdx = torch.remainder(seqIdx, nNegativeExt)
extIdx = seqIdx + batchIdx * nNegativeExt
negExt = negExt[extIdx].view(batchSize, self.negativeSamplingExt,
windowSize, dimEncoded)
labelLoss = torch.zeros((batchSize * windowSize),
dtype=torch.long,
device=encodedData.device)
for k in range(1, self.nPredicts + 1):
# Positive samples
if k < self.nPredicts:
posSeq = encodedData[:, k:-(self.nPredicts-k)]
else:
posSeq = encodedData[:, k:]
posSeq = posSeq.view(batchSize, 1, posSeq.size(1), dimEncoded)
fullSeq = torch.cat((posSeq, negExt), dim=1)
outputs.append(fullSeq)
return outputs, labelLoss
def getInnerLoss(self):
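        # Note (added): orthoLoss / orthoCriterion are not defined anywhere in
        # this file; this helper appears to be unused dead code.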
return "orthoLoss", self.orthoLoss * self.wPrediction.orthoCriterion()
def forward(self, cFeature, encodedData, label):
if self.mode == "reverse":
encodedData = torch.flip(encodedData, [1])
cFeature = torch.flip(cFeature, [1])
batchSize, seqSize, dimAR = cFeature.size()
windowSize = seqSize - self.nPredicts
cFeature = cFeature[:, :windowSize]
sampledData, labelLoss = self.sampleClean(encodedData, windowSize)
if self.speakerEmb is not None:
l_ = label.view(batchSize, 1).expand(batchSize, windowSize)
embeddedSpeaker = self.speakerEmb(l_)
cFeature = torch.cat([cFeature, embeddedSpeaker], dim=2)
predictions = self.wPrediction(cFeature, sampledData)
outLosses = [0 for x in range(self.nPredicts)]
outAcc = [0 for x in range(self.nPredicts)]
for k, locPreds in enumerate(predictions[:self.nPredicts]):
locPreds = locPreds.permute(0, 2, 1)
locPreds = locPreds.contiguous().view(-1, locPreds.size(2))
lossK = self.lossCriterion(locPreds, labelLoss)
outLosses[k] += lossK.view(1, -1)
_, predsIndex = locPreds.max(1)
outAcc[k] += torch.sum(predsIndex == labelLoss).float().view(1, -1)
return torch.cat(outLosses, dim=1), \
torch.cat(outAcc, dim=1) / (windowSize * batchSize)
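# Minimal smoke-test sketch (added for illustration): random context and
# encoder features of matching dimensions yield one loss and one accuracy
# value per prediction step.
#
#   crit = CPCUnsupersivedCriterion(nPredicts=12, dimOutputAR=256,
#                                   dimOutputEncoder=256,
#                                   negativeSamplingExt=8)
#   c = torch.randn(4, 128, 256)       # context features (B, S, D)
#   z = torch.randn(4, 128, 256)       # encoded features (B, S, D)
#   losses, accs = crit(c, z, None)    # both of shape (1, 12)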
class SpeakerCriterion(BaseCriterion):
def __init__(self, dimEncoder, nSpeakers):
super(SpeakerCriterion, self).__init__()
self.linearSpeakerClassifier = nn.Linear(
dimEncoder, nSpeakers)
self.lossCriterion = nn.CrossEntropyLoss()
self.entropyCriterion = nn.LogSoftmax(dim=1)
def forward(self, cFeature, otherEncoded, label):
# cFeature.size() : batchSize x seq Size x hidden size
batchSize = cFeature.size(0)
cFeature = cFeature[:, -1, :]
cFeature = cFeature.view(batchSize, -1)
predictions = self.linearSpeakerClassifier(cFeature)
loss = self.lossCriterion(predictions, label).view(1, -1)
acc = (predictions.max(1)[1] == label).double().mean().view(1, -1)
return loss, acc
class PhoneCriterion(BaseCriterion):
def __init__(self, dimEncoder, nPhones, onEncoder,
nLayers=1):
super(PhoneCriterion, self).__init__()
if nLayers == 1:
self.PhoneCriterionClassifier = nn.Linear(dimEncoder, nPhones)
else:
outLayers = [nn.Linear(dimEncoder, nPhones)]
for l in range(nLayers - 1):
outLayers.append(nn.ReLU())
outLayers.append(nn.Linear(nPhones, nPhones))
self.PhoneCriterionClassifier = nn.Sequential(*outLayers)
self.lossCriterion = nn.CrossEntropyLoss()
self.onEncoder = onEncoder
def forward(self, cFeature, otherEncoded, label):
# cFeature.size() : batchSize x seq Size x hidden size
if self.onEncoder:
predictions = self.getPrediction(otherEncoded)
else:
predictions = self.getPrediction(cFeature)
predictions = predictions.view(-1, predictions.size(2))
label = label.view(-1)
loss = self.lossCriterion(predictions, label).view(1, -1)
acc = (predictions.max(1)[1] == label).double().mean().view(1, -1)
return loss, acc
def getPrediction(self, cFeature):
batchSize, seqSize = cFeature.size(0), cFeature.size(1)
cFeature = cFeature.contiguous().view(batchSize * seqSize, -1)
output = self.PhoneCriterionClassifier(cFeature)
return output.view(batchSize, seqSize, -1)
class CTCPhoneCriterion(BaseCriterion):
def __init__(self, dimEncoder, nPhones, onEncoder):
super(CTCPhoneCriterion, self).__init__()
self.PhoneCriterionClassifier = nn.Linear(dimEncoder, nPhones + 1)
self.lossCriterion = nn.CTCLoss(blank=nPhones, zero_infinity=True)
self.onEncoder = onEncoder
if onEncoder:
raise ValueError("On encoder version not implemented yet")
self.BLANK_LABEL = nPhones
def getPrediction(self, cFeature):
B, S, H = cFeature.size()
cFeature = cFeature.contiguous().view(B*S, H)
return self.PhoneCriterionClassifier(cFeature).view(B, S, -1)
def forward(self, cFeature, otherEncoded, label):
# cFeature.size() : batchSize x seq Size x hidden size
B, S, H = cFeature.size()
predictions = self.getPrediction(cFeature)
label = label.to(predictions.device)
label, sizeLabels = collapseLabelChain(label)
avgPER = 0.
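        # Note (added): the PER is not computed here; a constant 0 is returned
        # alongside the CTC loss as a placeholder "accuracy".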
predictions = torch.nn.functional.log_softmax(predictions, dim=2)
predictions = predictions.permute(1, 0, 2)
targetSizePred = torch.ones(B, dtype=torch.int64,
device=predictions.device) * S
loss = self.lossCriterion(predictions, label,
targetSizePred, sizeLabels).view(1, -1)
return loss, avgPER * torch.ones(1, 1, device=loss.device)
class ModelCriterionCombined(torch.nn.Module):
def __init__(self, model, criterion):
super(ModelCriterionCombined, self).__init__()
self.model = model
self.criterion = criterion
def forward(self, data, label):
c_feature, encoded_data, label = self.model(data, label)
loss, acc = self.criterion(c_feature, encoded_data, label)
return loss, acc
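# Usage sketch (added): wrapping the model and the criterion in one module
# makes it possible to run the forward pass and the loss computation in a
# single call, e.g. under torch.nn.DataParallel:
#
#   combined = ModelCriterionCombined(cpcModel, cpcCriterion)
#   loss, acc = combined(batchData, label)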
| CPC_audio-main | cpc/criterion/criterion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
import torch
import json
from pathlib import Path
import ABX.abx_group_computation as abx_g
import ABX.abx_iterators as abx_it
from cpc.dataset import findAllSeqs
from cpc.feature_loader import buildFeature, FeatureModule, loadModel
def reduce_sparse_data(quotient, divisor):
return quotient / (1e-08 * (divisor == 0) + divisor)
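# Note (added for clarity): the 1e-08 * (divisor == 0) term only activates
# where the divisor is exactly zero, so cells where both counts are zero
# divide to 0 instead of producing NaN, e.g.
#
#   reduce_sparse_data(torch.tensor([2., 0.]), torch.tensor([4., 0.]))
#   # -> tensor([0.5000, 0.0000])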
def ABX(feature_function,
path_item_file,
seq_list,
distance_mode,
step_feature,
modes,
seq_norm=True,
cuda=False,
max_x_across=5,
max_size_group=30):
# ABX dataset
ABXDataset = abx_it.ABXFeatureLoader(path_item_file, seq_list,
feature_function, step_feature, True)
if cuda:
ABXDataset.cuda()
# Distance function
distance_function = abx_g.get_distance_function_from_name(distance_mode)
# Output
scores = {}
# ABX within
if 'within' in modes:
print("Computing ABX within speakers...")
ABXIterator = ABXDataset.get_iterator('within', max_size_group)
group_confusion = abx_g.get_abx_scores_dtw_on_group(ABXIterator,
distance_function,
ABXIterator.symmetric)
n_data = group_confusion._values().size(0)
index_ = torch.sparse.LongTensor(group_confusion._indices(),
torch.ones((n_data),
dtype=torch.float),
group_confusion.size())
divisor_context = torch.sparse.sum(index_, dim=3).to_dense()
group_confusion = torch.sparse.sum(group_confusion, dim=3).to_dense()
group_confusion = reduce_sparse_data(group_confusion, divisor_context)
S, p1, p2 = group_confusion.size()
index_speaker = divisor_context > 0
divisor_speaker = index_speaker.sum(dim=0)
phone_confusion = reduce_sparse_data(group_confusion.sum(dim=0),
divisor_speaker)
scores['within'] = (phone_confusion.sum() /
(divisor_speaker > 0).sum()).item()
print(f"...done. ABX within : {scores['within']}")
# ABX across
if 'across' in modes:
print("Computing ABX across speakers...")
ABXIterator = ABXDataset.get_iterator('across', max_size_group)
ABXIterator.max_x = max_x_across
group_confusion = abx_g.get_abx_scores_dtw_on_group(ABXIterator,
distance_function,
ABXIterator.symmetric)
n_data = group_confusion._values().size(0)
index_ = torch.sparse.LongTensor(group_confusion._indices(),
torch.ones((n_data),
dtype=torch.float),
group_confusion.size())
divisor_context = torch.sparse.sum(index_, dim=[3, 4]).to_dense()
group_confusion = torch.sparse.sum(
group_confusion, dim=[3, 4]).to_dense()
group_confusion = reduce_sparse_data(group_confusion, divisor_context)
S, p1, p2 = group_confusion.size()
index_speaker = divisor_context > 0
divisor_speaker = index_speaker.sum(dim=0)
phone_confusion = reduce_sparse_data(group_confusion.sum(dim=0),
divisor_speaker)
scores['across'] = (phone_confusion.sum() /
(divisor_speaker > 0).sum()).item()
print(f"...done. ABX across : {scores['across']}")
return scores
def update_base_parser(parser):
parser.add_argument('--debug', action='store_true')
    parser.add_argument('--feature_size', type=float, default=0.01,
                        help="Size (in s) of one feature")
parser.add_argument('--cuda', action='store_true',
help="Use the GPU to compute distances")
parser.add_argument('--mode', type=str, default='all',
choices=['all', 'within', 'across'],
help="Type of ABX score to compute")
parser.add_argument("--max_size_group", type=int, default=10,
help="Max size of a group while computing the"
"ABX score")
parser.add_argument("--max_x_across", type=int, default=5,
help="When computing the ABX across score, maximum"
"number of speaker X to sample per couple A,B")
parser.add_argument("--out", type=str, default=None,
help="Path where the results should be saved")
def parse_args(argv):
base_parser = argparse.ArgumentParser(description='ABX metric')
subparsers = base_parser.add_subparsers(dest='load')
parser_checkpoint = subparsers.add_parser('from_checkpoint')
update_base_parser(parser_checkpoint)
parser_checkpoint.add_argument('path_checkpoint', type=str,
help="Path to the model's checkpoint")
parser_checkpoint.add_argument('path_item_file', type=str,
help="Path to the ABX .item file containing "
"the triplets labels")
parser_checkpoint.add_argument('path_dataset', type=str,
help="Path to the dataset")
parser_checkpoint.add_argument('--seq_norm', action='store_true',
help='If activated, normalize each batch '
'of feature across the time channel before '
'computing ABX.')
parser_checkpoint.add_argument('--max_size_seq', default=64000, type=int,
help='Maximal number of frames to consider '
'when computing a batch of features.')
parser_checkpoint.add_argument('--strict', action='store_true',
help='If activated, each batch of feature '
'will contain exactly max_size_seq frames.')
parser_checkpoint.add_argument('--file_extension', type=str,
default='.wav',
                                   help='Extension of each audio file in the '
'dataset.')
parser_checkpoint.add_argument('--get_encoded', action='store_true',
help='If activated, compute the ABX score '
'using the output of the encoder network.')
parser_db = subparsers.add_parser('from_pre_computed')
update_base_parser(parser_db)
parser_db.add_argument('path_features', type=str,
help="Path to pre-computed torch features (.pt)")
parser_db.add_argument('--file_extension', type=str,
default='.pt', help='Extension of each feature '
'in the dataset')
# multi-gpu / multi-node
return base_parser.parse_args(argv)
def main(argv):
args = parse_args(argv)
if args.load == 'from_checkpoint':
# Checkpoint
model = loadModel([args.path_checkpoint])[0]
model.gAR.keepHidden = True
# Feature maker
feature_maker = FeatureModule(model, args.get_encoded).cuda().eval()
def feature_function(x): return buildFeature(feature_maker, x,
seqNorm=args.seq_norm,
strict=args.strict,
maxSizeSeq=args.max_size_seq)
elif args.load == 'from_pre_computed':
def feature_function(x): return torch.load(x, 'cpu')
# Modes
if args.mode == 'all':
modes = ["within", "across"]
else:
modes = [args.mode]
distance_mode = 'cosine'
step_feature = 1 / args.feature_size
# Get the list of sequences
seq_list, _ = findAllSeqs(args.path_dataset, extension=args.file_extension)
seq_list = [(str(Path(x).stem), str(Path(args.path_dataset) / x))
for (_, x) in seq_list]
if args.debug:
seq_list = seq_list[:1000]
scores = ABX(feature_function, args.path_item_file,
seq_list, distance_mode,
step_feature, modes,
cuda=args.cuda,
seq_norm=args.seq_norm,
max_x_across=args.max_x_across,
max_size_group=args.max_size_group)
out_dir = Path(args.path_checkpoint).parent if args.out is None \
else Path(args.out)
out_dir.mkdir(exist_ok=True)
path_score = out_dir / 'ABX_scores.json'
with open(path_score, 'w') as file:
json.dump(scores, file, indent=2)
path_args = out_dir / 'ABX_args.json'
with open(path_args, 'w') as file:
json.dump(vars(args), file, indent=2)
if __name__ == "__main__":
args = sys.argv[1:]
main(args)
| CPC_audio-main | cpc/eval/ABX.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import torchaudio
from copy import deepcopy
import torch
import time
import random
import math
import json
import subprocess
import sys
import progressbar
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
from torch.multiprocessing import Pool
from cpc.criterion.seq_alignment import get_seq_PER
from cpc.criterion.seq_alignment import beam_search
from cpc.feature_loader import loadModel
from cpc.dataset import findAllSeqs, parseSeqLabels, filterSeqs
def load(path_item):
seq_name = path_item.stem
data = torchaudio.load(str(path_item))[0].view(1, -1)
return seq_name, data
class SingleSequenceDataset(Dataset):
def __init__(self,
pathDB,
seqNames,
phoneLabelsDict,
inDim=1,
transpose=True):
"""
Args:
- path (string): path to the training dataset
- sizeWindow (int): size of the sliding window
- seqNames (list): sequences to load
            - phoneLabels (dictionary): if not None, a dictionary with the
following entries
"step": size of a labelled window
"$SEQ_NAME": list of phonem labels for
the sequence $SEQ_NAME
"""
self.seqNames = deepcopy(seqNames)
self.pathDB = pathDB
self.phoneLabelsDict = deepcopy(phoneLabelsDict)
self.inDim = inDim
self.transpose = transpose
self.loadSeqs()
def loadSeqs(self):
# Labels
self.seqOffset = [0]
self.phoneLabels = []
self.phoneOffsets = [0]
self.data = []
self.maxSize = 0
self.maxSizePhone = 0
# Data
nprocess = min(30, len(self.seqNames))
start_time = time.time()
to_load = [Path(self.pathDB) / x for _, x in self.seqNames]
with Pool(nprocess) as p:
poolData = p.map(load, to_load)
tmpData = []
poolData.sort()
totSize = 0
minSizePhone = float('inf')
for seqName, seq in poolData:
self.phoneLabels += self.phoneLabelsDict[seqName]
self.phoneOffsets.append(len(self.phoneLabels))
self.maxSizePhone = max(self.maxSizePhone, len(
self.phoneLabelsDict[seqName]))
minSizePhone = min(minSizePhone, len(
self.phoneLabelsDict[seqName]))
sizeSeq = seq.size(1)
self.maxSize = max(self.maxSize, sizeSeq)
totSize += sizeSeq
tmpData.append(seq)
self.seqOffset.append(self.seqOffset[-1] + sizeSeq)
del seq
self.data = torch.cat(tmpData, dim=1)
self.phoneLabels = torch.tensor(self.phoneLabels, dtype=torch.long)
print(f'Loaded {len(self.phoneOffsets)} sequences '
f'in {time.time() - start_time:.2f} seconds')
print(f'maxSizeSeq : {self.maxSize}')
print(f'maxSizePhone : {self.maxSizePhone}')
print(f"minSizePhone : {minSizePhone}")
print(f'Total size dataset {totSize / (16000 * 3600)} hours')
def __getitem__(self, idx):
offsetStart = self.seqOffset[idx]
offsetEnd = self.seqOffset[idx+1]
offsetPhoneStart = self.phoneOffsets[idx]
offsetPhoneEnd = self.phoneOffsets[idx + 1]
sizeSeq = int(offsetEnd - offsetStart)
sizePhone = int(offsetPhoneEnd - offsetPhoneStart)
outSeq = torch.zeros((self.inDim, self.maxSize))
outPhone = torch.zeros((self.maxSizePhone))
outSeq[:, :sizeSeq] = self.data[:, offsetStart:offsetEnd]
outPhone[:sizePhone] = self.phoneLabels[offsetPhoneStart:offsetPhoneEnd]
return outSeq, torch.tensor([sizeSeq], dtype=torch.long), outPhone.long(), torch.tensor([sizePhone], dtype=torch.long)
def __len__(self):
return len(self.seqOffset) - 1
class CTCphone_criterion(torch.nn.Module):
def __init__(self, dimEncoder, nPhones, LSTM=False, sizeKernel=8,
seqNorm=False, dropout=False, reduction='sum'):
super(CTCphone_criterion, self).__init__()
self.seqNorm = seqNorm
self.epsilon = 1e-8
self.dropout = torch.nn.Dropout2d(
p=0.5, inplace=False) if dropout else None
self.conv1 = torch.nn.LSTM(dimEncoder, dimEncoder,
num_layers=1, batch_first=True)
self.PhoneCriterionClassifier = torch.nn.Conv1d(
dimEncoder, nPhones + 1, sizeKernel, stride=sizeKernel // 2)
self.lossCriterion = torch.nn.CTCLoss(blank=nPhones,
reduction=reduction,
zero_infinity=True)
self.relu = torch.nn.ReLU()
self.BLANK_LABEL = nPhones
self.useLSTM = LSTM
def getPrediction(self, cFeature, featureSize):
B, S, H = cFeature.size()
if self.seqNorm:
for b in range(B):
size = featureSize[b]
m = cFeature[b, :size].mean(dim=0, keepdim=True)
v = cFeature[b, :size].var(dim=0, keepdim=True)
cFeature[b] = (cFeature[b] - m) / torch.sqrt(v + self.epsilon)
if self.useLSTM:
cFeature = self.conv1(cFeature)[0]
cFeature = cFeature.permute(0, 2, 1)
if self.dropout is not None:
cFeature = self.dropout(cFeature)
cFeature = self.PhoneCriterionClassifier(cFeature)
return cFeature.permute(0, 2, 1)
def forward(self, cFeature, featureSize, label, labelSize):
# cFeature.size() : batchSize x seq Size x hidden size
B, S, H = cFeature.size()
predictions = self.getPrediction(cFeature, featureSize)
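        # The classifier conv uses stride sizeKernel // 2 (4 with the default
        # kernel of 8), so the number of prediction frames is featureSize / 4.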
featureSize /= 4
predictions = cut_data(predictions, featureSize)
featureSize = torch.clamp(featureSize, max=predictions.size(1))
label = cut_data(label, labelSize)
if labelSize.min() <= 0:
print(label, labelSize)
predictions = torch.nn.functional.log_softmax(predictions, dim=2)
predictions = predictions.permute(1, 0, 2)
loss = self.lossCriterion(predictions, label,
featureSize, labelSize).view(1, -1)
        if torch.isinf(loss).sum() > 0 or torch.isnan(loss).sum() > 0:
            # Replace a degenerate (inf/nan) CTC loss with a zero tensor:
            # returning the int 0 here would break callers that call
            # loss.mean().backward().
            loss = torch.zeros_like(loss, requires_grad=True)
return loss
class IDModule(torch.nn.Module):
def __init__(self):
super(IDModule, self).__init__()
def forward(self, feature, *args):
B, C, S = feature.size()
return feature.permute(0, 2, 1), None, None
def cut_data(seq, sizeSeq):
maxSeq = sizeSeq.max()
return seq[:, :maxSeq]
def prepare_data(data):
seq, sizeSeq, phone, sizePhone = data
seq = seq.cuda(non_blocking=True)
phone = phone.cuda(non_blocking=True)
sizeSeq = sizeSeq.cuda(non_blocking=True).view(-1)
sizePhone = sizePhone.cuda(non_blocking=True).view(-1)
seq = cut_data(seq.permute(0, 2, 1), sizeSeq).permute(0, 2, 1)
return seq, sizeSeq, phone, sizePhone
def train_step(train_loader,
model,
criterion,
optimizer,
downsampling_factor):
if model.optimize:
model.train()
criterion.train()
avg_loss = 0
nItems = 0
for data in train_loader:
optimizer.zero_grad()
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
if not model.optimize:
c_feature = c_feature.detach()
sizeSeq = sizeSeq / downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
loss.mean().backward()
avg_loss += loss.mean().item()
nItems += 1
optimizer.step()
return avg_loss / nItems
def val_step(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avg_loss = 0
nItems = 0
for data in val_loader:
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
sizeSeq = sizeSeq / downsampling_factor
loss = criterion(c_feature, sizeSeq, phone, sizePhone)
avg_loss += loss.mean().item()
nItems += 1
return avg_loss / nItems
def get_per(data):
pred, size_pred, gt, size_gt, blank_label = data
l_ = min(size_pred // 4, pred.size(0))
p_ = pred[:l_].view(l_, -1).numpy()
gt_seq = gt[:size_gt].view(-1).tolist()
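    # Beam-search decode (beam width 20) the frame-wise posteriors, then
    # score the decoded phone sequence against the ground truth (PER).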
predSeq = beam_search(p_, 20, blank_label)[0][1]
out = get_seq_PER(gt_seq, predSeq)
return out
def perStep(val_loader,
model,
criterion,
downsampling_factor):
model.eval()
criterion.eval()
avgPER = 0
varPER = 0
nItems = 0
print("Starting the PER computation through beam search")
bar = progressbar.ProgressBar(maxval=len(val_loader))
bar.start()
for index, data in enumerate(val_loader):
bar.update(index)
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = model(seq, None)
sizeSeq = sizeSeq / downsampling_factor
predictions = torch.nn.functional.softmax(
criterion.module.getPrediction(c_feature, sizeSeq), dim=2).cpu()
phone = phone.cpu()
sizeSeq = sizeSeq.cpu()
sizePhone = sizePhone.cpu()
bs = c_feature.size(0)
data_per = [(predictions[b], sizeSeq[b], phone[b], sizePhone[b],
criterion.module.BLANK_LABEL) for b in range(bs)]
with Pool(bs) as p:
poolData = p.map(get_per, data_per)
avgPER += sum([x for x in poolData])
varPER += sum([x*x for x in poolData])
nItems += len(poolData)
bar.finish()
avgPER /= nItems
varPER /= nItems
varPER -= avgPER**2
print(f"Average PER {avgPER}")
print(f"Standard deviation PER {math.sqrt(varPER)}")
def run(train_loader,
val_loader,
model,
criterion,
optimizer,
downsampling_factor,
nEpochs,
pathCheckpoint):
print(f"Starting the training for {nEpochs} epochs")
bestLoss = float('inf')
for epoch in range(nEpochs):
lossTrain = train_step(train_loader, model, criterion,
optimizer, downsampling_factor)
print(f"Epoch {epoch} loss train : {lossTrain}")
lossVal = val_step(val_loader, model, criterion, downsampling_factor)
print(f"Epoch {epoch} loss val : {lossVal}")
if lossVal < bestLoss:
bestLoss = lossVal
state_dict = {'classifier': criterion.state_dict(),
'model': model.state_dict(),
'bestLoss': bestLoss}
torch.save(state_dict, pathCheckpoint)
def get_PER_args(args):
path_args_training = os.path.join(args.output, "args_training.json")
with open(path_args_training, 'rb') as file:
data = json.load(file)
if args.pathDB is None:
args.pathDB = data["pathDB"]
args.file_extension = data["file_extension"]
if args.pathVal is None and args.pathPhone is None:
args.pathPhone = data["pathPhone"]
args.pathVal = data["pathVal"]
args.pathCheckpoint = data["pathCheckpoint"]
args.no_pretraining = data["no_pretraining"]
args.LSTM = data.get("LSTM", False)
args.seqNorm = data.get("seqNorm", False)
args.dropout = data.get("dropout", False)
args.in_dim = data.get("in_dim", 1)
args.loss_reduction = data.get("loss_reduction", "mean")
return args
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
parser = argparse.ArgumentParser(description='Simple phone recognition pipeline '
'for the common voices datasets')
subparsers = parser.add_subparsers(dest='command')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('pathDB', type=str,
help='Path to the directory containing the '
'audio data / pre-computed features.')
parser_train.add_argument('pathPhone', type=str,
help='Path to the .txt file containing the '
'phone transcription.')
parser_train.add_argument('pathCheckpoint', type=str,
help='Path to the CPC checkpoint to load. '
                              'Set to ID to work with pre-computed features.')
parser_train.add_argument('--freeze', action='store_true',
help="Freeze the CPC features layers")
parser_train.add_argument('--pathTrain', default=None, type=str,
help='Path to the .txt files containing the '
'list of the training sequences.')
parser_train.add_argument('--pathVal', default=None, type=str,
help='Path to the .txt files containing the '
'list of the validation sequences.')
parser_train.add_argument('--file_extension', type=str, default=".mp3",
help='Extension of the files in the '
'dataset')
parser_train.add_argument('--batchSize', type=int, default=8)
parser_train.add_argument('--nEpochs', type=int, default=30)
parser_train.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer.')
parser_train.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer.')
parser_train.add_argument('--epsilon', type=float, default=1e-08,
help='Value of epsilon for the Adam optimizer.')
parser_train.add_argument('--lr', type=float, default=2e-04,
help='Learning rate.')
parser_train.add_argument('-o', '--output', type=str, default='out',
help="Output directory")
parser_train.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_train.add_argument('--no_pretraining', action='store_true',
                              help='Activate to use a randomly initialized '
'network')
parser_train.add_argument('--LSTM', action='store_true',
help='Activate to add a LSTM to the phone '
'classifier')
parser_train.add_argument('--seqNorm', action='store_true',
help='Activate if you want to normalize each '
'batch of features through time before the '
'phone classification.')
parser_train.add_argument('--kernelSize', type=int, default=8,
help='Number of features to concatenate before '
'feeding them to the phone classifier.')
parser_train.add_argument('--dropout', action='store_true')
parser_train.add_argument('--in_dim', type=int, default=1,
help='Dimension of the input data: useful when '
'working with pre-computed features or '
'stereo audio.')
parser_train.add_argument('--loss_reduction', type=str, default='mean',
choices=['mean', 'sum'])
parser_per = subparsers.add_parser('per')
parser_per.add_argument('output', type=str)
parser_per.add_argument('--batchSize', type=int, default=8)
parser_per.add_argument('--debug', action='store_true',
help='If activated, will only load a few '
'sequences from the dataset.')
parser_per.add_argument('--pathDB',
help="For computing the PER on another dataset",
type=str, default=None)
parser_per.add_argument('--pathVal',
help="For computing the PER on specific sequences",
type=str, default=None)
parser_per.add_argument('--pathPhone',
help="For computing the PER on specific sequences",
default=None, type=str)
parser_per.add_argument('--file_extension', type=str, default=".mp3")
parser_per.add_argument('--name', type=str, default="0")
args = parser.parse_args()
if args.command == 'per':
args = get_PER_args(args)
# Output Directory
if not os.path.isdir(args.output):
os.mkdir(args.output)
name = f"_{args.name}" if args.command == "per" else ""
pathLogs = os.path.join(args.output, f'logs_{args.command}{name}.txt')
tee = subprocess.Popen(["tee", pathLogs], stdin=subprocess.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
phoneLabels, nPhones = parseSeqLabels(args.pathPhone)
inSeqs, _ = findAllSeqs(args.pathDB,
extension=args.file_extension)
# Datasets
if args.command == 'train' and args.pathTrain is not None:
seqTrain = filterSeqs(args.pathTrain, inSeqs)
else:
seqTrain = inSeqs
if args.pathVal is None and args.command == 'train':
random.shuffle(seqTrain)
sizeTrain = int(0.9 * len(seqTrain))
seqTrain, seqVal = seqTrain[:sizeTrain], seqTrain[sizeTrain:]
elif args.pathVal is not None:
seqVal = filterSeqs(args.pathVal, inSeqs)
else:
raise RuntimeError("No validation dataset found for PER computation")
if args.debug:
seqVal = seqVal[:100]
downsampling_factor = 160
if args.pathCheckpoint == 'ID':
downsampling_factor = 1
feature_maker = IDModule()
hiddenGar = args.in_dim
else:
feature_maker, hiddenGar, _ = loadModel([args.pathCheckpoint],
loadStateDict=not args.no_pretraining)
feature_maker.cuda()
feature_maker = torch.nn.DataParallel(feature_maker)
phone_criterion = CTCphone_criterion(hiddenGar, nPhones, args.LSTM,
seqNorm=args.seqNorm,
dropout=args.dropout,
reduction=args.loss_reduction)
phone_criterion.cuda()
phone_criterion = torch.nn.DataParallel(phone_criterion)
print(f"Loading the validation dataset at {args.pathDB}")
datasetVal = SingleSequenceDataset(args.pathDB, seqVal,
phoneLabels, inDim=args.in_dim)
val_loader = DataLoader(datasetVal, batch_size=args.batchSize,
shuffle=True)
# Checkpoint file where the model should be saved
pathCheckpoint = os.path.join(args.output, 'checkpoint.pt')
if args.command == 'train':
feature_maker.optimize = True
if args.freeze:
feature_maker.eval()
feature_maker.optimize = False
for g in feature_maker.parameters():
g.requires_grad = False
if args.debug:
print("debug")
random.shuffle(seqTrain)
seqTrain = seqTrain[:1000]
seqVal = seqVal[:100]
print(f"Loading the training dataset at {args.pathDB}")
datasetTrain = SingleSequenceDataset(args.pathDB, seqTrain,
phoneLabels, inDim=args.in_dim)
train_loader = DataLoader(datasetTrain, batch_size=args.batchSize,
shuffle=True)
# Optimizer
g_params = list(phone_criterion.parameters())
if not args.freeze:
print("Optimizing model")
g_params += list(feature_maker.parameters())
optimizer = torch.optim.Adam(g_params, lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
pathArgs = os.path.join(args.output, "args_training.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
run(train_loader, val_loader, feature_maker, phone_criterion,
optimizer, downsampling_factor, args.nEpochs, pathCheckpoint)
else:
print(f"Loading data at {pathCheckpoint}")
state_dict = torch.load(pathCheckpoint,
map_location=lambda storage, loc: storage)
if 'bestLoss' in state_dict:
print(f"Best loss : {state_dict['bestLoss']}")
phone_criterion.load_state_dict(state_dict['classifier'])
feature_maker.load_state_dict(state_dict['model'])
pathArgs = os.path.join(args.output,
f"args_validation_{args.name}.json")
with open(pathArgs, 'w') as file:
json.dump(vars(args), file, indent=2)
perStep(val_loader,
feature_maker,
phone_criterion,
downsampling_factor)
| CPC_audio-main | cpc/eval/common_voices_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import torch
import progressbar
import argparse
import numpy as np
from cpc.dataset import findAllSeqs
from cpc.feature_loader import buildFeature, FeatureModule, \
ModelPhoneCombined, loadSupervisedCriterion, loadModel
def getArgs(pathCheckpoints):
pathArgs = os.path.join(os.path.dirname(pathCheckpoints),
"checkpoint_args.json")
with open(pathArgs, 'rb') as file:
return json.load(file)
def buildAllFeature(featureMaker, pathDB, pathOut,
seqList, stepSize=0.01, strict=False,
maxSizeSeq=64000, format='fea',
seqNorm=False):
totSeqs = len(seqList)
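    # Feature timestamps are emitted at frame centers, hence the half-step
    # offset below.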
startStep = stepSize / 2
bar = progressbar.ProgressBar(maxval=totSeqs)
bar.start()
for nseq, seqPath in enumerate(seqList):
bar.update(nseq)
feature = buildFeature(featureMaker,
os.path.join(pathDB, seqPath),
strict=strict or seqNorm,
maxSizeSeq=maxSizeSeq,
seqNorm=seqNorm)
_, nSteps, hiddenSize = feature.size()
outName = os.path.basename(os.path.splitext(seqPath)[0]) + f'.{format}'
fname = os.path.join(pathOut, outName)
if format == 'npz':
time = [startStep + step * stepSize for step in range(nSteps)]
values = feature.squeeze(0).float().cpu().numpy()
totTime = np.array([stepSize * nSteps], dtype=np.float32)
with open(fname, 'wb') as f:
np.savez(f, time=time, features=values, totTime=totTime)
elif format == 'npy':
time = [startStep + step * stepSize for step in range(nSteps)]
values = feature.squeeze(0).float().cpu().numpy()
with open(fname, 'wb') as f:
np.save(f, values)
elif format == 'af':
import arrayfire as af
time = [startStep + step * stepSize for step in range(nSteps)]
values = feature.squeeze(0).float().cpu().numpy()
totTime = np.array([stepSize * nSteps], dtype=np.float32)
af.save_array("time", af.Array(time, dtype=af.Dtype.f32), fname)
af.save_array("totTime", af.interop.from_ndarray(totTime),
fname, append=True)
af.save_array("features", af.interop.from_ndarray(values),
fname, append=True)
else:
with open(fname, 'w') as f:
_, nSteps, hiddenSize = feature.size()
for step in range(nSteps):
line = [startStep + step * stepSize] + \
feature[0, step, :].tolist()
line = [str(x) for x in line]
linestr = ' '.join(line) + '\n'
f.write(linestr)
bar.finish()
if __name__ == "__main__":
parser = argparse.ArgumentParser('Build features for zerospeech \
Track1 evaluation')
parser.add_argument('pathDB', help='Path to the reference dataset')
parser.add_argument('pathOut', help='Path to the output features')
parser.add_argument('pathCheckpoint', help='Checkpoint to load')
parser.add_argument('--extension', type=str, default='.wav')
parser.add_argument('--addCriterion', action='store_true')
parser.add_argument('--oneHot', action='store_true')
parser.add_argument('--maxSizeSeq', default=64000, type=int)
parser.add_argument('--train_mode', action='store_true')
parser.add_argument('--format', default='fea', type=str,
choices=['npz', 'fea', 'npy', 'af'])
parser.add_argument('--strict', action='store_true')
parser.add_argument('--dimReduction', type=str, default=None)
parser.add_argument('--centroidLimits', type=int, nargs=2, default=None)
parser.add_argument('--getEncoded', action='store_true')
parser.add_argument('--clusters', type=str, default=None)
parser.add_argument('--seqNorm', action='store_true')
args = parser.parse_args()
if not os.path.isdir(args.pathOut):
os.mkdir(args.pathOut)
with open(os.path.join(os.path.dirname(args.pathOut),
f"{os.path.basename(args.pathOut)}.json"), 'w') \
as file:
json.dump(vars(args), file, indent=2)
outData = [x[1] for x in
findAllSeqs(args.pathDB, extension=args.extension,
loadCache=False)[0]]
featureMaker = loadModel([args.pathCheckpoint])[0]
stepSize = featureMaker.gEncoder.DOWNSAMPLING / 16000
print(f"stepSize : {stepSize}")
featureMaker = FeatureModule(featureMaker, args.getEncoded)
featureMaker.collapse = False
if args.addCriterion:
criterion, nPhones = loadSupervisedCriterion(args.pathCheckpoint)
featureMaker = ModelPhoneCombined(featureMaker, criterion,
nPhones, args.oneHot)
featureMaker = featureMaker.cuda(device=0)
if not args.train_mode:
featureMaker.eval()
buildAllFeature(featureMaker, args.pathDB, args.pathOut, outData,
stepSize=stepSize, strict=args.strict,
maxSizeSeq=args.maxSizeSeq,
format=args.format,
seqNorm=args.seqNorm)
| CPC_audio-main | cpc/eval/build_zeroSpeech_features.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
import torch
import json
import time
import numpy as np
from pathlib import Path
from copy import deepcopy
import os
import cpc.criterion as cr
import cpc.feature_loader as fl
import cpc.utils.misc as utils
from cpc.dataset import AudioBatchData, findAllSeqs, filterSeqs, parseSeqLabels
def train_step(feature_maker, criterion, data_loader, optimizer):
if feature_maker.optimize:
feature_maker.train()
criterion.train()
logs = {"locLoss_train": 0, "locAcc_train": 0}
for step, fulldata in enumerate(data_loader):
optimizer.zero_grad()
batch_data, label = fulldata
c_feature, encoded_data, _ = feature_maker(batch_data, None)
if not feature_maker.optimize:
c_feature, encoded_data = c_feature.detach(), encoded_data.detach()
all_losses, all_acc = criterion(c_feature, encoded_data, label)
totLoss = all_losses.sum()
totLoss.backward()
optimizer.step()
logs["locLoss_train"] += np.asarray([all_losses.mean().item()])
logs["locAcc_train"] += np.asarray([all_acc.mean().item()])
logs = utils.update_logs(logs, step)
logs["iter"] = step
return logs
def val_step(feature_maker, criterion, data_loader):
feature_maker.eval()
criterion.eval()
logs = {"locLoss_val": 0, "locAcc_val": 0}
for step, fulldata in enumerate(data_loader):
with torch.no_grad():
batch_data, label = fulldata
c_feature, encoded_data, _ = feature_maker(batch_data, None)
all_losses, all_acc = criterion(c_feature, encoded_data, label)
logs["locLoss_val"] += np.asarray([all_losses.mean().item()])
logs["locAcc_val"] += np.asarray([all_acc.mean().item()])
logs = utils.update_logs(logs, step)
return logs
def run(feature_maker,
criterion,
train_loader,
val_loader,
optimizer,
logs,
n_epochs,
path_checkpoint):
start_epoch = len(logs["epoch"])
best_acc = -1
start_time = time.time()
for epoch in range(start_epoch, n_epochs):
logs_train = train_step(feature_maker, criterion, train_loader,
optimizer)
logs_val = val_step(feature_maker, criterion, val_loader)
print('')
print('_'*50)
print(f'Ran {epoch + 1} epochs '
f'in {time.time() - start_time:.2f} seconds')
utils.show_logs("Training loss", logs_train)
utils.show_logs("Validation loss", logs_val)
print('_'*50)
print('')
if logs_val["locAcc_val"] > best_acc:
best_state = deepcopy(fl.get_module(feature_maker).state_dict())
best_acc = logs_val["locAcc_val"]
logs["epoch"].append(epoch)
for key, value in dict(logs_train, **logs_val).items():
if key not in logs:
logs[key] = [None for x in range(epoch)]
if isinstance(value, np.ndarray):
value = value.tolist()
logs[key].append(value)
if (epoch % logs["saveStep"] == 0 and epoch > 0) or epoch == n_epochs - 1:
model_state_dict = fl.get_module(feature_maker).state_dict()
criterion_state_dict = fl.get_module(criterion).state_dict()
fl.save_checkpoint(model_state_dict, criterion_state_dict,
optimizer.state_dict(), best_state,
f"{path_checkpoint}_{epoch}.pt")
utils.save_logs(logs, f"{path_checkpoint}_logs.json")
def parse_args(argv):
parser = argparse.ArgumentParser(description='Linear separability trainer'
' (default test in speaker separability)')
parser.add_argument('pathDB', type=str,
help="Path to the directory containing the audio data.")
parser.add_argument('pathTrain', type=str,
help="Path to the list of the training sequences.")
parser.add_argument('pathVal', type=str,
help="Path to the list of the test sequences.")
parser.add_argument('load', type=str, nargs='*',
help="Path to the checkpoint to evaluate.")
parser.add_argument('--pathPhone', type=str, default=None,
help="Path to the phone labels. If given, will"
" compute the phone separability.")
parser.add_argument('--CTC', action='store_true',
help="Use the CTC loss (for phone separability only)")
parser.add_argument('--pathCheckpoint', type=str, default='out',
help="Path of the output directory where the "
" checkpoints should be dumped.")
parser.add_argument('--nGPU', type=int, default=-1,
                        help='Number of GPUs. Default=-1, use all available '
'GPUs')
parser.add_argument('--batchSizeGPU', type=int, default=8,
help='Batch size per GPU.')
parser.add_argument('--n_epoch', type=int, default=10)
parser.add_argument('--debug', action='store_true',
help='If activated, will load only a small number '
'of audio data.')
parser.add_argument('--unfrozen', action='store_true',
help="If activated, update the feature network as well"
" as the linear classifier")
parser.add_argument('--no_pretraining', action='store_true',
help="If activated, work from an untrained model.")
parser.add_argument('--file_extension', type=str, default=".flac",
help="Extension of the audio files in pathDB.")
parser.add_argument('--save_step', type=int, default=-1,
help="Frequency at which a checkpoint should be saved,"
" et to -1 (default) to save only the best checkpoint.")
parser.add_argument('--get_encoded', action='store_true',
help="If activated, will work with the output of the "
" convolutional encoder (see CPC's architecture).")
parser.add_argument('--lr', type=float, default=2e-4,
help='Learning rate.')
parser.add_argument('--beta1', type=float, default=0.9,
help='Value of beta1 for the Adam optimizer.')
parser.add_argument('--beta2', type=float, default=0.999,
help='Value of beta2 for the Adam optimizer.')
parser.add_argument('--epsilon', type=float, default=2e-8,
help='Value of epsilon for the Adam optimizer.')
parser.add_argument('--ignore_cache', action='store_true',
help="Activate if the sequences in pathDB have"
" changed.")
parser.add_argument('--size_window', type=int, default=20480,
help="Number of frames to consider in each batch.")
args = parser.parse_args(argv)
if args.nGPU < 0:
args.nGPU = torch.cuda.device_count()
if args.save_step <= 0:
args.save_step = args.n_epoch
args.load = [str(Path(x).resolve()) for x in args.load]
args.pathCheckpoint = str(Path(args.pathCheckpoint).resolve())
return args
def main(argv):
args = parse_args(argv)
logs = {"epoch": [], "iter": [], "saveStep": args.save_step}
load_criterion = False
seqNames, speakers = findAllSeqs(args.pathDB,
extension=args.file_extension,
loadCache=not args.ignore_cache)
model, hidden_gar, hidden_encoder = fl.loadModel(args.load,
loadStateDict=not args.no_pretraining)
model.cuda()
model = torch.nn.DataParallel(model, device_ids=range(args.nGPU))
dim_features = hidden_encoder if args.get_encoded else hidden_gar
# Now the criterion
phone_labels = None
if args.pathPhone is not None:
phone_labels, n_phones = parseSeqLabels(args.pathPhone)
if not args.CTC:
print(f"Running phone separability with aligned phones")
criterion = cr.PhoneCriterion(dim_features,
n_phones, args.get_encoded)
else:
print(f"Running phone separability with CTC loss")
criterion = cr.CTCPhoneCriterion(dim_features,
n_phones, args.get_encoded)
else:
print(f"Running speaker separability")
criterion = cr.SpeakerCriterion(dim_features, len(speakers))
criterion.cuda()
criterion = torch.nn.DataParallel(criterion, device_ids=range(args.nGPU))
# Dataset
seq_train = filterSeqs(args.pathTrain, seqNames)
seq_val = filterSeqs(args.pathVal, seqNames)
if args.debug:
seq_train = seq_train[:1000]
seq_val = seq_val[:100]
db_train = AudioBatchData(args.pathDB, args.size_window, seq_train,
phone_labels, len(speakers))
db_val = AudioBatchData(args.pathDB, args.size_window, seq_val,
phone_labels, len(speakers))
batch_size = args.batchSizeGPU * args.nGPU
train_loader = db_train.getDataLoader(batch_size, "uniform", True,
numWorkers=0)
val_loader = db_val.getDataLoader(batch_size, 'sequential', False,
numWorkers=0)
# Optimizer
g_params = list(criterion.parameters())
model.optimize = False
model.eval()
if args.unfrozen:
print("Working in full fine-tune mode")
g_params += list(model.parameters())
model.optimize = True
else:
print("Working with frozen features")
for g in model.parameters():
g.requires_grad = False
optimizer = torch.optim.Adam(g_params, lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
# Checkpoint directory
args.pathCheckpoint = Path(args.pathCheckpoint)
args.pathCheckpoint.mkdir(exist_ok=True)
args.pathCheckpoint = str(args.pathCheckpoint / "checkpoint")
with open(f"{args.pathCheckpoint}_args.json", 'w') as file:
json.dump(vars(args), file, indent=2)
run(model, criterion, train_loader, val_loader, optimizer, logs,
args.n_epoch, args.pathCheckpoint)
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
args = sys.argv[1:]
main(args)
| CPC_audio-main | cpc/eval/linear_separability.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| CPC_audio-main | cpc/eval/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import progressbar
import math
import random
def normalize_with_singularity(x):
r"""
Normalize the given vector across the third dimension.
Extend all vectors by eps=1e-12 to put the null vector at the maximal
cosine distance from any non-null vector.
"""
N, S, H = x.size()
norm_x = (x**2).sum(dim=2, keepdim=True)
x /= torch.sqrt(norm_x)
zero_vals = (norm_x == 0).view(N, S)
x[zero_vals] = 1 / math.sqrt(H)
border_vect = torch.zeros((N, S, 1),
dtype=x.dtype,
device=x.device) + 1e-12
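    # Null vectors get a huge negative extra coordinate: their clamped cosine
    # with any normalized vector then reaches -1, i.e. the maximal distance,
    # while regular vectors only gain a negligible 1e-12 component.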
border_vect[zero_vals] = -2*1e12
return torch.cat([x, border_vect], dim=2)
def load_item_file(path_item_file):
r""" Load a .item file indicating the triplets for the ABX score. The
    input file must have the following format:
line 0 : whatever (not read)
line > 0: #file_ID onset offset #phone prev-phone next-phone speaker
    onset : beginning of the triplet (in s)
    offset : end of the triplet (in s)
"""
with open(path_item_file, 'r') as file:
data = file.readlines()[1:]
data = [x.replace('\n', '') for x in data]
out = {}
phone_match = {}
speaker_match = {}
context_match = {}
for line in data:
items = line.split()
assert(len(items) == 7)
fileID = items[0]
if fileID not in out:
out[fileID] = []
onset, offset = float(items[1]), float(items[2])
context = '+'.join([items[4], items[5]])
phone = items[3]
speaker = items[6]
if phone not in phone_match:
s = len(phone_match)
phone_match[phone] = s
phone_id = phone_match[phone]
if context not in context_match:
s = len(context_match)
context_match[context] = s
context_id = context_match[context]
if speaker not in speaker_match:
s = len(speaker_match)
speaker_match[speaker] = s
speaker_id = speaker_match[speaker]
out[fileID].append([onset, offset, context_id, phone_id, speaker_id])
return out, context_match, phone_match, speaker_match
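# get_features_group sorts the feature indices hierarchically by the keys in
# index_order and returns (sorted_indices, nested_groups), where each nesting
# level holds (start, end) ranges over the sorted order, one level per key.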
def get_features_group(in_data, index_order):
in_index = list(range(len(in_data)))
in_index.sort(key=lambda x: [in_data[x][i] for i in index_order])
out_groups = []
last_values = [in_data[in_index[0]][i] for i in index_order]
i_s = 0
curr_group = [[] for i in index_order]
n_orders = len(index_order) - 1
tmp = [in_data[i] for i in in_index]
for index, item in enumerate(tmp):
for order_index, order in enumerate(index_order):
if item[order] != last_values[order_index]:
curr_group[-1].append((i_s, index))
for i in range(n_orders, order_index, -1):
curr_group[i-1].append(curr_group[i])
curr_group[i] = []
if order_index == 0:
out_groups += curr_group[0]
curr_group[0] = []
last_values = [item[i] for i in index_order]
i_s = index
break
if i_s < len(in_data):
curr_group[-1].append((i_s, len(in_data)))
for i in range(n_orders, 0, -1):
curr_group[i-1].append(curr_group[i])
out_groups += curr_group[0]
return in_index, out_groups
class ABXFeatureLoader:
def __init__(self,
path_item_file,
seqList,
featureMaker,
stepFeature,
normalize):
r"""
Args:
path_item_file (str): path to the .item files containing the ABX
triplets
seqList (list): list of items (fileID, path) where fileID refers to
the file's ID as used in path_item_file, and path
is the actual path to the input audio sequence
featureMaker (function): either a function or a callable object.
Takes a path as input and outputs the
feature sequence corresponding to the
given file.
            normalize (bool): if True all input features will be normalized
across the channels dimension.
Note:
You can use this dataset with pre-computed features. For example, if
you have a collection of features files in the torch .pt format then
you can just set featureMaker = torch.load.
"""
files_data, self.context_match, self.phone_match, self.speaker_match = \
load_item_file(path_item_file)
self.seqNorm = True
self.stepFeature = stepFeature
self.loadFromFileData(files_data, seqList, featureMaker, normalize)
def loadFromFileData(self, files_data, seqList, feature_maker, normalize):
# self.features[i]: index_start, size, context_id, phone_id, speaker_id
self.features = []
self.INDEX_CONTEXT = 2
self.INDEX_PHONE = 3
self.INDEX_SPEAKER = 4
data = []
totSize = 0
print("Building the input features...")
bar = progressbar.ProgressBar(maxval=len(seqList))
bar.start()
for index, vals in enumerate(seqList):
fileID, file_path = vals
bar.update(index)
if fileID not in files_data:
continue
features = feature_maker(file_path)
if normalize:
features = normalize_with_singularity(features)
features = features.detach().cpu()
features = features.view(features.size(1), features.size(2))
phone_data = files_data[fileID]
for phone_start, phone_end, context_id, phone_id, speaker_id in phone_data:
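                # Convert times (in s) into feature-frame indices; the -0.5
                # offset appears to align phone boundaries with frame centers.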
index_start = max(
0, int(math.ceil(self.stepFeature * phone_start - 0.5)))
index_end = min(features.size(0),
int(math.floor(self.stepFeature * phone_end - 0.5)))
if index_start >= features.size(0) or index_end <= index_start:
continue
loc_size = index_end - index_start
self.features.append([totSize, loc_size, context_id,
phone_id, speaker_id])
data.append(features[index_start:index_end])
totSize += loc_size
bar.finish()
print("...done")
self.data = torch.cat(data, dim=0)
self.feature_dim = self.data.size(1)
def get_data_device(self):
return self.data.device
def cuda(self):
self.data = self.data.cuda()
def cpu(self):
self.data = self.data.cpu()
def get_max_group_size(self, i_group, i_sub_group):
id_start, id_end = self.group_index[i_group][i_sub_group]
return max([self.features[i][1] for i in range(id_start, id_end)])
def get_ids(self, index):
context_id, phone_id, speaker_id = self.features[index][2:]
return context_id, phone_id, speaker_id
def __getitem__(self, index):
i_data, out_size, context_id, phone_id, speaker_id = self.features[index]
return self.data[i_data:(i_data + out_size)], out_size, (context_id, phone_id, speaker_id)
def __len__(self):
return len(self.features)
def get_n_speakers(self):
return len(self.speaker_match)
def get_n_context(self):
return len(self.context_match)
def get_n_phone(self):
return len(self.phone_match)
def get_n_groups(self):
return len(self.group_index)
def get_n_sub_group(self, index_sub_group):
return len(self.group_index[index_sub_group])
def get_iterator(self, mode, max_size_group):
if mode == 'within':
return ABXWithinGroupIterator(self, max_size_group)
if mode == 'across':
return ABXAcrossGroupIterator(self, max_size_group)
raise ValueError(f"Invalid mode: {mode}")
class ABXIterator:
r"""
Base class building ABX's triplets.
"""
def __init__(self, abxDataset, max_size_group):
self.max_size_group = max_size_group
self.dataset = abxDataset
self.len = 0
self.index_csp, self.groups_csp = \
get_features_group(abxDataset.features,
[abxDataset.INDEX_CONTEXT,
abxDataset.INDEX_SPEAKER,
abxDataset.INDEX_PHONE])
def get_group(self, i_start, i_end):
data = []
max_size = 0
to_take = list(range(i_start, i_end))
if i_end - i_start > self.max_size_group:
to_take = random.sample(to_take, k=self.max_size_group)
for i in to_take:
loc_data, loc_size, loc_id = self.dataset[self.index_csp[i]]
max_size = max(loc_size, max_size)
data.append(loc_data)
N = len(to_take)
out_data = torch.zeros(N, max_size,
self.dataset.feature_dim,
device=self.dataset.get_data_device())
out_size = torch.zeros(N, dtype=torch.long,
device=self.dataset.get_data_device())
for i in range(N):
size = data[i].size(0)
out_data[i, :size] = data[i]
out_size[i] = size
return out_data, out_size, loc_id
def __len__(self):
return self.len
def get_board_size(self):
r"""
Get the output dimension of the triplet's space.
"""
pass
class ABXWithinGroupIterator(ABXIterator):
r"""
Iterator giving the triplets for the ABX within score.
"""
def __init__(self, abxDataset, max_size_group):
super(ABXWithinGroupIterator, self).__init__(abxDataset,
max_size_group)
self.symmetric = True
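        # In 'within' mode A and X are drawn from the same (context, speaker,
        # phone) group, so the A/X distance matrix is symmetric and its
        # diagonal (an item compared with itself) must be ignored.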
for context_group in self.groups_csp:
for speaker_group in context_group:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
if i_end - i_start > 1:
self.len += (len(speaker_group) - 1)
def __iter__(self):
for i_c, context_group in enumerate(self.groups_csp):
for i_s, speaker_group in enumerate(context_group):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.groups_csp[i_c][i_s][i_a]
if i_end_a - i_start_a == 1:
continue
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.groups_csp[i_c][i_s][i_b]
data_b, size_b, id_b = self.get_group(i_start_b,
i_end_b)
data_a, size_a, id_a = self.get_group(i_start_a,
i_end_a)
out_coords = id_a[2], id_a[1], id_b[1], id_a[0]
yield out_coords, (data_a, size_a), (data_b, size_b), \
(data_a, size_a)
def get_board_size(self):
return (self.dataset.get_n_speakers(),
self.dataset.get_n_phone(),
self.dataset.get_n_phone(),
self.dataset.get_n_context())
class ABXAcrossGroupIterator(ABXIterator):
r"""
Iterator giving the triplets for the ABX across score.
"""
def __init__(self, abxDataset, max_size_group):
super(ABXAcrossGroupIterator, self).__init__(abxDataset,
max_size_group)
self.symmetric = False
self.get_speakers_from_cp = {}
self.max_x = 5
for context_group in self.groups_csp:
for speaker_group in context_group:
for i_start, i_end in speaker_group:
c_id, p_id, s_id = self.dataset.get_ids(
self.index_csp[i_start])
if c_id not in self.get_speakers_from_cp:
self.get_speakers_from_cp[c_id] = {}
if p_id not in self.get_speakers_from_cp[c_id]:
self.get_speakers_from_cp[c_id][p_id] = {}
self.get_speakers_from_cp[c_id][p_id][s_id] = (
i_start, i_end)
for context_group in self.groups_csp:
for speaker_group in context_group:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
c_id, p_id, s_id = self.dataset.get_ids(
self.index_csp[i_start])
self.len += (len(speaker_group) - 1) * (min(self.max_x,
len(self.get_speakers_from_cp[c_id][p_id]) - 1))
def get_other_speakers_in_group(self, i_start_group):
c_id, p_id, s_id = self.dataset.get_ids(self.index_csp[i_start_group])
return [v for k, v in self.get_speakers_from_cp[c_id][p_id].items() if k != s_id]
def get_abx_triplet(self, i_a, i_b, i_x):
i_start_a, i_end_a = i_a
data_a, size_a, id_a = self.get_group(i_start_a, i_end_a)
i_start_b, i_end_b = i_b
data_b, size_b, id_b = self.get_group(i_start_b, i_end_b)
i_start_x, i_end_x = i_x
data_x, size_x, id_x = self.get_group(i_start_x, i_end_x)
out_coords = id_a[2], id_a[1], id_b[1], id_a[0], id_x[2]
return out_coords, (data_a, size_a), (data_b, size_b), \
(data_x, size_x)
def __iter__(self):
for i_c, context_group in enumerate(self.groups_csp):
for i_s, speaker_group in enumerate(context_group):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.groups_csp[i_c][i_s][i_a]
ref = self.get_other_speakers_in_group(i_start_a)
if len(ref) > self.max_x:
speakers_a = random.sample(ref, k=self.max_x)
else:
speakers_a = ref
for i_start_x, i_end_x in speakers_a:
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.groups_csp[i_c][i_s][i_b]
yield self.get_abx_triplet((i_start_a, i_end_a), (i_start_b, i_end_b), (i_start_x, i_end_x))
def get_board_size(self):
return (self.dataset.get_n_speakers(),
self.dataset.get_n_phone(),
self.dataset.get_n_phone(),
self.dataset.get_n_context(),
self.dataset.get_n_speakers())
| CPC_audio-main | cpc/eval/ABX/abx_iterators.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| CPC_audio-main | cpc/eval/ABX/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from nose.tools import eq_, ok_
from . import abx_group_computation
from . import abx_iterators
from pathlib import Path
import numpy as np
import math
class TestDistancesDTW(unittest.TestCase):
def testDTWFunction(self):
X = torch.tensor([[[0, 1], [0, 0], [1, 1], [42, 42]],
[[0, 2], [0, 1], [1, 1], [-1, 0]],
[[0, 0], [0, 1], [0, 0], [21, 211]]],
dtype=torch.float)
X_size = torch.tensor([3, 4, 2])
Y = torch.tensor([[[0, 1], [1, 2], [0, 0]]], dtype=torch.float)
Y_size = torch.tensor([3])
distance_mode = abx_group_computation.get_euclidian_distance_batch
dist = abx_group_computation.get_distance_group_dtw(X, Y,
X_size, Y_size,
distance_function=distance_mode)
eq_(dist.size(), (3, 1))
expected_dist = [[(math.sqrt(2)) / 2], [3 / 4],
[(2 + math.sqrt(2)) / 3]]
for i in range(3):
ok_(abs(expected_dist[i][0] - dist[i].item()) < 1e-4)
def testThetaDTWFunctionSymetric(self):
A = torch.tensor([[[0, 1], [0, 0], [1, 1], [42, 42]],
[[0, 2], [0, 1], [1, 1], [-1, 0]],
[[0, 0], [0, 1], [0, 0], [21, 211]]],
dtype=torch.float)
A_size = torch.tensor([3, 4, 2])
B = torch.tensor([[[0, 1], [1, 2], [0, 0]]], dtype=torch.float)
B_size = torch.tensor([3])
distance_mode = abx_group_computation.get_euclidian_distance_batch
        symmetric = True
theta = abx_group_computation.get_theta_group_dtw(A, B, A, A_size,
B_size, A_size,
distance_mode,
                                                          symmetric)
eq_(theta, 0.5)
class testSingularityNormalization(unittest.TestCase):
def testCosineNormalized(self):
x = torch.tensor([[[1., 0., 0., 0.], [0., 0., 0., 0.]],
[[0., 0., -1., 0.], [0.5, -0.5, 0.5, -0.5]]])
y = torch.tensor(
[[[-0.5, -0.5, -0.5, 0.5], [0., 0., 0., 0.], [0., 1., 0., 0.]]])
norm_x = abx_iterators.normalize_with_singularity(x)
norm_y = abx_iterators.normalize_with_singularity(y)
dist = abx_group_computation.get_cosine_distance_batch(norm_x, norm_y)
eq_(dist.size(), (2, 1, 2, 3))
ok_(abs(dist[0, 0, 0, 0] - 0.6667) < 1e-4)
ok_(abs(dist[0, 0, 0, 1] - 1.) < 1e-4)
ok_(abs(dist[0, 0, 0, 2] - 0.5) < 1e-4)
ok_(abs(dist[0, 0, 1, 0] - 1) < 1e-4)
ok_(abs(dist[0, 0, 1, 1]) < 1e-4)
ok_(abs(dist[0, 0, 1, 2] - 1) < 1e-4)
ok_(abs(dist[1, 0, 0, 0] - 0.3333) < 1e-4)
ok_(abs(dist[1, 0, 0, 1] - 1.) < 1e-4)
ok_(abs(dist[1, 0, 0, 2] - 0.5) < 1e-4)
ok_(abs(dist[1, 0, 1, 0]-0.6667) < 1e-4)
ok_(abs(dist[1, 0, 1, 1] - 1.) < 1e-4)
ok_(abs(dist[1, 0, 1, 2] - 0.6667) < 1e-4)
class testGroupMaker(unittest.TestCase):
def test1DGroupMaker(self):
data = [[0], [1], [2], [3], [4], [2], [2], [2]]
order = [0]
out_index, out_data = abx_iterators.get_features_group(data, order)
expected_index = [0, 1, 2, 5, 6, 7, 3, 4]
eq_(out_index, expected_index)
expected_output = [(0, 1), (1, 2), (2, 6), (6, 7), (7, 8)]
eq_(out_data, expected_output)
def test2DGroupMaker(self):
data = [[0, 1], [1, 2], [2, 3], [3, 3],
[4, 0], [2, 2], [4, 2], [2, 2], [0, 3]]
order = [1, 0]
out_index, out_data = abx_iterators.get_features_group(data, order)
expected_index = [4, 0, 1, 5, 7, 6, 8, 2, 3]
eq_(out_index, expected_index)
expected_output = [[(0, 1)],
[(1, 2)],
[(2, 3), (3, 5), (5, 6)],
[(6, 7), (7, 8), (8, 9)]]
eq_(out_data, expected_output)
def test3DGroupMaker(self):
data = [[0, 0, 0, 1],
[41, 1, 0, 2],
[-23, 0, 3, 1],
[220, 1, -2, 3],
[40, 2, 1, 0],
[200, 0, 0, 1]]
order = [1, 3, 2]
out_index, out_data = abx_iterators.get_features_group(data, order)
expected_index = [0, 5, 2, 1, 3, 4]
eq_(out_index, expected_index)
expected_output = [[[(0, 2), (2, 3)]], [
[(3, 4)], [(4, 5)]], [[(5, 6)]]]
eq_(out_data, expected_output)
class testItemLoader(unittest.TestCase):
def setUp(self):
self.test_data_dir = Path(__file__).parent / 'test_data'
def testLoadItemFile(self):
path_item_file = self.test_data_dir / "dummy_item_file.item"
out, context_match, phone_match, speaker_match = \
abx_iterators.load_item_file(path_item_file)
eq_(len(out), 4)
eq_(len(phone_match), 5)
eq_(len(speaker_match), 3)
expected_phones = {'n': 0, 'd': 1, 'ih': 2,
's': 3, 'dh': 4}
eq_(phone_match, expected_phones)
expected_speakers = {'8193': 0, '2222': 1, '12': 2}
eq_(speaker_match, expected_speakers)
expected_context = {'ae+d': 0, 'n+l': 1, 'l+n': 2, 'ih+s': 3,
'n+ax': 4, 'ax+dh': 5, 's+ax': 6}
eq_(context_match, expected_context)
expected_output = {'2107': [[0.3225, 0.5225, 0, 0, 0],
[0.4225, 0.5925, 1, 1, 1],
[1.1025, 1.2925, 6, 4, 2]],
'42': [[0.4525, 0.6525, 1, 1, 1],
[0.5225, 0.7325, 2, 2, 0],
[0.5925, 0.8725, 3, 0, 0]],
'23': [[0.6525, 1.1025, 4, 3, 0],
[0.7325, 1.1925, 4, 3, 1]],
'407': [[0.8725, 1.2425, 5, 3, 1]]}
eq_(expected_output, out)
def testLoadWithinItemFile(self):
path_item_file = self.test_data_dir / "dummy_item_within.item"
out, context_match, phone_match, speaker_match = \
abx_iterators.load_item_file(path_item_file)
expected_output = {'2107': [[0., 0.2, 0, 0, 0],
[0.3225, 0.5225, 1, 0, 0],
[0.6, 0.75, 1, 0, 0],
[0.4225, 0.5925, 2, 1, 1]],
'42': [[0.4525, 0.6525, 2, 1, 1],
[0.1301, 0.2501, 2, 2, 1],
[0.5225, 0.7325, 2, 1, 0],
[0.0025, 0.3561, 3, 1, 1],
[0.5925, 0.8725, 3, 1, 0]]}
eq_(expected_output, out)
class testABXFeatureLoader(unittest.TestCase):
def setUp(self):
self.stepFeature = 10
self.test_data_dir = Path(__file__).parent / 'test_data'
def dummy_feature_maker(path_file, *args):
data = torch.tensor(np.load(path_file))
assert(len(data.size()) == 1)
return data.view(1, -1, 1)
def testBaseLoader(self):
seqList = [('2107', self.test_data_dir / '2107.npy'),
('42', self.test_data_dir / '42.npy'),
('23', self.test_data_dir / '23.npy'),
('407', self.test_data_dir / '407.npy')]
dataset = abx_iterators.ABXFeatureLoader(self.test_data_dir / "dummy_item_file.item",
seqList,
testABXFeatureLoader.dummy_feature_maker,
self.stepFeature,
False)
print(dataset.features)
eq_(dataset.feature_dim, 1)
eq_(len(dataset), 9)
eq_(len(dataset.data.size()), 2)
eq_(len(dataset.data), 16)
data, size, coords = dataset[0]
eq_(size, 1)
eq_(coords, (0, 0, 0))
eq_(data.tolist(), [[3]])
data, size, coords = dataset[3]
eq_(size, 1)
eq_(coords, (1, 1, 1))
eq_(data.tolist(), [[5]])
def testWithinIterator(self):
seqList = [('2107', self.test_data_dir / '2107.npy'),
('42', self.test_data_dir / '42.npy')]
dataset = abx_iterators.ABXFeatureLoader(self.test_data_dir / "dummy_item_within.item",
seqList,
testABXFeatureLoader.dummy_feature_maker,
self.stepFeature,
False)
iterator = dataset.get_iterator('within', 40)
eq_(iterator.index_csp, [0, 1, 2, 6, 3, 4, 5, 8, 7])
eq_(iterator.groups_csp, [[[(0, 1)]], [[(1, 3)]], [
[(3, 4)], [(4, 6), (6, 7)]], [[(7, 8)], [(8, 9)]]])
eq_(len(iterator), 1)
it = iter(iterator)
c1, a_01, b_01, x_01 = next(it)
eq_(c1, (1, 1, 2, 2))
a_1, s_a = a_01
eq_(s_a.tolist(), [1, 1])
eq_(a_1.tolist(), [[[4.]], [[5.]]])
eq_(x_01[0].tolist(), a_1.tolist())
eq_(x_01[1].tolist(), s_a.tolist())
eq_(b_01[0].tolist(), [[[1.]]])
eq_(b_01[1].item(), 1)
eq_(next(it, False), False)
eq_(iterator.get_board_size(), (2, 3, 3, 4))
| CPC_audio-main | cpc/eval/ABX/unit_tests.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import math
from . import dtw
import progressbar
def get_distance_function_from_name(name_str):
if name_str == 'euclidian':
return get_euclidian_distance_batch
if name_str == 'cosine':
return get_cosine_distance_batch
raise ValueError(f"Invalid distance mode")
def check_dtw_group_validity(a, b, x):
assert(len(a.size()) == len(b.size()))
assert(len(a.size()) == len(x.size()))
assert(a.size(2) == x.size(2))
assert(a.size(2) == b.size(2))
def get_cosine_distance_batch(a1, a2, epsilon=1e-8):
r""" a1 and a2 must be normalized"""
N1, S1, D = a1.size() # Batch x Seq x Channel
N2, S2, D = a2.size() # Batch x Seq x Channel
prod = (a1.view(N1, 1, S1, 1, D)) * (a2.view(1, N2, 1, S2, D))
    # Sum across the channel dimension
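    # acos / pi maps the clamped cosine to a distance in [0, 1]:
    # identical directions give 0, opposite directions give 1.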
prod = torch.clamp(prod.sum(dim=4), -1, 1).acos() / math.pi
return prod
def get_euclidian_distance_batch(a1, a2):
N1, S1, D = a1.size()
N2, S2, D = a2.size()
diff = a1.view(N1, 1, S1, 1, D) - a2.view(1, N2, 1, S2, D)
return torch.sqrt((diff**2).sum(dim=4))
def get_distance_group_dtw(a1, a2, size1, size2,
ignore_diag=False, symmetric=False,
distance_function=get_cosine_distance_batch):
N1, S1, D = a1.size()
N2, S2, D = a2.size()
if size1.size(0) != N1:
print(a1.size(), size1.size())
print(a2.size(), size2.size())
assert(size1.size(0) == N1)
assert(size2.size(0) == N2)
distance_mat = distance_function(a1, a2).detach().cpu().numpy()
return dtw.dtw_batch(a1, a2, size1, size2,
distance_mat,
ignore_diag, symmetric)
def get_theta_group_dtw(a, b, x, sa, sb, sx, distance_function, symmetric):
check_dtw_group_validity(a, b, x)
dxb = get_distance_group_dtw(
x, b, sx, sb, distance_function=distance_function)
dxa = get_distance_group_dtw(x, a, sx, sa, ignore_diag=symmetric,
symmetric=symmetric,
distance_function=distance_function)
Nx, Na = dxa.size()
Nx, Nb = dxb.size()
if symmetric:
n_pos = Na * (Na - 1)
max_val = dxb.max().item()
for i in range(Na):
dxa[i, i] = max_val + 1
else:
n_pos = Na * Nx
dxb = dxb.view(Nx, 1, Nb).expand(Nx, Na, Nb)
dxa = dxa.view(Nx, Na, 1).expand(Nx, Na, Nb)
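    # Score one point for every (a, b, x) triple where x is closer to A than
    # to B, half a point for ties, then normalize by the number of triples.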
sc = (dxa < dxb).sum() + 0.5 * (dxa == dxb).sum()
sc /= (n_pos * Nb)
return sc.item()
def loc_dtw(data, distance_function, symmetric):
coords, group_a, group_b, group_x = data
group_a_data, group_a_size = group_a
group_b_data, group_b_size = group_b
group_x_data, group_x_size = group_x
theta = get_theta_group_dtw(group_a_data,
group_b_data,
group_x_data,
group_a_size,
group_b_size,
group_x_size,
distance_function,
symmetric)
return (coords, 1 - theta)
def get_abx_scores_dtw_on_group(group_iterator,
distance_function,
symmetric):
data_list = []
coords_list = []
bar = progressbar.ProgressBar(maxval=len(group_iterator))
bar.start()
with torch.no_grad():
for index, group in enumerate(group_iterator):
bar.update(index)
coords, abx = loc_dtw(group, distance_function, symmetric)
data_list.append(abx)
coords_list.append(coords)
bar.finish()
return torch.sparse.FloatTensor(torch.LongTensor(coords_list).t(),
torch.FloatTensor(data_list),
group_iterator.get_board_size())
| CPC_audio-main | cpc/eval/ABX/abx_group_computation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torchaudio
import progressbar
import os
import sys
from pathlib import Path
def adjust_sample_rate(path_db, file_list, path_db_out,
target_sr):
bar = progressbar.ProgressBar(maxval=len(file_list))
bar.start()
for index, item in enumerate(file_list):
path_in = os.path.join(path_db, item)
path_out = os.path.join(path_db_out, item)
bar.update(index)
data, sr = torchaudio.load(path_in)
transform = torchaudio.transforms.Resample(orig_freq=sr,
new_freq=target_sr,
resampling_method='sinc_interpolation')
data = transform(data)
torchaudio.save(path_out, data, target_sr,
precision=16, channels_first=True)
bar.finish()
def get_names_list(path_tsv_file):
with open(path_tsv_file, 'r') as file:
data = file.readlines()
return [x.split()[0] for x in data]
def parse_args(argv):
parser = argparse.ArgumentParser(description='Adjust the sample rate of '
'a given group of audio files')
parser.add_argument('path_db', type=str,
help='Path to the directory containing the audio '
'files')
parser.add_argument("path_phone_files", type=str,
help='Path to the .txt file containing the list of '
'the files with a phone transcription')
parser.add_argument("path_out", type=str,
                        help='Path to the output directory')
parser.add_argument("--out_sample_rate", type=int, default=16000,
help="Sample rate of the output audio files "
"(default is 160000)")
parser.add_argument('--file_extension', type=str, default='.mp3')
return parser.parse_args(argv)
def main(argv):
args = parse_args(argv)
file_list_db = [f for f in os.listdir(args.path_db)
if Path(f).suffix == args.file_extension]
print(f"Found {len(file_list_db)} in the dataset")
file_list_phone = get_names_list(args.path_phone_files)
print(f"Found {len(file_list_phone)} with a phone transcription")
file_list_db.sort()
file_list_phone.sort()
out_list = []
index_phone = 0
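    # Two-pointer walk over the two sorted lists: keep only the audio files
    # whose stem has a matching phone transcription.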
    for file_name in file_list_db:
        while index_phone < len(file_list_phone) \
                and Path(file_name).stem > file_list_phone[index_phone]:
            index_phone += 1
        if index_phone >= len(file_list_phone):
            break
        if Path(file_name).stem == file_list_phone[index_phone]:
            out_list.append(file_name)
print(f"Converting {len(out_list)} files")
Path(args.path_out).mkdir(parents=True, exist_ok=True)
adjust_sample_rate(args.path_db, out_list,
args.path_out, args.out_sample_rate)
if __name__ == '__main__':
main(sys.argv[1:])
| CPC_audio-main | cpc/eval/utils/adjust_sample_rate.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import glob
import argparse
import numpy as np
import resampy
from scikits.audiolab import Sndfile, Format
def load_wav(fname, rate=None):
fp = Sndfile(fname, 'r')
_signal = fp.read_frames(fp.nframes)
_signal = _signal.reshape((-1, fp.channels))
_rate = fp.samplerate
    if _signal.ndim == 1:
        _signal = _signal.reshape((-1, 1))
if rate is not None and rate != _rate:
signal = resampy.resample(_signal, _rate, rate, axis=0, filter='kaiser_best')
else:
signal = _signal
rate = _rate
return signal, rate
def save_wav(fname, signal, rate):
fp = Sndfile(fname, 'w', Format('wav'), signal.shape[1], rate)
fp.write_frames(signal)
fp.close()
def reEncodeAudio(audio_path, new_rate):
audio, audio_rate = load_wav(audio_path,new_rate)
save_wav(audio_path, audio, new_rate)
def main():
parser = argparse.ArgumentParser(description="re-encode all audios under a directory")
parser.add_argument("--audio_dir_path", type=str, required=True)
parser.add_argument("--new_rate", type=int, default=16000)
args = parser.parse_args()
audio_list = glob.glob(args.audio_dir_path + '/*.wav')
print "Total number of audios to re-encode: ", len(audio_list)
for audio_path in audio_list:
        reEncodeAudio(audio_path, args.new_rate)  # glob already returns the full path
if __name__ == '__main__':
main()
| 2.5D-Visual-Sound-main | reEncodeAudio.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import torch
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import ModelBuilder
from models.audioVisual_model import AudioVisualModel
from torch.autograd import Variable
from tensorboardX import SummaryWriter
def create_optimizer(nets, opt):
(net_visual, net_audio) = nets
param_groups = [{'params': net_visual.parameters(), 'lr': opt.lr_visual},
{'params': net_audio.parameters(), 'lr': opt.lr_audio}]
if opt.optimizer == 'sgd':
return torch.optim.SGD(param_groups, momentum=opt.beta1, weight_decay=opt.weight_decay)
elif opt.optimizer == 'adam':
return torch.optim.Adam(param_groups, betas=(opt.beta1,0.999), weight_decay=opt.weight_decay)
def decrease_learning_rate(optimizer, decay_factor=0.94):
for param_group in optimizer.param_groups:
param_group['lr'] *= decay_factor
#used to display validation loss
def display_val(model, loss_criterion, writer, index, dataset_val, opt):
losses = []
with torch.no_grad():
for i, val_data in enumerate(dataset_val):
if i < opt.validation_batches:
output = model.forward(val_data)
loss = loss_criterion(output['binaural_spectrogram'], output['audio_gt'])
losses.append(loss.item())
else:
break
avg_loss = sum(losses)/len(losses)
if opt.tensorboard:
writer.add_scalar('data/val_loss', avg_loss, index)
print('val loss: %.3f' % avg_loss)
return avg_loss
#parse arguments
opt = TrainOptions().parse()
opt.device = torch.device("cuda")
#construct data loader
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(data_loader)
print('#training clips = %d' % dataset_size)
#create validation set data loader if validation_on option is set
if opt.validation_on:
    #temporarily set to val to load val data
opt.mode = 'val'
data_loader_val = CreateDataLoader(opt)
dataset_val = data_loader_val.load_data()
dataset_size_val = len(data_loader_val)
print('#validation clips = %d' % dataset_size_val)
opt.mode = 'train' #set it back
if opt.tensorboard:
from tensorboardX import SummaryWriter
writer = SummaryWriter(comment=opt.name)
else:
writer = None
# network builders
builder = ModelBuilder()
net_visual = builder.build_visual(weights=opt.weights_visual)
net_audio = builder.build_audio(
ngf=opt.unet_ngf,
input_nc=opt.unet_input_nc,
output_nc=opt.unet_output_nc,
weights=opt.weights_audio)
nets = (net_visual, net_audio)
# construct our audio-visual model
model = AudioVisualModel(nets, opt)
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model.to(opt.device)
# set up optimizer
optimizer = create_optimizer(nets, opt)
# set up loss function
loss_criterion = torch.nn.MSELoss()
if(len(opt.gpu_ids) > 0):
loss_criterion.cuda(opt.gpu_ids[0])
# initialization
total_steps = 0
data_loading_time = []
model_forward_time = []
model_backward_time = []
batch_loss = []
best_err = float("inf")
for epoch in range(1, opt.niter+1):
torch.cuda.synchronize()
epoch_start_time = time.time()
if(opt.measure_time):
iter_start_time = time.time()
for i, data in enumerate(dataset):
if(opt.measure_time):
torch.cuda.synchronize()
iter_data_loaded_time = time.time()
total_steps += opt.batchSize
# forward pass
model.zero_grad()
output = model.forward(data)
# compute loss
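        # The model predicts the spectrogram of the (left - right) channel
        # difference; MSE is taken against the ground-truth difference spectrogram.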
loss = loss_criterion(output['binaural_spectrogram'], Variable(output['audio_gt'], requires_grad=False))
batch_loss.append(loss.item())
if(opt.measure_time):
torch.cuda.synchronize()
iter_data_forwarded_time = time.time()
# update optimizer
optimizer.zero_grad()
loss.backward()
optimizer.step()
if(opt.measure_time):
iter_model_backwarded_time = time.time()
data_loading_time.append(iter_data_loaded_time - iter_start_time)
model_forward_time.append(iter_data_forwarded_time - iter_data_loaded_time)
model_backward_time.append(iter_model_backwarded_time - iter_data_forwarded_time)
if(total_steps // opt.batchSize % opt.display_freq == 0):
print('Display training progress at (epoch %d, total_steps %d)' % (epoch, total_steps))
avg_loss = sum(batch_loss) / len(batch_loss)
print('Average loss: %.3f' % (avg_loss))
batch_loss = []
if opt.tensorboard:
writer.add_scalar('data/loss', avg_loss, total_steps)
if(opt.measure_time):
print('average data loading time: ' + str(sum(data_loading_time)/len(data_loading_time)))
print('average forward time: ' + str(sum(model_forward_time)/len(model_forward_time)))
print('average backward time: ' + str(sum(model_backward_time)/len(model_backward_time)))
data_loading_time = []
model_forward_time = []
model_backward_time = []
print('end of display \n')
if(total_steps // opt.batchSize % opt.save_latest_freq == 0):
print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'visual_latest.pth'))
torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'audio_latest.pth'))
if(total_steps // opt.batchSize % opt.validation_freq == 0 and opt.validation_on):
model.eval()
opt.mode = 'val'
print('Display validation results at (epoch %d, total_steps %d)' % (epoch, total_steps))
val_err = display_val(model, loss_criterion, writer, total_steps, dataset_val, opt)
print('end of display \n')
model.train()
opt.mode = 'train'
#save the model that achieves the smallest validation error
if val_err < best_err:
best_err = val_err
print('saving the best model (epoch %d, total_steps %d) with validation error %.3f\n' % (epoch, total_steps, val_err))
torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'visual_best.pth'))
torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, 'audio_best.pth'))
if(opt.measure_time):
iter_start_time = time.time()
if(epoch % opt.save_epoch_freq == 0):
print('saving the model at the end of epoch %d, total_steps %d' % (epoch, total_steps))
torch.save(net_visual.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, str(epoch) + '_visual.pth'))
torch.save(net_audio.state_dict(), os.path.join('.', opt.checkpoints_dir, opt.name, str(epoch) + '_audio.pth'))
#decrease learning rate 6% every opt.learning_rate_decrease_itr epochs
if(opt.learning_rate_decrease_itr > 0 and epoch % opt.learning_rate_decrease_itr == 0):
decrease_learning_rate(optimizer, opt.decay_factor)
print('decreased learning rate by ', opt.decay_factor)
| 2.5D-Visual-Sound-main | train.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import librosa
import argparse
import numpy as np
from numpy import linalg as LA
from scipy.signal import hilbert
from data.audioVisual_dataset import generate_spectrogram
import statistics as stat
def normalize(samples):
return samples / np.maximum(1e-20, np.max(np.abs(samples)))
def STFT_L2_distance(predicted_binaural, gt_binaural):
#channel1
predicted_spect_channel1 = librosa.core.stft(np.asfortranarray(predicted_binaural[0,:]), n_fft=512, hop_length=160, win_length=400, center=True)
gt_spect_channel1 = librosa.core.stft(np.asfortranarray(gt_binaural[0,:]), n_fft=512, hop_length=160, win_length=400, center=True)
real = np.expand_dims(np.real(predicted_spect_channel1), axis=0)
imag = np.expand_dims(np.imag(predicted_spect_channel1), axis=0)
predicted_realimag_channel1 = np.concatenate((real, imag), axis=0)
real = np.expand_dims(np.real(gt_spect_channel1), axis=0)
imag = np.expand_dims(np.imag(gt_spect_channel1), axis=0)
gt_realimag_channel1 = np.concatenate((real, imag), axis=0)
channel1_distance = np.mean(np.power((predicted_realimag_channel1 - gt_realimag_channel1), 2))
#channel2
predicted_spect_channel2 = librosa.core.stft(np.asfortranarray(predicted_binaural[1,:]), n_fft=512, hop_length=160, win_length=400, center=True)
gt_spect_channel2 = librosa.core.stft(np.asfortranarray(gt_binaural[1,:]), n_fft=512, hop_length=160, win_length=400, center=True)
real = np.expand_dims(np.real(predicted_spect_channel2), axis=0)
imag = np.expand_dims(np.imag(predicted_spect_channel2), axis=0)
predicted_realimag_channel2 = np.concatenate((real, imag), axis=0)
real = np.expand_dims(np.real(gt_spect_channel2), axis=0)
imag = np.expand_dims(np.imag(gt_spect_channel2), axis=0)
gt_realimag_channel2 = np.concatenate((real, imag), axis=0)
channel2_distance = np.mean(np.power((predicted_realimag_channel2 - gt_realimag_channel2), 2))
#sum the distance between two channels
stft_l2_distance = channel1_distance + channel2_distance
return float(stft_l2_distance)
def Envelope_distance(predicted_binaural, gt_binaural):
#channel1
pred_env_channel1 = np.abs(hilbert(predicted_binaural[0,:]))
gt_env_channel1 = np.abs(hilbert(gt_binaural[0,:]))
channel1_distance = np.sqrt(np.mean((gt_env_channel1 - pred_env_channel1)**2))
#channel2
pred_env_channel2 = np.abs(hilbert(predicted_binaural[1,:]))
gt_env_channel2 = np.abs(hilbert(gt_binaural[1,:]))
channel2_distance = np.sqrt(np.mean((gt_env_channel2 - pred_env_channel2)**2))
#sum the distance between two channels
envelope_distance = channel1_distance + channel2_distance
return float(envelope_distance)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--results_root', type=str, required=True)
parser.add_argument('--audio_sampling_rate', default=16000, type=int, help='audio sampling rate')
parser.add_argument('--real_mono', default=False, type=bool, help='whether the input predicted binaural audio is mono audio')
parser.add_argument('--normalization', default=False, type=bool)
args = parser.parse_args()
stft_distance_list = []
envelope_distance_list = []
audioNames = os.listdir(args.results_root)
index = 1
for audio_name in audioNames:
if index % 10 == 0:
print "Evaluating testing example " + str(index) + " :", audio_name
#check whether input binaural is mono, replicate to two channels if it's mono
if args.real_mono:
mono_sound, audio_rate = librosa.load(os.path.join(args.results_root, audio_name, 'mixed_mono.wav'), sr=args.audio_sampling_rate)
predicted_binaural = np.repeat(np.expand_dims(mono_sound, 0), 2, axis=0)
if args.normalization:
predicted_binaural = normalize(predicted_binaural)
else:
predicted_binaural, audio_rate = librosa.load(os.path.join(args.results_root, audio_name, 'predicted_binaural.wav'), sr=args.audio_sampling_rate, mono=False)
if args.normalization:
predicted_binaural = normalize(predicted_binaural)
gt_binaural, audio_rate = librosa.load(os.path.join(args.results_root, audio_name, 'input_binaural.wav'), sr=args.audio_sampling_rate, mono=False)
if args.normalization:
gt_binaural = normalize(gt_binaural)
#get results for this audio
stft_distance_list.append(STFT_L2_distance(predicted_binaural, gt_binaural))
envelope_distance_list.append(Envelope_distance(predicted_binaural, gt_binaural))
index = index + 1
#print the results
print "STFT L2 Distance: ", stat.mean(stft_distance_list), stat.stdev(stft_distance_list), stat.stdev(stft_distance_list) / np.sqrt(len(stft_distance_list))
print "Average Envelope Distance: ", stat.mean(envelope_distance_list), stat.stdev(envelope_distance_list), stat.stdev(envelope_distance_list) / np.sqrt(len(envelope_distance_list))
if __name__ == '__main__':
main()
| 2.5D-Visual-Sound-main | evaluate.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import librosa
import numpy as np
from PIL import Image
import subprocess
from options.test_options import TestOptions
import torchvision.transforms as transforms
import torch
from models.models import ModelBuilder
from models.audioVisual_model import AudioVisualModel
from data.audioVisual_dataset import generate_spectrogram
def audio_normalize(samples, desired_rms = 0.1, eps = 1e-4):
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return rms / desired_rms, samples
def main():
#load test arguments
opt = TestOptions().parse()
opt.device = torch.device("cuda")
# network builders
builder = ModelBuilder()
net_visual = builder.build_visual(weights=opt.weights_visual)
net_audio = builder.build_audio(
ngf=opt.unet_ngf,
input_nc=opt.unet_input_nc,
output_nc=opt.unet_output_nc,
weights=opt.weights_audio)
nets = (net_visual, net_audio)
# construct our audio-visual model
model = AudioVisualModel(nets, opt)
model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
model.to(opt.device)
model.eval()
#load the audio to perform separation
audio, audio_rate = librosa.load(opt.input_audio_path, sr=opt.audio_sampling_rate, mono=False)
audio_channel1 = audio[0,:]
audio_channel2 = audio[1,:]
#define the transformation to perform on visual frames
vision_transform_list = [transforms.Resize((224,448)), transforms.ToTensor()]
vision_transform_list.append(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))
vision_transform = transforms.Compose(vision_transform_list)
#perform spatialization over the whole audio using a sliding window approach
overlap_count = np.zeros((audio.shape)) #count the number of times a data point is calculated
binaural_audio = np.zeros((audio.shape))
    #perform spatialization over the whole spectrogram in a sliding-window fashion
sliding_window_start = 0
data = {}
samples_per_window = int(opt.audio_length * opt.audio_sampling_rate)
while sliding_window_start + samples_per_window < audio.shape[-1]:
sliding_window_end = sliding_window_start + samples_per_window
normalizer, audio_segment = audio_normalize(audio[:,sliding_window_start:sliding_window_end])
audio_segment_channel1 = audio_segment[0,:]
audio_segment_channel2 = audio_segment[1,:]
audio_segment_mix = audio_segment_channel1 + audio_segment_channel2
data['audio_diff_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
data['audio_mix_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
#get the frame index for current window
frame_index = int(round((((sliding_window_start + samples_per_window / 2.0) / audio.shape[-1]) * opt.input_audio_length + 0.05) * 10 ))
image = Image.open(os.path.join(opt.video_frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB')
#image = image.transpose(Image.FLIP_LEFT_RIGHT)
frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension
data['frame'] = frame
output = model.forward(data)
predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()
#ISTFT to convert back to audio
reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer
binaural_audio[:,sliding_window_start:sliding_window_end] = binaural_audio[:,sliding_window_start:sliding_window_end] + reconstructed_binaural
overlap_count[:,sliding_window_start:sliding_window_end] = overlap_count[:,sliding_window_start:sliding_window_end] + 1
sliding_window_start = sliding_window_start + int(opt.hop_size * opt.audio_sampling_rate)
#deal with the last segment
normalizer, audio_segment = audio_normalize(audio[:,-samples_per_window:])
    audio_segment_channel1 = audio_segment[0,:]
    audio_segment_channel2 = audio_segment[1,:]
    audio_segment_mix = audio_segment_channel1 + audio_segment_channel2 #recompute the mix for the last segment
data['audio_diff_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 - audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
data['audio_mix_spec'] = torch.FloatTensor(generate_spectrogram(audio_segment_channel1 + audio_segment_channel2)).unsqueeze(0) #unsqueeze to add a batch dimension
#get the frame index for last window
frame_index = int(round(((opt.input_audio_length - opt.audio_length / 2.0) + 0.05) * 10))
image = Image.open(os.path.join(opt.video_frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB')
#image = image.transpose(Image.FLIP_LEFT_RIGHT)
frame = vision_transform(image).unsqueeze(0) #unsqueeze to add a batch dimension
data['frame'] = frame
output = model.forward(data)
predicted_spectrogram = output['binaural_spectrogram'][0,:,:,:].data[:].cpu().numpy()
#ISTFT to convert back to audio
reconstructed_stft_diff = predicted_spectrogram[0,:,:] + (1j * predicted_spectrogram[1,:,:])
reconstructed_signal_diff = librosa.istft(reconstructed_stft_diff, hop_length=160, win_length=400, center=True, length=samples_per_window)
reconstructed_signal_left = (audio_segment_mix + reconstructed_signal_diff) / 2
reconstructed_signal_right = (audio_segment_mix - reconstructed_signal_diff) / 2
reconstructed_binaural = np.concatenate((np.expand_dims(reconstructed_signal_left, axis=0), np.expand_dims(reconstructed_signal_right, axis=0)), axis=0) * normalizer
#add the spatialized audio to reconstructed_binaural
binaural_audio[:,-samples_per_window:] = binaural_audio[:,-samples_per_window:] + reconstructed_binaural
overlap_count[:,-samples_per_window:] = overlap_count[:,-samples_per_window:] + 1
#divide aggregated predicted audio by their corresponding counts
predicted_binaural_audio = np.divide(binaural_audio, overlap_count)
#check output directory
if not os.path.isdir(opt.output_dir_root):
os.mkdir(opt.output_dir_root)
mixed_mono = (audio_channel1 + audio_channel2) / 2
librosa.output.write_wav(os.path.join(opt.output_dir_root, 'predicted_binaural.wav'), predicted_binaural_audio, opt.audio_sampling_rate)
librosa.output.write_wav(os.path.join(opt.output_dir_root, 'mixed_mono.wav'), mixed_mono, opt.audio_sampling_rate)
librosa.output.write_wav(os.path.join(opt.output_dir_root, 'input_binaural.wav'), audio, opt.audio_sampling_rate)
if __name__ == '__main__':
main()
| 2.5D-Visual-Sound-main | demo.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--input_audio_path', required=True, help='path to the input audio file')
self.parser.add_argument('--video_frame_path', required=True, help='path to the input video frames')
self.parser.add_argument('--output_dir_root', type=str, default='test_output', help='path to the output files')
self.parser.add_argument('--input_audio_length', type=float, default=10, help='length of the testing video/audio')
self.parser.add_argument('--hop_size', default=0.05, type=float, help='the hop length to perform audio spatialization in a sliding window approach')
#model arguments
self.parser.add_argument('--weights_visual', type=str, default='', help="weights for visual stream")
self.parser.add_argument('--weights_audio', type=str, default='', help="weights for audio stream")
self.parser.add_argument('--unet_ngf', type=int, default=64, help="unet base channel dimension")
self.parser.add_argument('--unet_input_nc', type=int, default=2, help="input spectrogram number of channels")
self.parser.add_argument('--unet_output_nc', type=int, default=2, help="output spectrogram number of channels")
self.mode = "test"
self.isTrain = False
| 2.5D-Visual-Sound-main | options/test_options.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--display_freq', type=int, default=50, help='frequency of displaying average loss')
self.parser.add_argument('--save_epoch_freq', type=int, default=50, help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
self.parser.add_argument('--niter', type=int, default=1000, help='# of epochs to train')
self.parser.add_argument('--learning_rate_decrease_itr', type=int, default=-1, help='how often is the learning rate decreased by six percent')
self.parser.add_argument('--decay_factor', type=float, default=0.94, help='learning rate decay factor')
self.parser.add_argument('--tensorboard', type=bool, default=False, help='use tensorboard to visualize loss change ')
self.parser.add_argument('--measure_time', type=bool, default=False, help='measure time of different steps during training')
self.parser.add_argument('--validation_on', action='store_true', help='whether to test on validation set during training')
self.parser.add_argument('--validation_freq', type=int, default=100, help='frequency of testing on validation set')
self.parser.add_argument('--validation_batches', type=int, default=10, help='number of batches to test for validation')
self.parser.add_argument('--enable_data_augmentation', type=bool, default=True, help='whether to augment input frame')
#model arguments
self.parser.add_argument('--weights_visual', type=str, default='', help="weights for visual stream")
self.parser.add_argument('--weights_audio', type=str, default='', help="weights for audio stream")
self.parser.add_argument('--unet_ngf', type=int, default=64, help="unet base channel dimension")
self.parser.add_argument('--unet_input_nc', type=int, default=2, help="input spectrogram number of channels")
self.parser.add_argument('--unet_output_nc', type=int, default=2, help="output spectrogram number of channels")
#optimizer arguments
self.parser.add_argument('--lr_visual', type=float, default=0.0001, help='learning rate for visual stream')
self.parser.add_argument('--lr_audio', type=float, default=0.001, help='learning rate for unet')
self.parser.add_argument('--optimizer', default='adam', type=str, help='adam or sgd for optimization')
self.parser.add_argument('--beta1', default=0.9, type=float, help='momentum for sgd, beta1 for adam')
self.parser.add_argument('--weight_decay', default=0.0005, type=float, help='weights regularizer')
self.mode = "train"
self.isTrain = True
self.enable_data_augmentation = True
| 2.5D-Visual-Sound-main | options/train_options.py |
2.5D-Visual-Sound-main | options/__init__.py |
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
from util import util
import torch
class BaseOptions():
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialized = False
def initialize(self):
self.parser.add_argument('--hdf5FolderPath', help='path to the folder that contains train.h5, val.h5 and test.h5')
        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0, or 0,1,2. use -1 for CPU')
self.parser.add_argument('--name', type=str, default='spatialAudioVisual', help='name of the experiment. It decides where to store models')
self.parser.add_argument('--checkpoints_dir', type=str, default='checkpoints/', help='models are saved here')
self.parser.add_argument('--model', type=str, default='audioVisual', help='chooses how datasets are loaded.')
self.parser.add_argument('--batchSize', type=int, default=32, help='input batch size')
self.parser.add_argument('--nThreads', default=16, type=int, help='# threads for loading data')
self.parser.add_argument('--audio_sampling_rate', default=16000, type=int, help='audio sampling rate')
self.parser.add_argument('--audio_length', default=0.63, type=float, help='audio length, default 0.63s')
self.enable_data_augmentation = True
self.initialized = True
def parse(self):
if not self.initialized:
self.initialize()
self.opt = self.parser.parse_args()
self.opt.mode = self.mode
self.opt.isTrain = self.isTrain
self.opt.enable_data_augmentation = self.enable_data_augmentation
str_ids = self.opt.gpu_ids.split(',')
self.opt.gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
self.opt.gpu_ids.append(id)
# set gpu ids
if len(self.opt.gpu_ids) > 0:
torch.cuda.set_device(self.opt.gpu_ids[0])
#I should process the opt here, like gpu ids, etc.
args = vars(self.opt)
print('------------ Options -------------')
for k, v in sorted(args.items()):
print('%s: %s' % (str(k), str(v)))
print('-------------- End ----------------')
# save to the disk
expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
util.mkdirs(expr_dir)
file_name = os.path.join(expr_dir, 'opt.txt')
with open(file_name, 'wt') as opt_file:
opt_file.write('------------ Options -------------\n')
for k, v in sorted(args.items()):
opt_file.write('%s: %s\n' % (str(k), str(v)))
opt_file.write('-------------- End ----------------\n')
return self.opt
| 2.5D-Visual-Sound-main | options/base_options.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
| 2.5D-Visual-Sound-main | util/util.py |
2.5D-Visual-Sound-main | util/__init__.py |
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torchvision
from .networks import VisualNet, AudioNet, weights_init
class ModelBuilder():
# builder for visual stream
def build_visual(self, weights=''):
pretrained = True
original_resnet = torchvision.models.resnet18(pretrained)
net = VisualNet(original_resnet)
if len(weights) > 0:
print('Loading weights for visual stream')
net.load_state_dict(torch.load(weights))
return net
#builder for audio stream
def build_audio(self, ngf=64, input_nc=2, output_nc=2, weights=''):
#AudioNet: 5 layer UNet
net = AudioNet(ngf, input_nc, output_nc)
net.apply(weights_init)
if len(weights) > 0:
print('Loading weights for audio stream')
net.load_state_dict(torch.load(weights))
return net
| 2.5D-Visual-Sound-main | models/models.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
from torch import optim
import torch.nn.functional as F
from . import networks,criterion
from torch.autograd import Variable
class AudioVisualModel(torch.nn.Module):
def name(self):
return 'AudioVisualModel'
def __init__(self, nets, opt):
super(AudioVisualModel, self).__init__()
self.opt = opt
#initialize model
self.net_visual, self.net_audio = nets
def forward(self, input, volatile=False):
visual_input = input['frame']
audio_diff = input['audio_diff_spec']
audio_mix = input['audio_mix_spec']
audio_gt = Variable(audio_diff[:,:,:-1,:], requires_grad=False)
input_spectrogram = Variable(audio_mix, requires_grad=False, volatile=volatile)
visual_feature = self.net_visual(Variable(visual_input, requires_grad=False, volatile=volatile))
mask_prediction = self.net_audio(input_spectrogram, visual_feature)
#complex masking to obtain the predicted spectrogram
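        # Complex multiplication per time-frequency bin: (a+bi)(c+di) = (ac-bd) + (ad+bc)i,
        # with channel 0 holding the real part and channel 1 the imaginary part.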
spectrogram_diff_real = input_spectrogram[:,0,:-1,:] * mask_prediction[:,0,:,:] - input_spectrogram[:,1,:-1,:] * mask_prediction[:,1,:,:]
spectrogram_diff_img = input_spectrogram[:,0,:-1,:] * mask_prediction[:,1,:,:] + input_spectrogram[:,1,:-1,:] * mask_prediction[:,0,:,:]
binaural_spectrogram = torch.cat((spectrogram_diff_real.unsqueeze(1), spectrogram_diff_img.unsqueeze(1)), 1)
output = {'mask_prediction': mask_prediction, 'binaural_spectrogram': binaural_spectrogram, 'audio_gt': audio_gt}
return output
| 2.5D-Visual-Sound-main | models/audioVisual_model.py |
2.5D-Visual-Sound-main | models/__init__.py |
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
def unet_conv(input_nc, output_nc, norm_layer=nn.BatchNorm2d):
downconv = nn.Conv2d(input_nc, output_nc, kernel_size=4, stride=2, padding=1)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(output_nc)
return nn.Sequential(*[downconv, downnorm, downrelu])
def unet_upconv(input_nc, output_nc, outermost=False, norm_layer=nn.BatchNorm2d):
upconv = nn.ConvTranspose2d(input_nc, output_nc, kernel_size=4, stride=2, padding=1)
uprelu = nn.ReLU(True)
upnorm = norm_layer(output_nc)
if not outermost:
return nn.Sequential(*[upconv, upnorm, uprelu])
else:
return nn.Sequential(*[upconv, nn.Sigmoid()])
def create_conv(input_channels, output_channels, kernel, paddings, batch_norm=True, Relu=True, stride=1):
model = [nn.Conv2d(input_channels, output_channels, kernel, stride = stride, padding = paddings)]
if(batch_norm):
model.append(nn.BatchNorm2d(output_channels))
if(Relu):
model.append(nn.ReLU())
return nn.Sequential(*model)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
class VisualNet(nn.Module):
def __init__(self, original_resnet):
super(VisualNet, self).__init__()
layers = list(original_resnet.children())[0:-2]
self.feature_extraction = nn.Sequential(*layers) #features before conv1x1
def forward(self, x):
x = self.feature_extraction(x)
return x
class AudioNet(nn.Module):
def __init__(self, ngf=64, input_nc=2, output_nc=2):
super(AudioNet, self).__init__()
#initialize layers
self.audionet_convlayer1 = unet_conv(input_nc, ngf)
self.audionet_convlayer2 = unet_conv(ngf, ngf * 2)
self.audionet_convlayer3 = unet_conv(ngf * 2, ngf * 4)
self.audionet_convlayer4 = unet_conv(ngf * 4, ngf * 8)
self.audionet_convlayer5 = unet_conv(ngf * 8, ngf * 8)
self.audionet_upconvlayer1 = unet_upconv(1296, ngf * 8) #1296 (audio-visual feature) = 784 (visual feature) + 512 (audio feature)
self.audionet_upconvlayer2 = unet_upconv(ngf * 16, ngf *4)
self.audionet_upconvlayer3 = unet_upconv(ngf * 8, ngf * 2)
self.audionet_upconvlayer4 = unet_upconv(ngf * 4, ngf)
self.audionet_upconvlayer5 = unet_upconv(ngf * 2, output_nc, True) #outermost layer use a sigmoid to bound the mask
self.conv1x1 = create_conv(512, 8, 1, 0) #reduce dimension of extracted visual features
def forward(self, x, visual_feat):
audio_conv1feature = self.audionet_convlayer1(x)
audio_conv2feature = self.audionet_convlayer2(audio_conv1feature)
audio_conv3feature = self.audionet_convlayer3(audio_conv2feature)
audio_conv4feature = self.audionet_convlayer4(audio_conv3feature)
audio_conv5feature = self.audionet_convlayer5(audio_conv4feature)
visual_feat = self.conv1x1(visual_feat)
visual_feat = visual_feat.view(visual_feat.shape[0], -1, 1, 1) #flatten visual feature
visual_feat = visual_feat.repeat(1, 1, audio_conv5feature.shape[-2], audio_conv5feature.shape[-1]) #tile visual feature
audioVisual_feature = torch.cat((visual_feat, audio_conv5feature), dim=1)
audio_upconv1feature = self.audionet_upconvlayer1(audioVisual_feature)
audio_upconv2feature = self.audionet_upconvlayer2(torch.cat((audio_upconv1feature, audio_conv4feature), dim=1))
audio_upconv3feature = self.audionet_upconvlayer3(torch.cat((audio_upconv2feature, audio_conv3feature), dim=1))
audio_upconv4feature = self.audionet_upconvlayer4(torch.cat((audio_upconv3feature, audio_conv2feature), dim=1))
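        # The sigmoid output lies in [0, 1]; rescale to [-1, 1] so the complex
        # mask can take negative values.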
mask_prediction = self.audionet_upconvlayer5(torch.cat((audio_upconv4feature, audio_conv1feature), dim=1)) * 2 - 1
return mask_prediction
| 2.5D-Visual-Sound-main | models/networks.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
class BaseLoss(nn.Module):
def __init__(self):
super(BaseLoss, self).__init__()
def forward(self, preds, targets, weight=None):
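        # Accepts either a list of predictions (e.g. multi-scale outputs) or a
        # single tensor; list inputs are reduced by averaging per-element losses.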
if isinstance(preds, list):
N = len(preds)
if weight is None:
weight = preds[0].new_ones(1)
errs = [self._forward(preds[n], targets[n], weight[n])
for n in range(N)]
err = torch.mean(torch.stack(errs))
elif isinstance(preds, torch.Tensor):
if weight is None:
weight = preds.new_ones(1)
err = self._forward(preds, targets, weight)
return err
class L1Loss(BaseLoss):
def __init__(self):
super(L1Loss, self).__init__()
def _forward(self, pred, target, weight):
return torch.mean(weight * torch.abs(pred - target))
class L2Loss(BaseLoss):
def __init__(self):
super(L2Loss, self).__init__()
def _forward(self, pred, target, weight):
return torch.mean(weight * torch.pow(pred - target, 2))
class MSELoss(BaseLoss):
def __init__(self):
super(MSELoss, self).__init__()
def _forward(self, pred, target):
return F.mse_loss(pred, target)
class BCELoss(BaseLoss):
def __init__(self):
super(BCELoss, self).__init__()
def _forward(self, pred, target, weight):
return F.binary_cross_entropy(pred, target, weight=weight)
class BCEWithLogitsLoss(BaseLoss):
def __init__(self):
super(BCEWithLogitsLoss, self).__init__()
def _forward(self, pred, target, weight):
return F.binary_cross_entropy_with_logits(pred, target, weight=weight)
| 2.5D-Visual-Sound-main | models/criterion.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
class BaseDataset(data.Dataset):
def __init__(self):
super(BaseDataset, self).__init__()
def name(self):
return 'BaseDataset'
def initialize(self, opt):
pass
| 2.5D-Visual-Sound-main | data/base_dataset.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def CreateDataLoader(opt):
from data.custom_dataset_data_loader import CustomDatasetDataLoader
data_loader = CustomDatasetDataLoader()
print(data_loader.name())
data_loader.initialize(opt)
return data_loader
| 2.5D-Visual-Sound-main | data/data_loader.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
class BaseDataLoader():
def __init__(self):
pass
def initialize(self, opt):
self.opt = opt
pass
def load_data():
return None
| 2.5D-Visual-Sound-main | data/base_data_loader.py |
2.5D-Visual-Sound-main | data/__init__.py |
|
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch.utils.data
from data.base_data_loader import BaseDataLoader
def CreateDataset(opt):
dataset = None
if opt.model == 'audioVisual':
from data.audioVisual_dataset import AudioVisualDataset
dataset = AudioVisualDataset()
else:
raise ValueError("Dataset [%s] not recognized." % opt.model)
print("dataset [%s] was created" % (dataset.name()))
dataset.initialize(opt)
return dataset
class CustomDatasetDataLoader(BaseDataLoader):
def name(self):
return 'CustomDatasetDataLoader'
def initialize(self, opt):
BaseDataLoader.initialize(self, opt)
self.dataset = CreateDataset(opt)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.nThreads))
def load_data(self):
return self
def __len__(self):
return len(self.dataset)
def __iter__(self):
for i, data in enumerate(self.dataloader):
yield data
| 2.5D-Visual-Sound-main | data/custom_dataset_data_loader.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os.path
import time
import librosa
import h5py
import random
import math
import numpy as np
import glob
import torch
from PIL import Image, ImageEnhance
import torchvision.transforms as transforms
from data.base_dataset import BaseDataset
def normalize(samples, desired_rms = 0.1, eps = 1e-4):
rms = np.maximum(eps, np.sqrt(np.mean(samples**2)))
samples = samples * (desired_rms / rms)
return samples
def generate_spectrogram(audio):
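    # STFT with a 25 ms window and 10 ms hop at 16 kHz (n_fft=512, win=400, hop=160);
    # real and imaginary parts are stacked as a 2-channel tensor.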
spectro = librosa.core.stft(audio, n_fft=512, hop_length=160, win_length=400, center=True)
real = np.expand_dims(np.real(spectro), axis=0)
imag = np.expand_dims(np.imag(spectro), axis=0)
spectro_two_channel = np.concatenate((real, imag), axis=0)
return spectro_two_channel
def process_image(image, augment):
image = image.resize((480,240))
w,h = image.size
w_offset = w - 448
h_offset = h - 224
left = random.randrange(0, w_offset + 1)
upper = random.randrange(0, h_offset + 1)
image = image.crop((left, upper, left+448, upper+224))
if augment:
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(random.random()*0.6 + 0.7)
enhancer = ImageEnhance.Color(image)
image = enhancer.enhance(random.random()*0.6 + 0.7)
return image
class AudioVisualDataset(BaseDataset):
def initialize(self, opt):
self.opt = opt
self.audios = []
#load hdf5 file here
h5f_path = os.path.join(opt.hdf5FolderPath, opt.mode+".h5")
h5f = h5py.File(h5f_path, 'r')
self.audios = h5f['audio'][:]
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
vision_transform_list = [transforms.ToTensor(), normalize]
self.vision_transform = transforms.Compose(vision_transform_list)
def __getitem__(self, index):
#load audio
audio, audio_rate = librosa.load(self.audios[index], sr=self.opt.audio_sampling_rate, mono=False)
#randomly get a start time for the audio segment from the 10s clip
audio_start_time = random.uniform(0, 9.9 - self.opt.audio_length)
audio_end_time = audio_start_time + self.opt.audio_length
audio_start = int(audio_start_time * self.opt.audio_sampling_rate)
audio_end = audio_start + int(self.opt.audio_length * self.opt.audio_sampling_rate)
audio = audio[:, audio_start:audio_end]
audio = normalize(audio)
audio_channel1 = audio[0,:]
audio_channel2 = audio[1,:]
#get the frame dir path based on audio path
path_parts = self.audios[index].strip().split('/')
path_parts[-1] = path_parts[-1][:-4] + '.mp4'
path_parts[-2] = 'frames'
frame_path = '/'.join(path_parts)
# get the closest frame to the audio segment
#frame_index = int(round((audio_start_time + audio_end_time) / 2.0 + 0.5)) #1 frame extracted per second
frame_index = int(round(((audio_start_time + audio_end_time) / 2.0 + 0.05) * 10)) #10 frames extracted per second
frame = process_image(Image.open(os.path.join(frame_path, str(frame_index).zfill(6) + '.png')).convert('RGB'), self.opt.enable_data_augmentation)
frame = self.vision_transform(frame)
#passing the spectrogram of the difference
audio_diff_spec = torch.FloatTensor(generate_spectrogram(audio_channel1 - audio_channel2))
audio_mix_spec = torch.FloatTensor(generate_spectrogram(audio_channel1 + audio_channel2))
return {'frame': frame, 'audio_diff_spec':audio_diff_spec, 'audio_mix_spec':audio_mix_spec}
def __len__(self):
return len(self.audios)
def name(self):
return 'AudioVisualDataset'
| 2.5D-Visual-Sound-main | data/audioVisual_dataset.py |
import torch
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
def pl_train(cfg, pl_model_class):
if cfg.seed is not None:
torch.manual_seed(cfg.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(cfg.seed)
model = pl_model_class(cfg.model, cfg.dataset, cfg.train)
if 'pl' in cfg and 'profile' in cfg.pl and cfg.pl.profile:
# profiler=pl.profiler.AdvancedProfiler(output_filename=cfg.train.profiler),
profiler_args = { 'profiler': pl.profiler.AdvancedProfiler(), }
else:
profiler_args = {}
if 'pl' in cfg and 'wandb' in cfg.pl and cfg.pl.wandb:
# kwargs['logger'] = WandbLogger(name=config['pl_wandb'], project='ops-memory-pl')
logger = WandbLogger(project='ops-memory-pl')
logger.log_hyperparams(cfg.model)
logger.log_hyperparams(cfg.dataset)
logger.log_hyperparams(cfg.train)
profiler_args['logger'] = logger
print("profiler args", profiler_args)
trainer = pl.Trainer(
# gpus=1 if config['gpu'] else None,
gpus=1,
gradient_clip_val=cfg.train.gradient_clip_val,
max_epochs=1 if cfg.smoke_test else cfg.train.epochs,
progress_bar_refresh_rate=1,
limit_train_batches=cfg.train.limit_train_batches,
track_grad_norm=2,
        **{'logger': False, **profiler_args},  # a wandb logger in profiler_args overrides the default
)
trainer.fit(model)
# trainer.test(model)
return trainer, model
| hippo-code-master | pl_runner.py |
import torch
from omegaconf.dictconfig import DictConfig
from munch import Munch
def remove_postfix(text, postfix):
if text.endswith(postfix):
return text[:-len(postfix)]
return text
# pytorch-lightning returns pytorch 0-dim tensor instead of python scalar
def to_scalar(x):
return x.item() if isinstance(x, torch.Tensor) else x
def dictconfig_to_munch(d):
"""Convert object of type OmegaConf to Munch so Wandb can log properly
Support nested dictionary.
"""
return Munch({k: dictconfig_to_munch(v) if isinstance(v, DictConfig)
else v for k, v in d.items()})
def munch_to_dictconfig(m):
return DictConfig({k: munch_to_dictconfig(v) if isinstance(v, Munch)
else v for k, v in m.items()})
| hippo-code-master | utils.py |
from pathlib import Path
project_root = Path(__file__).parent.absolute()
import os
# Add to $PYTHONPATH so that ray workers can see
os.environ['PYTHONPATH'] = str(project_root) + ":" + os.environ.get('PYTHONPATH', '')
import numpy as np
import torch
import pytorch_lightning as pl
import hydra
from omegaconf import OmegaConf
from model.model import Model
from datasets import DatasetBase
from model.exprnn.parametrization import get_parameters
from utils import to_scalar
class RNNTraining(pl.LightningModule):
def __init__(self, model_args, dataset_cfg, train_args):
super().__init__()
self.save_hyperparameters()
self.dataset_cfg = dataset_cfg
self.dataset = DatasetBase.registry[dataset_cfg.name](dataset_cfg)
self.train_args = train_args
self.model_args = model_args
# self.model_args.cell_args.max_length = self.dataset.N # TODO fix datasets
# cell_args = model_args.cell_args
# other_args = {k: v for k, v in model_args.items() if k not in ['cell', 'cell_args', 'dropout']}
self.model = Model(
self.dataset.input_size,
self.dataset.output_size,
# model_args.cell,
# cell_args=cell_args,
output_len=self.dataset.output_len,
# dropout=model_args.dropout,
# max_length=self.dataset.N,
**model_args,
)
    def forward(self, input):
        return self.model(input)
def _shared_step(self, batch, batch_idx, prefix='train'):
batch_x, batch_y, *len_batch = batch
# Either fixed length sequence or variable length
len_batch = None if not len_batch else len_batch[0]
out = self.model(batch_x, len_batch)
loss = self.dataset.loss(out, batch_y, len_batch)
metrics = self.dataset.metrics(out, batch_y)
metrics = {f'{prefix}_{k}': v for k, v in metrics.items()}
self.log(f'{prefix}_loss', loss, on_epoch=True, prog_bar=False)
self.log_dict(metrics, on_epoch=True, prog_bar=True)
return loss
def training_step(self, batch, batch_idx):
return self._shared_step(batch, batch_idx, prefix='train')
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return (self._shared_step(batch, batch_idx, prefix='val') if dataloader_idx == 0 else
self._shared_step(batch, batch_idx, prefix='test'))
def test_step(self, batch, batch_idx):
return self._shared_step(batch, batch_idx, prefix='test')
def configure_optimizers(self):
name_to_opt = {'adam': torch.optim.Adam, 'rmsprop': torch.optim.RMSprop}
optimizer = name_to_opt[self.train_args.optimizer]
if self.model_args.cell == 'exprnn' or self.model_args.cell_args.get('orthogonal', False):
non_orth_params, log_orth_params = get_parameters(self.model)
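            # The log-orthogonal parameters of the ExpRNN parametrization are
            # trained with a 10x smaller learning rate, a common choice for
            # orthogonal parametrizations.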
return optimizer([
{'params': non_orth_params, 'lr': self.train_args.lr, 'weight_decay': self.train_args.wd},
# {'params': log_orth_params, 'lr': self.train_args.lr_orth},
{'params': log_orth_params, 'lr': self.train_args.lr/10.0},
])
else:
return optimizer(self.model.parameters(), lr=self.train_args.lr)
def prepare_data(self):
self.dataset.prepare_data()
kwargs = {'num_workers': self.dataset_cfg.num_workers, 'pin_memory': True}
self.dataset.prepare_dataloader(self.train_args.batch_size, **kwargs)
def train_dataloader(self):
return self.dataset.train_loader
def val_dataloader(self):
return [self.dataset.val_loader, self.dataset.test_loader]
def test_dataloader(self):
return self.dataset.test_loader
@hydra.main(config_path="cfg", config_name="config.yaml")
def main(cfg: OmegaConf):
# We want to add fields to cfg so need to call OmegaConf.set_struct
OmegaConf.set_struct(cfg, False)
print(OmegaConf.to_yaml(cfg))
if cfg.runner.name == 'pl':
from pl_runner import pl_train
trainer, model = pl_train(cfg, RNNTraining)
elif cfg.runner.name == 'ray':
# Shouldn't need to install ray unless doing distributed training
from ray_runner import ray_train
ray_train(cfg, RNNTraining)
else:
assert False, 'Only pl and ray runners are supported'
if __name__ == "__main__":
main()
| hippo-code-master | train.py |
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, BuildExtension
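# Typical build/install invocation (standard setuptools usage):
#   python setup.py install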
ext_modules = []
extension = CppExtension('hippo', ['hippo.cpp', 'hippolegs.cpp', 'hippolegt.cpp'], extra_compile_args=['-march=native'])
ext_modules.append(extension)
setup(
name='hippo',
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension})
| hippo-code-master | csrc/setup.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch.utils.data.dataset import IterableDataset
import numpy as np
def np_copying_data(L, M, A, batch_shape=()):
seq = np.random.randint(low=1, high=A-1, size=batch_shape+(M,))
zeros_x = np.zeros(batch_shape+(L,))
markers = (A-1) * np.ones(batch_shape+(M,))
zeros_y = np.zeros(batch_shape+(M+L,))
x_ = np.concatenate([seq, zeros_x, markers], axis=-1)
y_ = np.concatenate([zeros_y, seq], axis=-1)
x = F.one_hot(torch.tensor(x_, dtype=torch.int64), A).float()
y = torch.tensor(y_, dtype=torch.int64)
return x, y
def torch_copying_data(L, M, A, variable=False, batch_shape=()):
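    """Copying memory task: M tokens drawn from {1, ..., A-2} placed at the
    start of (or, if `variable`, scattered over) a length M+L input, followed
    by M marker symbols (A-1); the target is the original M tokens."""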
tokens = torch.randint(low=1, high=A-1, size=batch_shape+(M,))
if variable:
total_batch = np.prod(batch_shape)
inds = torch.stack([
torch.randperm(L+M)[:M]
for _ in range(total_batch)
], 0)
inds = inds.reshape(batch_shape+(M,))
inds, _ = inds.sort()
else:
inds = torch.arange(M).repeat(batch_shape+(1,))
zeros_x = torch.zeros(batch_shape+(M+L,), dtype=torch.long)
zeros_x.scatter_(-1, inds, tokens)
markers = (A-1) * torch.ones(batch_shape+(M,), dtype=torch.long)
x_ = torch.cat([zeros_x, markers], dim=-1)
y_ = torch.cat([tokens], dim=-1)
x = F.one_hot(x_, A).float()
y = y_
return x, y
def copying_static_dataset(L, M, A, variable, samples):
all_x, all_y = torch_copying_data(L, M, A, variable, batch_shape=(samples,))
print("Constructing Copying dataset of shape", all_x.shape)
ds = torch.utils.data.TensorDataset(all_x, all_y)
return ds
| hippo-code-master | datasets/copying.py |
import torch
from torch import nn
from torch.nn import functional as F
class Task:
@staticmethod
def metrics(outs, y, len_batch=None):
return {}
@staticmethod
def metrics_epoch(outs, y, len_batch=None):
return {}
class BinaryClassification(Task):
@staticmethod
def loss(logits, y, len_batch=None):
# BCE loss requires squeezing last dimension of logits so it has the same shape as y
return F.binary_cross_entropy_with_logits(logits.squeeze(-1), y.float())
@staticmethod
def metrics(logits, y, len_batch=None):
return {'accuracy': torch.eq(logits.squeeze(-1) >= 0, y).float().mean()}
@staticmethod
def metrics_epoch(logits, y, len_batch=None):
return BinaryClassification.metrics(torch.cat(logits), torch.cat(y), len_batch)
class MulticlassClassification(Task):
@staticmethod
def loss(logits, y, len_batch=None):
return F.cross_entropy(logits, y)
@staticmethod
def metrics(logits, y, len_batch=None):
return {'accuracy': torch.eq(torch.argmax(logits, dim=-1), y).float().mean()}
@staticmethod
def metrics_epoch(logits, y, len_batch=None):
return MulticlassClassification.metrics(torch.cat(logits, dim=0), torch.cat(y, dim=0), len_batch)
class MSERegression(Task):
@staticmethod
def loss(outs, y, len_batch=None):
if len_batch is None:
return F.mse_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.mse_loss(outs_masked, y_masked)
| hippo-code-master | datasets/tasks.py |
""" Load data for UEA datasets, in particular CharacterTrajectories
Adapted from https://github.com/patrick-kidger/NeuralCDE/blob/master/experiments/datasets/uea.py
"""
import os
import pathlib
import urllib.request
import zipfile
import sklearn.model_selection
import sktime.utils.data_io
import numpy as np
import torch
import collections as co
# TODO deal with this path properly as an option
here = pathlib.Path(__file__).resolve().parent
valid_dataset_names = {
'ArticularyWordRecognition',
'FaceDetection',
'NATOPS',
'AtrialFibrillation',
'FingerMovements',
    'PEMS-SF',
'BasicMotions',
'HandMovementDirection',
'PenDigits',
'CharacterTrajectories',
'Handwriting',
'PhonemeSpectra',
'Cricket',
'Heartbeat',
'RacketSports',
'DuckDuckGeese',
'InsectWingbeat',
'SelfRegulationSCP1',
'EigenWorms',
'JapaneseVowels',
'SelfRegulationSCP2',
'Epilepsy',
'Libras',
'SpokenArabicDigits',
'ERing',
'LSST',
'StandWalkJump',
'EthanolConcentration',
'MotorImagery',
'UWaveGestureLibrary',
}
def download():
""" Download data if not exists """
base_base_loc = here / 'data'
base_loc = base_base_loc / 'UEA'
loc = base_loc / 'Multivariate2018_ts.zip'
if os.path.exists(loc):
return
if not os.path.exists(base_base_loc):
os.mkdir(base_base_loc)
if not os.path.exists(base_loc):
os.mkdir(base_loc)
urllib.request.urlretrieve('http://www.timeseriesclassification.com/Downloads/Archives/Multivariate2018_ts.zip',
str(loc))
with zipfile.ZipFile(loc, 'r') as f:
f.extractall(str(base_loc))
def load_data(dataset_name):
""" Load X, y numpy data for given dataset """
assert dataset_name in valid_dataset_names, "Must specify a valid dataset name."
base_filename = here / 'data' / 'UEA' / 'Multivariate_ts' / dataset_name / dataset_name
train_X, train_y = sktime.utils.data_io.load_from_tsfile_to_dataframe(str(base_filename) + '_TRAIN.ts')
test_X, test_y = sktime.utils.data_io.load_from_tsfile_to_dataframe(str(base_filename) + '_TEST.ts')
train_X = train_X.to_numpy()
test_X = test_X.to_numpy()
X = np.concatenate((train_X, test_X), axis=0)
y = np.concatenate((train_y, test_y), axis=0)
return X, y
def save_data(dir, **tensors):
for tensor_name, tensor_value in tensors.items():
torch.save(tensor_value, str(dir / tensor_name) + '.pt')
def load_processed_data(dir):
tensors = {}
for filename in os.listdir(dir):
if filename.endswith('.pt'):
tensor_name = filename.split('.')[0]
tensor_value = torch.load(str(dir / filename))
tensors[tensor_name] = tensor_value
return tensors
def wrap_data(train_X, val_X, test_X, train_y, val_y, test_y, train_final_index, val_final_index,
test_final_index,
):
""" Wrap data into Pytorch Dataset. """
train_dataset = torch.utils.data.TensorDataset(train_X, train_y,
# train_final_index
)
val_dataset = torch.utils.data.TensorDataset(val_X, val_y,
# val_final_index
)
test_dataset = torch.utils.data.TensorDataset(test_X, test_y,
# test_final_index
)
return train_dataset, val_dataset, test_dataset
def split_data(tensor, stratify):
# 0.7/0.15/0.15 train/val/test split
(train_tensor, testval_tensor,
train_stratify, testval_stratify) = sklearn.model_selection.train_test_split(tensor, stratify,
train_size=0.7,
random_state=0,
shuffle=True,
stratify=stratify)
val_tensor, test_tensor = sklearn.model_selection.train_test_split(testval_tensor,
train_size=0.5,
random_state=1,
shuffle=True,
stratify=testval_stratify)
return train_tensor, val_tensor, test_tensor
def normalize_data(X, y):
""" Normalize data by training statistics per channel.
X: data tensor with channels as last dimension
"""
train_X, _, _ = split_data(X, y)
out = []
for Xi, train_Xi in zip(X.unbind(dim=-1), train_X.unbind(dim=-1)):
train_Xi_nonan = train_Xi.masked_select(~torch.isnan(train_Xi))
mean = train_Xi_nonan.mean() # compute statistics using only training data.
std = train_Xi_nonan.std()
out.append((Xi - mean) / (std + 1e-5))
out = torch.stack(out, dim=-1)
return out
def preprocess_data(
X, y,
final_index,
# append_times,
append_intensity,
):
X = normalize_data(X, y)
    # Append the extra channels. The order here (time, intensity, original) matters;
    # some models depend on it.
augmented_X = []
# if append_times:
# augmented_X.append(times.unsqueeze(0).repeat(X.size(0), 1).unsqueeze(-1))
if append_intensity: # Note this will append #channels copies of the same intensity
intensity = ~torch.isnan(X) # of size (batch, stream, channels)
intensity = intensity.to(X.dtype).cumsum(dim=1)
augmented_X.append(intensity)
augmented_X.append(X)
if len(augmented_X) == 1:
X = augmented_X[0]
else:
X = torch.cat(augmented_X, dim=2)
train_X, val_X, test_X = split_data(X, y) # TODO split data should just return y? or list of indices corresponding to splits
train_y, val_y, test_y = split_data(y, y)
train_final_index, val_final_index, test_final_index = split_data(final_index, y)
# train_coeffs = controldiffeq.natural_cubic_spline_coeffs(times, train_X)
# val_coeffs = controldiffeq.natural_cubic_spline_coeffs(times, val_X)
# test_coeffs = controldiffeq.natural_cubic_spline_coeffs(times, test_X)
in_channels = X.size(-1)
return (
# times,
# train_coeffs, val_coeffs, test_coeffs,
train_X, val_X, test_X,
train_y, val_y, test_y,
train_final_index, val_final_index, test_final_index,
in_channels
)
def process_data(dataset_name, intensity):
# We begin by loading both the train and test data and using our own train/val/test split.
# The reason for this is that (a) by default there is no val split and (b) the sizes of the train/test splits are
# really janky by default. (e.g. LSST has 2459 training samples and 2466 test samples.)
X, y = load_data(dataset_name)
lengths = torch.tensor([len(Xi[0]) for Xi in X])
final_index = lengths - 1
maxlen = lengths.max()
# X is now a numpy array of shape (batch, channel)
# Each channel is a pandas.core.series.Series object of length corresponding to the length of the time series
def _pad(channel, maxlen):
channel = torch.tensor(channel)
out = torch.full((maxlen,), channel[-1])
out[:channel.size(0)] = channel
return out
X = torch.stack([torch.stack([_pad(channel, maxlen) for channel in batch], dim=0) for batch in X], dim=0)
# X is now a tensor of shape (batch, channel, length)
X = X.transpose(-1, -2)
# X is now a tensor of shape (batch, length, channel)
times = torch.linspace(0, X.size(1) - 1, X.size(1))
# generator = torch.Generator().manual_seed(56789)
# for Xi in X:
# removed_points = torch.randperm(X.size(1), generator=generator)[:int(X.size(1) * missing_rate)].sort().values
# Xi[removed_points] = float('nan')
# Now fix the labels to be integers from 0 upwards
targets = co.OrderedDict()
counter = 0
for yi in y:
if yi not in targets:
targets[yi] = counter
counter += 1
y = torch.tensor([targets[yi] for yi in y])
(train_X, val_X, test_X,
train_y, val_y, test_y,
train_final_index, val_final_index,
test_final_index,
input_channels) = preprocess_data(
X, y, final_index,
# append_times=True,
append_intensity=intensity,
)
num_classes = counter
assert num_classes >= 2, f"Have only {num_classes} classes."
return (
# times,
train_X, val_X, test_X,
train_y, val_y, test_y,
train_final_index, val_final_index, test_final_index,
num_classes, input_channels
)
def get_data(
dataset_name,
intensity,
train_hz=1,
eval_hz=1,
timestamp=False,
train_ts=1,
eval_ts=1,
):
# We begin by loading both the train and test data and using our own train/val/test split.
# The reason for this is that (a) by default there is no val split and (b) the sizes of the train/test splits are
# really janky by default. (e.g. LSST has 2459 training samples and 2466 test samples.)
assert dataset_name in valid_dataset_names, "Must specify a valid dataset name."
base_base_loc = here / 'processed_data'
base_loc = base_base_loc / 'UEA'
loc = base_loc / (dataset_name + ('_intensity' if intensity else ''))
try:
tensors = load_processed_data(loc)
train_X = tensors['train_X']
val_X = tensors['val_X']
test_X = tensors['test_X']
train_y = tensors['train_y']
val_y = tensors['val_y']
test_y = tensors['test_y']
train_final_index = tensors['train_final_index']
val_final_index = tensors['val_final_index']
test_final_index = tensors['test_final_index']
num_classes = int(tensors['num_classes'])
input_channels = int(tensors['input_channels'])
    except (FileNotFoundError, KeyError):
print(f"Could not find preprocessed data. Loading {dataset_name}...")
download() # download the UEA data if necessary
if not os.path.exists(base_base_loc):
os.mkdir(base_base_loc)
if not os.path.exists(base_loc):
os.mkdir(base_loc)
if not os.path.exists(loc):
os.mkdir(loc)
( train_X, val_X, test_X, train_y, val_y, test_y, train_final_index, val_final_index,
test_final_index, num_classes, input_channels ) = process_data(dataset_name, intensity)
save_data(
loc,
train_X=train_X, val_X=val_X, test_X=test_X,
train_y=train_y, val_y=val_y, test_y=test_y, train_final_index=train_final_index,
val_final_index=val_final_index, test_final_index=test_final_index,
num_classes=torch.as_tensor(num_classes), input_channels=torch.as_tensor(input_channels),
)
return (
train_X, val_X, test_X,
train_y, val_y, test_y,
train_final_index, val_final_index, test_final_index,
num_classes, input_channels,
)
def _subsample(X, hz=1, uniform=True):
    """ Subsample X at frequency multiplier hz (uniformly spaced or random) and return the kept timestamps """
    L = X.shape[1]
    # create the index generator; the indices it yields are the timesteps that are *kept*
    if uniform:
        kept_points = torch.arange(int(L*hz)) // hz
        kept_points = kept_points.to(int)
        time_gen = lambda: kept_points
    else:
        generator = torch.Generator().manual_seed(56789)
        time_gen = lambda: torch.randperm(L, generator=generator)[:int(L*hz)].sort().values
X_ = []
T_ = []
for Xi in X:
times = time_gen()
Xi_ = Xi[times]
times_ = times.to(torch.float32).unsqueeze(-1)
X_.append(Xi_)
T_.append(times_)
return torch.stack(X_, dim=0), torch.stack(T_, dim=0)
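# A hedged usage sketch of _subsample (shapes assumed): for X of shape (batch, length, channels)
# and hz=0.5 with uniform=True, every other timestep is kept and the returned timestamps record
# which original indices survived.
#
#   X = torch.randn(4, 10, 3)
#   X_sub, T_sub = _subsample(X, hz=0.5, uniform=True)
#   # X_sub: (4, 5, 3); T_sub: (4, 5, 1) containing [0, 2, 4, 6, 8]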
def postprocess_data(
train_X, val_X, test_X,
train_y, val_y, test_y,
train_final_index, val_final_index, test_final_index,
train_hz=1,
eval_hz=1,
train_uniform=True,
eval_uniform=True,
timestamp=False,
train_ts=1,
eval_ts=1,
):
"""
train_hz, eval_hz: subsampling multiplier of original data
e.g. train_hz=0.5 means data is sampled at half speed, so remove every other element of the sequence
Since the original data is sampled from a trajectory at 200Hz, this corresponds to a sampling rate of 100Hz
train_uniform, eval_uniform: whether subsampling is uniformly spaced or random
timestamp: data comes with timestamps
train_ts, eval_ts: timestamp multiplier
Example configurations:
train_hz=1.0, eval_hz=0.5, {train,eval}_uniform=True, timestamp=False
- non-timestamped, uniformly sampled data, where evaluation sequences have every other element removed
{train,eval}_uniform=False, timestamp=True, train_ts=1.0, eval_ts=0.5
- timestamped, randomly sampled data, where evaluation sequences have timestamps halved
    Both of the above configurations test train->evaluation generalization under a halved timescale, arising either from the measurement sampling rate decreasing (200Hz -> 100Hz) or from the subject drawing half as fast.
"""
train_X, train_T = _subsample(train_X, train_hz, train_uniform)
val_X, val_T = _subsample(val_X, eval_hz, eval_uniform)
test_X, test_T = _subsample(test_X, eval_hz, eval_uniform)
if timestamp:
train_X = torch.cat([train_ts*train_T, train_X], dim=-1)
val_X = torch.cat([eval_ts*val_T, val_X], dim=-1)
test_X = torch.cat([eval_ts*test_T, test_X], dim=-1)
train_dataset, val_dataset, test_dataset = wrap_data(
train_X, val_X, test_X,
train_y, val_y, test_y,
train_final_index, val_final_index, test_final_index
)
return train_dataset, val_dataset, test_dataset
if __name__ == '__main__':
*data, numclasses, input_channels = get_data(
'CharacterTrajectories',
intensity=False,
)
train_dataset, val_dataset, test_dataset = postprocess_data(
*data,
train_hz=1,
eval_hz=0.5,
train_uniform=True,
eval_uniform=False,
timestamp=True,
train_ts=1,
eval_ts=0.5,
)
| hippo-code-master | datasets/uea.py |
import os
dir_path = os.path.dirname(os.path.abspath(__file__))
import random
import torch
from torch import nn
from torch.nn import functional as F
from torchvision import datasets, transforms
from . import copying, adding
from . import utils
from .tasks import BinaryClassification, MulticlassClassification, MSERegression
class DatasetBase():
registry = {}
# https://www.python.org/dev/peps/pep-0487/#subclass-registration
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# Only register classes with @name attribute
if hasattr(cls, 'name'):
cls.registry[cls.name] = cls
def __init__(self, dataset_cfg, path=dir_path):
self.dataset_cfg = dataset_cfg
self.path = path
def prepare_data(self):
raise NotImplementedError
def split_train_val(self, ratio=0.9):
train_len = int(len(self.train) * ratio)
self.train, self.val = torch.utils.data.random_split(self.train, (train_len, len(self.train) - train_len))
def prepare_dataloader(self, batch_size, **kwargs):
self.train_loader = torch.utils.data.DataLoader(self.train, batch_size=batch_size, shuffle=True, **kwargs)
self.val_loader = torch.utils.data.DataLoader(self.val, batch_size=batch_size, shuffle=False, **kwargs)
self.test_loader = torch.utils.data.DataLoader(self.test, batch_size=batch_size, shuffle=False, **kwargs)
def __str__(self):
return self.name if hasattr(self, 'name') else self.__name__
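# A hedged usage sketch of the registry populated by __init_subclass__ above (the config object
# and its attributes are assumed to match whatever the chosen dataset class expects):
#
#   cls = DatasetBase.registry['mnist']
#   dataset = cls(dataset_cfg)
#   dataset.prepare_data()
#   dataset.prepare_dataloader(batch_size=50)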
class MNIST(DatasetBase, MulticlassClassification):
name = 'mnist'
input_size = 1
output_size = 10
output_len = 0
N = 784
def prepare_data(self):
transform_list = [transforms.ToTensor(),
transforms.Lambda(lambda x: x.view(self.input_size, self.N).t())] # (N, input_size)
if self.dataset_cfg.permute:
# below is another permutation that other works have used
# permute = np.random.RandomState(92916)
# permutation = torch.LongTensor(permute.permutation(784))
permutation = utils.bitreversal_permutation(self.N)
transform_list.append(transforms.Lambda(lambda x: x[permutation]))
transform = transforms.Compose(transform_list)
self.train = datasets.MNIST(f'{self.path}/{self.name}', train=True, download=True, transform=transform)
self.test = datasets.MNIST(f'{self.path}/{self.name}', train=False, transform=transform)
self.split_train_val()
def __str__(self):
return f"{'p' if self.dataset_cfg.permute else 's'}{self.name}"
class Copying(DatasetBase, MulticlassClassification):
name = 'copying'
def __init__(self, dataset_cfg, path=dir_path):
super().__init__(dataset_cfg, path)
self.input_size = dataset_cfg.A
self.output_size = dataset_cfg.A
self.output_len = dataset_cfg.M
self.N = dataset_cfg.L + 2 * dataset_cfg.M
def prepare_data(self):
cfg = self.dataset_cfg
self.train = copying.copying_static_dataset(cfg.L, cfg.M, cfg.A, cfg.variable, cfg.samples)
self.test = copying.copying_static_dataset(cfg.L, cfg.M, cfg.A, cfg.variable, cfg.test_samples)
self.split_train_val()
def __str__(self):
return f"{self.name}{self.dataset_cfg.L}{'v' if self.dataset_cfg.variable else ''}"
class Adding(DatasetBase, MSERegression):
name = 'adding'
def __init__(self, dataset_cfg, path=dir_path):
super().__init__(dataset_cfg, path)
self.input_size = 2
self.output_size = 1
self.output_len = 0
self.N = dataset_cfg.L
def prepare_data(self):
cfg = self.dataset_cfg
self.train = adding.adding_static_dataset(cfg.L, cfg.samples)
self.test = adding.adding_static_dataset(cfg.L, cfg.test_samples)
self.split_train_val()
def __str__(self):
return f"{self.name}{self.dataset_cfg.L}"
# Wrap the data loader with callback function
class LoaderWCallback:
def __init__(self, loader, callback_fn):
self.loader = loader
self.callback_fn = callback_fn
def __len__(self):
return len(self.loader)
def __iter__(self):
self.loader_iter = iter(self.loader)
return self
def __next__(self):
return self.callback_fn(next(self.loader_iter))
class IMDB(DatasetBase, BinaryClassification):
name = 'imdb'
output_size = 1
output_len = 0
def __init__(self, dataset_cfg, path=dir_path):
super().__init__(dataset_cfg, path)
self.input_size = dataset_cfg.vocab_size
self.N = dataset_cfg.max_length
# https://github.com/bentrevett/pytorch-sentiment-analysis/issues/6
def tokenize_once(self):
import torchtext
from torchtext import data
TEXT = data.Field(tokenize='spacy')
LABEL = data.LabelField()
train_data, test_data = torchtext.datasets.IMDB.splits(TEXT, LABEL, root=f'{self.path}')
train_examples = [vars(t) for t in train_data]
test_examples = [vars(t) for t in test_data]
import json
with open(f'{self.path}/{self.name}/train.json', 'w+') as f:
for example in train_examples:
json.dump(example, f)
f.write('\n')
with open(f'{self.path}/{self.name}/test.json', 'w+') as f:
for example in test_examples:
json.dump(example, f)
f.write('\n')
def prepare_data(self):
if not os.path.exists(f'{self.path}/{self.name}/train.json'):
self.tokenize_once()
import torchtext
from torchtext import data
TEXT = data.Field(batch_first=True, include_lengths=True)
LABEL = data.LabelField(dtype=torch.float)
fields = {'text': ('text', TEXT), 'label': ('label', LABEL)}
self.train, self.test = data.TabularDataset.splits(
path = f'{self.path}/{self.name}',
train = 'train.json',
test = 'test.json',
format = 'json',
fields = fields
)
self.train, self.val = self.train.split(0.9)
TEXT.build_vocab(self.train, max_size=self.input_size - 2) # Need 2 extra for <unk> and <pad>
LABEL.build_vocab(self.train)
def prepare_dataloader(self, batch_size, **kwargs):
from torchtext import data
self.train_loader, self.val_loader, self.test_loader = data.BucketIterator.splits(
(self.train, self.val, self.test),
shuffle=True,
sort_key=lambda ex: len(ex.text),
batch_size = batch_size)
def postprocess(batch): # make the loader from torchtext compatible with Pytorch's loader
x, lens = batch.text
x = x[:self.N]
lens = torch.clamp(lens, max=self.N)
return x, batch.label, lens
self.train_loader = LoaderWCallback(self.train_loader, postprocess)
self.val_loader = LoaderWCallback(self.val_loader, postprocess)
self.test_loader = LoaderWCallback(self.test_loader, postprocess)
class CharacterTrajectories(DatasetBase, MulticlassClassification):
""" CharacterTrajectories dataset from the UCI Machine Learning archive.
See datasets.uea.postprocess_data for dataset configuration settings.
"""
name = 'ct'
input_size = 3
output_size = 20
output_len = 0
def __init__(self, dataset_cfg, path=dir_path):
super().__init__(dataset_cfg, path)
if self.dataset_cfg.timestamp:
self.input_size += 1
def prepare_data(self):
from datasets import uea
cfg = self.dataset_cfg
*data, num_classes, input_channels = uea.get_data(
'CharacterTrajectories',
intensity=False,
)
train_dataset, val_dataset, test_dataset = uea.postprocess_data(
*data,
train_hz=cfg.train_hz,
eval_hz=cfg.eval_hz,
train_uniform=cfg.train_uniform,
eval_uniform=cfg.eval_uniform,
timestamp=cfg.timestamp,
train_ts=cfg.train_ts,
eval_ts=cfg.eval_ts,
)
self.train = train_dataset
self.val = val_dataset
self.test = test_dataset
assert num_classes == self.output_size, f"Output size should be {num_classes}"
| hippo-code-master | datasets/__init__.py |
import math
import numpy as np
import torch
def bitreversal_po2(n):
m = int(math.log(n)/math.log(2))
perm = np.arange(n).reshape(n,1)
for i in range(m):
n1 = perm.shape[0]//2
perm = np.hstack((perm[:n1],perm[n1:]))
return perm.squeeze(0)
def bitreversal_permutation(n):
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
perm = bitreversal_po2(N)
return np.extract(perm < n, perm)
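# A quick worked example (values checked by hand for n=8): index i maps to the integer whose
# 3-bit binary representation is reversed.
#
#   bitreversal_permutation(8)  # -> array([0, 4, 2, 6, 1, 5, 3, 7])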
# For language modeling
# Adapted from https://github.com/salesforce/awd-lstm-lm/blob/master/utils.py
def repackage_hidden(h):
"""Wraps hidden states in new Tensors,
to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
return data
def get_batch(source, i, seq_len):
seq_len = min(seq_len, len(source) - 1 - i)
data = source[i:i+seq_len].t()
target = source[i+1:i+1+seq_len].t().reshape(-1)
return data, target
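# A hedged usage sketch (`corpus` is an assumed 1-D LongTensor of token ids):
#
#   data = batchify(corpus, bsz=20)          # (nbatch, 20); columns are independent streams
#   x, y = get_batch(data, i=0, seq_len=35)  # x: (20, 35) inputs, y: (700,) next-token targets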
| hippo-code-master | datasets/utils.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch.utils.data.dataset import IterableDataset
import numpy as np
def torch_adding_data(L, batch_shape=()):
assert L >= 2
mid = L//2
idx0 = torch.randint(low=0, high=mid, size=batch_shape)
idx1 = torch.randint(low=0, high=L-mid, size=batch_shape)
idx = torch.cat((F.one_hot(idx0, mid), F.one_hot(idx1, L-mid)), dim=-1).float() # (batch_shape, L)
unif = torch.empty(batch_shape+(L,))
unif.uniform_(0., 1.)
x = torch.stack((unif, idx), dim=-1) # (batch_shape, L, 2)
y = torch.sum(unif*idx, dim=-1, keepdim=True) # (batch_shape, 1)
return x, y
def adding_static_dataset(L, samples):
all_x, all_y = torch_adding_data(L, batch_shape=(samples,))
print("Constructing Adding dataset of shape", all_x.shape)
ds = torch.utils.data.TensorDataset(all_x, all_y)
return ds
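# A minimal usage sketch: wrap the static dataset in a DataLoader for training.
#
#   ds = adding_static_dataset(L=100, samples=1000)
#   loader = torch.utils.data.DataLoader(ds, batch_size=50, shuffle=True)
#   x, y = next(iter(loader))  # x: (50, 100, 2), y: (50, 1)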
| hippo-code-master | datasets/adding.py |
import math
import unittest
import numpy as np
from scipy import linalg as la
import torch
import torch.nn.functional as F
import hippo
# from .op import transition
def transition(measure, N, **measure_args):
""" A, B transition matrices for different measures """
if measure == 'lagt':
# A_l = (1 - dt / 4) * np.eye(N) + dt / 2 * np.tril(np.ones((N, N)))
# A_r = (1 + dt / 4) * np.eye(N) - dt / 2 * np.tril(np.ones((N, N)))
# alpha = dt / 2 / (1 - dt / 4)
# col = -alpha / (1 + alpha) ** np.arange(1, N + 1)
# col[0] += 1
# A_l_inv = la.toeplitz(col / (1 - dt / 4), np.zeros(N))
b = measure_args.get('beta', 1.0)
A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
    elif measure == 'tlagt':
# beta = 1 corresponds to no tilt
# b = measure_args['beta']
b = measure_args.get('beta', 1.0)
A = (1.-b)/2 * np.eye(N) - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
elif measure == 'legt':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1)[:, None] # / theta
j, i = np.meshgrid(Q, Q)
A = np.where(i < j, -1, (-1.)**(i-j+1)) * R
B = (-1.)**Q[:, None] * R
elif measure == 'legs':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
return A, B
def slo(input, N, d_t=1.0, method='trapezoidal'):
q = np.arange(N)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)
# d, V = np.linalg.eig(A)
# d, V = d[::-1], V[:, ::-1]
c = np.zeros(N, dtype=np.float64)
c[0] = input[0]
for t in range(1, input.shape[0]):
At = A / t
Bt = B / t
u = input[t]
if method == 'euler' or method == 'forward_diff':
c = (np.eye(N) + d_t * At) @ c + d_t * Bt * u
elif method == 'backward_diff' or method == 'backward_euler':
c = la.solve_triangular(np.eye(N) - d_t * At, c + d_t * Bt * u, lower=True)
elif method == 'bilinear' or method == 'tustin' or method == 'trapezoidal':
c = la.solve_triangular(np.eye(N) - d_t / 2 * At, (np.eye(N) + d_t / 2 * At) @ c + d_t * Bt * u, lower=True)
elif method == 'zoh':
# aa, bb, _, _, _ = signal.cont2discrete((A, B[:, None], np.ones((1, N)), np.zeros((1,))), dt=math.log(t + d_t) - math.log(t), method='zoh')
# bb = bb.squeeze(-1)
aa = la.expm(A * (math.log(t + d_t) - math.log(t)))
bb = la.solve_triangular(A, aa @ B - B, lower=True)
            c = aa @ c + bb * u
else:
assert False, f'method {method} not supported'
# f_approx = (c @ (T @ ss.eval_legendre(np.arange(N)[:, None], 2 * t_vals / T_max - 1)))
return c
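# A hedged reconstruction sketch mirroring the commented `f_approx` formula above; `length`
# plays the role of T_max, and scipy.special is an assumed extra import.
def slo_reconstruct(c, length):
    """ Evaluate the scaled-Legendre expansion encoded by coefficients c on the time grid 1..length. """
    from scipy import special as ss
    N = c.shape[0]
    T = np.sqrt(np.diag(2 * np.arange(N) + 1))
    t_vals = np.arange(1, length + 1)
    # eval_legendre broadcasts orders (N, 1) against rescaled times (length,) -> (N, length)
    return c @ (T @ ss.eval_legendre(np.arange(N)[:, None], 2 * t_vals / length - 1))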
class LegSTest(unittest.TestCase):
def setUp(self):
self.rtol = 10
self.atol = 1e-3
def test_legs_euler_forward_cpu(self):
batch_size = 10
memsize = 23
memorder = 587
dt = 0.27
# batch_size = 1
# memsize = 1
# memorder = 5
# dt = 0.5
A, B = transition('legs', memorder)
A = torch.Tensor(A)
B = torch.Tensor(B).squeeze(-1)
x = torch.randn(batch_size, memsize, memorder)
input = torch.randn(batch_size, memsize)
out = hippo.legs_euler_forward(x, input, dt)
out_torch = x + dt * F.linear(x, A) + dt * input.unsqueeze(-1) * B
out_double = x.double() + dt * F.linear(x.double(), A.double()) + dt * input.unsqueeze(-1).double() * B.double()
err = (out - out_double).abs().max().item()
err_torch = (out_torch - out_double).abs().max().item()
# print(out_double)
print((out - out_double).abs().max().item())
print((out_torch - out_double).abs().max().item())
self.assertTrue(err <= err_torch * (1 + self.rtol) + self.atol,
((out - out_torch).abs().max().item()))
def test_legs_euler_backward_cpu(self):
batch_size = 10
memsize = 23
memorder = 587
dt = 0.27
# batch_size = 1
# memsize = 1
# memorder = 5
# dt = 0.5
A, B = transition('legs', memorder)
A_inv = la.solve_triangular(np.eye(memorder) - dt * A, np.eye(memorder), lower=True)
B_inv = la.solve_triangular(np.eye(memorder) - dt * A, B, lower=True)
A_inv = torch.Tensor(A_inv)
B_inv = torch.Tensor(B_inv).squeeze(-1)
x = torch.randn(batch_size, memsize, memorder)
input = torch.randn(batch_size, memsize)
out = hippo.legs_euler_backward(x, input, dt)
out_torch = F.linear(x, A_inv) + dt * input.unsqueeze(-1) * B_inv
out_double = F.linear(x.double(), A_inv.double()) + dt * input.unsqueeze(-1).double() * B_inv.double()
err = (out - out_double).abs().max().item()
err_torch = (out_torch - out_double).abs().max().item()
# print(out_double)
print((out - out_double).abs().max().item())
print((out_torch - out_double).abs().max().item())
self.assertTrue(err <= err_torch * (1 + self.rtol) + self.atol,
((out - out_torch).abs().max().item()))
def test_legs_trapezoidal_cpu(self):
batch_size = 10
memsize = 23
memorder = 587
dt = 0.27
# batch_size = 1
# memsize = 1
# memorder = 5
# dt = 0.5
A, B = transition('legs', memorder)
trap_A_inv = la.solve_triangular(np.eye(memorder) - dt / 2 * A, np.eye(memorder) + dt / 2 * A, lower=True)
trap_A_inv = torch.Tensor(trap_A_inv)
trap_B_inv = la.solve_triangular(np.eye(memorder) - dt / 2 * A, B, lower=True)
trap_B_inv = torch.Tensor(trap_B_inv).squeeze(-1)
x = torch.randn(batch_size, memsize, memorder)
input = torch.randn(batch_size, memsize)
out = hippo.legs_trapezoidal(x, input, dt)
out_torch = F.linear(x, trap_A_inv) + dt * input.unsqueeze(-1) * trap_B_inv
out_double = F.linear(x.double(), trap_A_inv.double()) + dt * input.unsqueeze(-1).double() * trap_B_inv.double()
err = (out - out_double).abs().max().item()
err_torch = (out_torch - out_double).abs().max().item()
# print(out_double)
print((out - out_double).abs().max().item())
print((out_torch - out_double).abs().max().item())
self.assertTrue(err <= err_torch * (1 + self.rtol) + self.atol,
((out - out_torch).abs().max().item()))
def test_function_approx(self):
length = int(1e3)
memorder = 256
input = torch.randn(length, dtype=torch.float64)
mem = hippo.legs_function_approx_trapezoidal(input, memorder)
mem_np = torch.Tensor(slo(input.cpu().numpy().astype(np.float64), memorder)).double()
self.assertTrue(torch.allclose(mem, mem_np))
def timeit(fn, nsteps):
import time
fn()
start = time.perf_counter()
for _ in range(nsteps):
fn()
end = time.perf_counter()
return (end - start) / nsteps
def benchmark():
torch.set_num_threads(1)
batch_size = 1
memsize = 1
memorder = 256
dt = 0.27
A, B = transition('legs', memorder)
A_inv = la.solve_triangular(np.eye(memorder) - dt * A, np.eye(memorder), lower=True)
B_inv = la.solve_triangular(np.eye(memorder) - dt * A, B, lower=True)
A_inv = torch.Tensor(A_inv)
B_inv = torch.Tensor(B_inv).squeeze(-1)
trap_A_inv = la.solve_triangular(np.eye(memorder) - dt / 2 * A, np.eye(memorder) + dt / 2 * A, lower=True)
trap_A_inv = torch.Tensor(trap_A_inv)
trap_B_inv = la.solve_triangular(np.eye(memorder) - dt / 2 * A, B, lower=True)
trap_B_inv = torch.Tensor(trap_B_inv).squeeze(-1)
A = torch.Tensor(A)
B = torch.Tensor(B).squeeze(-1)
x = torch.randn(batch_size, memsize, memorder)
input = torch.randn(batch_size, memsize)
nsteps = 10000
euler_forward_fn = lambda: hippo.legs_euler_forward(x, input, dt)
euler_forward_torch_fn = lambda: x + dt * F.linear(x, A) + dt * input.unsqueeze(-1) * B
euler_backward_fn = lambda: hippo.legs_euler_backward(x, input, dt)
euler_backward_torch_fn = lambda: F.linear(x, A_inv) + dt * input.unsqueeze(-1) * B_inv
trapezoidal_fn = lambda: hippo.legs_trapezoidal(x, input, dt)
trapezoidal_torch_fn = lambda: F.linear(x, trap_A_inv) + dt * input.unsqueeze(-1) * trap_B_inv
print(f'Euler forward C++: {timeit(euler_forward_fn, nsteps)}s')
print(f'Euler backward C++: {timeit(euler_backward_fn, nsteps)}s')
print(f'Trapezoidal C++: {timeit(trapezoidal_fn, nsteps)}s')
print(f'Euler forward Pytorch: {timeit(euler_forward_torch_fn, nsteps)}s')
print(f'Euler backward Pytorch: {timeit(euler_backward_torch_fn, nsteps)}s')
print(f'Trapezoidal Pytorch: {timeit(trapezoidal_torch_fn, nsteps)}s')
length = int(1e6)
input = torch.randn(length, dtype=torch.float64)
trap_func_approx_fn = lambda: hippo.legs_function_approx_trapezoidal(input, memorder)
nsteps = 1
print(f'Function approx trapezoidal C++: {timeit(trap_func_approx_fn, nsteps)}s')
if __name__ == "__main__":
benchmark()
| hippo-code-master | tests/test_legs_extension.py |
import math
import unittest
import numpy as np
from scipy import linalg as la
import torch
import torch.nn.functional as F
import hippo
# from .op import transition
def transition(measure, N, **measure_args):
""" A, B transition matrices for different measures """
if measure == 'lagt':
# A_l = (1 - dt / 4) * np.eye(N) + dt / 2 * np.tril(np.ones((N, N)))
# A_r = (1 + dt / 4) * np.eye(N) - dt / 2 * np.tril(np.ones((N, N)))
# alpha = dt / 2 / (1 - dt / 4)
# col = -alpha / (1 + alpha) ** np.arange(1, N + 1)
# col[0] += 1
# A_l_inv = la.toeplitz(col / (1 - dt / 4), np.zeros(N))
b = measure_args.get('beta', 1.0)
A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
    elif measure == 'tlagt':
# beta = 1 corresponds to no tilt
# b = measure_args['beta']
b = measure_args.get('beta', 1.0)
A = (1.-b)/2 * np.eye(N) - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
elif measure == 'legt':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1)[:, None] # / theta
j, i = np.meshgrid(Q, Q)
A = np.where(i < j, -1, (-1.)**(i-j+1)) * R
B = (-1.)**Q[:, None] * R
    elif measure == 'legs':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
return A, B
class LegtTest(unittest.TestCase):
def setUp(self):
self.rtol = 10
self.atol = 1e-3
def test_legt_euler_forward_cpu(self):
batch_size = 10
memsize = 23
memorder = 587
dt = 0.27
# batch_size = 1
# memsize = 1
# memorder = 5
# dt = 0.5
A, B = transition('legt', memorder)
A = torch.Tensor(A)
B = torch.Tensor(B).squeeze(-1)
x = torch.randn(batch_size, memsize, memorder)
input = torch.randn(batch_size, memsize)
out = hippo.legt_euler_forward(x, input, dt)
out_torch = x + dt * F.linear(x, A) + dt * input.unsqueeze(-1) * B
out_double = x.double() + dt * F.linear(x.double(), A.double()) + dt * input.unsqueeze(-1).double() * B.double()
err = (out - out_double).abs().max().item()
err_torch = (out_torch - out_double).abs().max().item()
# print(out_double)
print((out - out_double).abs().max().item())
print((out_torch - out_double).abs().max().item())
self.assertTrue(err <= err_torch * (1 + self.rtol) + self.atol,
((out - out_torch).abs().max().item()))
def timeit(fn, nsteps):
import time
fn()
start = time.perf_counter()
for _ in range(nsteps):
fn()
end = time.perf_counter()
return (end - start) / nsteps
def benchmark():
torch.set_num_threads(1)
batch_size = 1
memsize = 1
memorder = 256
dt = 0.27
A, B = transition('legt', memorder)
A = torch.Tensor(A)
B = torch.Tensor(B).squeeze(-1)
x = torch.randn(batch_size, memsize, memorder)
input = torch.randn(batch_size, memsize)
nsteps = 10000
euler_forward_fn = lambda: hippo.legt_euler_forward(x, input, dt)
euler_forward_torch_fn = lambda: x + dt * F.linear(x, A) + dt * input.unsqueeze(-1) * B
print(f'Euler forward C++: {timeit(euler_forward_fn, nsteps)}s')
print(f'Euler forward Pytorch: {timeit(euler_forward_torch_fn, nsteps)}s')
if __name__ == "__main__":
benchmark()
| hippo-code-master | tests/test_legt_extension.py |
import numpy as np
from keras import backend as K
from keras import activations, initializers
from keras.initializers import Constant, Initializer
from keras.layers import Layer
from scipy import signal
from scipy import linalg as la
from scipy import special as ss  # needed by the generalized-Laguerre ('glagt') branch of transition()
import math
import tensorflow as tf
def transition(measure, N, **measure_args):
""" A, B transition matrices for different measures
measure: the type of measure
legt - Legendre (translated)
legs - Legendre (scaled)
glagt - generalized Laguerre (translated)
lagt, tlagt - previous versions of (tilted) Laguerre with slightly different normalization
"""
# Laguerre (translated)
if measure == 'lagt':
b = measure_args.get('beta', 1.0)
A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
if measure == 'tlagt':
# beta = 1 corresponds to no tilt
b = measure_args.get('beta', 1.0)
A = (1.-b)/2 * np.eye(N) - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
# Generalized Laguerre
# alpha 0, beta small is most stable (limits to the 'lagt' measure)
# alpha 0, beta 1 has transition matrix A = [lower triangular 1]
if measure == 'glagt':
alpha = measure_args.get('alpha', 0.0)
beta = measure_args.get('beta', 0.01)
A = -np.eye(N) * (1 + beta) / 2 - np.tril(np.ones((N, N)), -1)
B = ss.binom(alpha + np.arange(N), np.arange(N))[:, None]
L = np.exp(.5 * (ss.gammaln(np.arange(N)+alpha+1) - ss.gammaln(np.arange(N)+1)))
A = (1./L[:, None]) * A * L[None, :]
B = (1./L[:, None]) * B * np.exp(-.5 * ss.gammaln(1-alpha)) * beta**((1-alpha)/2)
# Legendre (translated)
elif measure == 'legt':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1) ** .5
j, i = np.meshgrid(Q, Q)
A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :]
B = R[:, None]
A = -A
# LMU: equivalent to LegT up to normalization
elif measure == 'lmu':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1)[:, None] # / theta
j, i = np.meshgrid(Q, Q)
A = np.where(i < j, -1, (-1.)**(i-j+1)) * R
B = (-1.)**Q[:, None] * R
# Legendre (scaled)
elif measure == 'legs':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
return A, B
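# A hedged usage sketch (mirrors the cont2discrete call in HippoTCell.__init__ below), where
# theta is the assumed length of the sliding window in timesteps:
#
#   A, B = transition('legt', 64)
#   C, D = np.ones((1, 64)), np.zeros((1,))
#   dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D), dt=1./theta, method='zoh')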
forward_aliases = ['euler', 'forward_euler', 'forward', 'forward_diff']
backward_aliases = ['backward', 'backward_diff', 'backward_euler']
bilinear_aliases = ['bilinear', 'tustin', 'trapezoidal', 'trapezoid']
zoh_aliases = ['zoh']
class HippoTCell(Layer):
def __init__(self,
units,
memory_order,
theta, # relative to dt=1
measure='legt',
method='zoh',
trainable_input_encoders=True,
trainable_hidden_encoders=True,
trainable_memory_encoders=True,
trainable_input_kernel=True,
trainable_hidden_kernel=True,
trainable_memory_kernel=True,
trainable_A=False,
trainable_B=False,
input_encoders_initializer='lecun_uniform',
hidden_encoders_initializer='lecun_uniform',
memory_encoders_initializer=Constant(0), # 'lecun_uniform',
input_kernel_initializer='glorot_normal',
hidden_kernel_initializer='glorot_normal',
memory_kernel_initializer='glorot_normal',
hidden_activation='tanh',
**kwargs):
super().__init__(**kwargs)
self.units = units
self.memory_order = memory_order
self.theta = theta
self.method = method
self.trainable_input_encoders = trainable_input_encoders
self.trainable_hidden_encoders = trainable_hidden_encoders
self.trainable_memory_encoders = trainable_memory_encoders
self.trainable_input_kernel = trainable_input_kernel
self.trainable_hidden_kernel = trainable_hidden_kernel
self.trainable_memory_kernel = trainable_memory_kernel
self.trainable_A = trainable_A
self.trainable_B = trainable_B
self.input_encoders_initializer = initializers.get(
input_encoders_initializer)
self.hidden_encoders_initializer = initializers.get(
hidden_encoders_initializer)
self.memory_encoders_initializer = initializers.get(
memory_encoders_initializer)
self.input_kernel_initializer = initializers.get(
input_kernel_initializer)
self.hidden_kernel_initializer = initializers.get(
hidden_kernel_initializer)
self.memory_kernel_initializer = initializers.get(
memory_kernel_initializer)
self.hidden_activation = activations.get(hidden_activation)
A, B = transition(measure, memory_order)
# Construct A and B matrices
C = np.ones((1, memory_order))
D = np.zeros((1,))
dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D), dt=1./theta, method=method)
self._A = dA - np.eye(memory_order) # puts into form: x += Ax
self._B = dB
self.state_size = (self.units, self.memory_order)
self.output_size = self.units
def build(self, input_shape):
input_dim = input_shape[-1]
self.input_encoders = self.add_weight(
name='input_encoders',
shape=(input_dim, 1),
initializer=self.input_encoders_initializer,
trainable=self.trainable_input_encoders)
self.hidden_encoders = self.add_weight(
name='hidden_encoders',
shape=(self.units, 1),
initializer=self.hidden_encoders_initializer,
trainable=self.trainable_hidden_encoders)
self.memory_encoders = self.add_weight(
name='memory_encoders',
shape=(self.memory_order, 1),
initializer=self.memory_encoders_initializer,
trainable=self.trainable_memory_encoders)
self.input_kernel = self.add_weight(
name='input_kernel',
shape=(input_dim, self.units),
initializer=self.input_kernel_initializer,
trainable=self.trainable_input_kernel)
self.hidden_kernel = self.add_weight(
name='hidden_kernel',
shape=(self.units, self.units),
initializer=self.hidden_kernel_initializer,
trainable=self.trainable_hidden_kernel)
self.memory_kernel = self.add_weight(
name='memory_kernel',
shape=(self.memory_order, self.units),
initializer=self.memory_kernel_initializer,
trainable=self.trainable_memory_kernel)
self.AT = self.add_weight(
name='AT',
shape=(self.memory_order, self.memory_order),
initializer=Constant(self._A.T), # note: transposed
trainable=self.trainable_A)
self.BT = self.add_weight(
name='BT',
shape=(1, self.memory_order), # system is SISO
initializer=Constant(self._B.T), # note: transposed
trainable=self.trainable_B)
self.built = True
def call(self, inputs, states):
h, m = states
u = (K.dot(inputs, self.input_encoders) +
K.dot(h, self.hidden_encoders) +
K.dot(m, self.memory_encoders))
m = m + K.dot(m, self.AT) + K.dot(u, self.BT)
h = self.hidden_activation(
K.dot(inputs, self.input_kernel) +
K.dot(h, self.hidden_kernel) +
K.dot(m, self.memory_kernel))
return h, [h, m]
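# A hedged usage sketch (assumes the cell is compatible with tf.keras.layers.RNN and that
# inputs have shape (batch, time, features)):
#
#   cell = HippoTCell(units=64, memory_order=64, theta=100)
#   outputs = tf.keras.layers.RNN(cell)(tf.zeros((8, 100, 1)))  # -> (8, 64)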
class HippoSCell(Layer):
def __init__(self,
units,
memory_order,
measure='legt',
method='zoh',
max_length=256,
trainable_input_encoders=True,
trainable_hidden_encoders=True,
trainable_memory_encoders=True,
trainable_input_kernel=True,
trainable_hidden_kernel=True,
trainable_memory_kernel=True,
trainable_A=False,
trainable_B=False,
input_encoders_initializer='lecun_uniform',
hidden_encoders_initializer='lecun_uniform',
memory_encoders_initializer=Constant(0), # 'lecun_uniform',
input_kernel_initializer='glorot_normal',
hidden_kernel_initializer='glorot_normal',
memory_kernel_initializer='glorot_normal',
hidden_activation='tanh',
gate=False,
**kwargs):
super().__init__(**kwargs)
self.units = units
self.memory_order = memory_order
self.method = method
self.max_length = max_length
self.trainable_input_encoders = trainable_input_encoders
self.trainable_hidden_encoders = trainable_hidden_encoders
self.trainable_memory_encoders = trainable_memory_encoders
self.trainable_input_kernel = trainable_input_kernel
self.trainable_hidden_kernel = trainable_hidden_kernel
self.trainable_memory_kernel = trainable_memory_kernel
self.trainable_A = trainable_A
self.trainable_B = trainable_B
self.gate = gate
self.input_encoders_initializer = initializers.get(
input_encoders_initializer)
self.hidden_encoders_initializer = initializers.get(
hidden_encoders_initializer)
self.memory_encoders_initializer = initializers.get(
memory_encoders_initializer)
self.input_kernel_initializer = initializers.get(
input_kernel_initializer)
self.hidden_kernel_initializer = initializers.get(
hidden_kernel_initializer)
self.memory_kernel_initializer = initializers.get(
memory_kernel_initializer)
self.hidden_activation = activations.get(hidden_activation)
A, B = transition(measure, memory_order)
# Construct A and B matrices
A_stacked = np.empty((max_length, memory_order, memory_order), dtype=A.dtype)
B_stacked = np.empty((max_length, memory_order), dtype=B.dtype)
B = B[:,0]
N = memory_order
for t in range(1, max_length + 1):
At = A / t
Bt = B / t
# if discretization in forward_aliases:
if method in forward_aliases:
A_stacked[t - 1] = np.eye(N) + At
B_stacked[t - 1] = Bt
# elif discretization in backward_aliases:
elif method in backward_aliases:
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, np.eye(N), lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, Bt, lower=True)
elif method in bilinear_aliases:
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, Bt, lower=True)
elif method in zoh_aliases:
A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
B_stacked[t - 1] = la.solve_triangular(A, A_stacked[t - 1] @ B - B, lower=True)
B_stacked = B_stacked[:, :, None]
        A_stacked -= np.eye(memory_order)  # puts into form: x += Ax
        self._A = A_stacked
self._B = B_stacked
self.state_size = (self.units, self.memory_order, 1)
self.output_size = self.units
def build(self, input_shape):
input_dim = input_shape[-1]
self.input_encoders = self.add_weight(
name='input_encoders',
shape=(input_dim, 1),
initializer=self.input_encoders_initializer,
trainable=self.trainable_input_encoders)
self.hidden_encoders = self.add_weight(
name='hidden_encoders',
shape=(self.units, 1),
initializer=self.hidden_encoders_initializer,
trainable=self.trainable_hidden_encoders)
self.memory_encoders = self.add_weight(
name='memory_encoders',
shape=(self.memory_order, 1),
initializer=self.memory_encoders_initializer,
trainable=self.trainable_memory_encoders)
self.input_kernel = self.add_weight(
name='input_kernel',
shape=(input_dim, self.units),
initializer=self.input_kernel_initializer,
trainable=self.trainable_input_kernel)
if self.trainable_hidden_kernel:
self.hidden_kernel = self.add_weight(
name='hidden_kernel',
shape=(self.units, self.units),
initializer=self.hidden_kernel_initializer,
trainable=self.trainable_hidden_kernel)
else:
self.hidden_kernel = self.add_weight(
name='hidden_kernel',
shape=(self.units, self.units),
initializer=Constant(0.),
trainable=False)
self.memory_kernel = self.add_weight(
name='memory_kernel',
shape=(self.memory_order, self.units),
initializer=self.memory_kernel_initializer,
trainable=self.trainable_memory_kernel)
self.A = self.add_weight(
name='A',
shape=(self.max_length, self.memory_order, self.memory_order),
initializer=Constant(self._A), # note: transposed
trainable=self.trainable_A)
self.B = self.add_weight(
name='B',
shape=(self.max_length, self.memory_order, 1), # system is SISO
initializer=Constant(self._B), # note: transposed
trainable=self.trainable_B)
if self.gate:
self.W_gate = self.add_weight(
name='gate',
shape=(self.units+self.memory_order, self.units), # system is SISO
initializer=initializers.get('glorot_normal'), # note: transposed
trainable=True)
self.built = True
def call(self, inputs, states):
h, m, t = states
tt = tf.cast(t, tf.int32)
tt = tt[0,0]
tt = tf.math.minimum(tt, self.max_length-1)
u = (K.dot(inputs, self.input_encoders) +
K.dot(h, self.hidden_encoders) +
K.dot(m, self.memory_encoders))
m = m + K.dot(m, tf.transpose(self.A[tt])) + K.dot(u, tf.transpose(self.B[tt]))
new_h = self.hidden_activation(
K.dot(inputs, self.input_kernel) +
K.dot(h, self.hidden_kernel) +
K.dot(m, self.memory_kernel))
if self.gate:
g = tf.sigmoid(K.dot(tf.concat([h, m], axis=-1), self.W_gate))
h = (1.-g)*h + g*new_h
else:
h = new_h
return h, [h, m, t+1]
| hippo-code-master | tensorflow/hippo.py |
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
from model.memory import LTICell, LSICell
from model.op import transition
class OPLTICell(LTICell):
# name = 'lagt'
measure = None
def __init__(self, input_size, hidden_size, memory_size=1, memory_order=-1, measure_args={},
**kwargs
):
if memory_order < 0:
memory_order = hidden_size
# A, B = transition(type(self).measure, memory_order)
A, B = transition(type(self).measure, memory_order, **measure_args)
super().__init__(input_size, hidden_size, memory_size, memory_order, A, B, **kwargs)
class OPLSICell(LSICell):
# name = 'lagt'
measure = None
def __init__(self, input_size, hidden_size, memory_size=1, memory_order=-1, measure_args={},
**kwargs
):
if memory_order < 0:
memory_order = hidden_size
A, B = transition(type(self).measure, memory_order, **measure_args)
super().__init__(input_size, hidden_size, memory_size, memory_order, A, B, **kwargs)
# TODO there should be a way to declare the parent class programatically to avoid duplicating this
# i.e. have a single OPCell that calls the appropriate superclass constructor
# for measure in ['lagt', 'legt', 'legs']:
# type('t'+measure, OPLTICell, {'measure': measure}):
# type('s'+measure, OPLSICell, {'measure': measure}):
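# A sketch of the programmatic declaration the TODO above hints at (untested, kept commented
# so the explicit classes below remain the source of truth):
#
#   for measure, base in [('lagt', OPLTICell), ('legt', OPLTICell), ('legs', OPLSICell)]:
#       globals()[measure + 'Cell'] = type(measure, (base,), {'name': measure, 'measure': measure})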
class LegendreTranslateCell(OPLTICell):
name = 'legt'
measure = 'legt'
class LegendreTranslateSCell(OPLSICell):
name = 'legts'
measure = 'legt'
class LegendreScaleCell(OPLSICell):
name = 'legs'
measure = 'legs'
class LegendreScaleTCell(OPLTICell):
name = 'legst'
measure = 'legs'
class LaguerreTranslateCell(OPLTICell):
name = 'lagt'
measure = 'lagt'
class LaguerreTranslateSCell(OPLSICell):
name = 'lagts'
measure = 'lagt'
class LMUTCell(OPLTICell):
name = 'lmut'
measure = 'lmu'
class LMUCell(OPLTICell):
name = 'lmu'
measure = 'lmu'
def default_initializers(self):
return {
'uxh': 'uniform',
'ux': 'one',
'uh': 'zero',
'um': 'zero',
'hxm': 'xavier',
'hx': 'zero',
'hh': 'zero',
'hm': 'xavier',
}
def default_architecture(self):
return {
'ux': True,
'um': True,
'hx': True,
'hm': True,
'hh': True,
'bias': False,
}
def __init__(self, input_size, hidden_size, theta=100, dt=1., **kwargs):
super().__init__(input_size, hidden_size, dt=dt/theta, **kwargs)
class LegendreScaleNoiseCell(LTICell):
name = 'legsn'
measure = 'legs'
def __init__(self, input_size, hidden_size, memory_size=1, memory_order=-1,
**kwargs
):
if memory_order < 0:
memory_order = hidden_size
A, B = transition(type(self).measure, memory_order)
N = memory_order
A = A + np.random.normal(size=(N, N)) / N
super().__init__(input_size, hidden_size, memory_size, memory_order, A, B, **kwargs)
| hippo-code-master | model/opcell.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from scipy import signal
from scipy import linalg as la
from functools import partial
from model.rnncell import RNNCell
from model.orthogonalcell import OrthogonalLinear
from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer
from model.op import LegSAdaptiveTransitionManual, LegTAdaptiveTransitionManual, LagTAdaptiveTransitionManual, TLagTAdaptiveTransitionManual
forward_aliases = ['euler', 'forward_euler', 'forward', 'forward_diff']
backward_aliases = ['backward', 'backward_diff', 'backward_euler']
bilinear_aliases = ['bilinear', 'tustin', 'trapezoidal', 'trapezoid']
zoh_aliases = ['zoh']
class MemoryCell(RNNCell):
name = None
valid_keys = ['uxh', 'ux', 'uh', 'um', 'hxm', 'hx', 'hm', 'hh', 'bias', ]
def default_initializers(self):
return {
'uxh': 'uniform',
'hxm': 'xavier',
'hx': 'xavier',
'hm': 'xavier',
'um': 'zero',
'hh': 'xavier',
}
def default_architecture(self):
return {
'ux': True,
# 'uh': True,
'um': False,
'hx': True,
'hm': True,
'hh': False,
'bias': True,
}
def __init__(self, input_size, hidden_size, memory_size, memory_order,
memory_activation='id',
gate='G', # 'N' | 'G' | UR'
memory_output=False,
**kwargs
):
self.memory_size = memory_size
self.memory_order = memory_order
self.memory_activation = memory_activation
self.gate = gate
self.memory_output = memory_output
super(MemoryCell, self).__init__(input_size, hidden_size, **kwargs)
self.input_to_hidden_size = self.input_size if self.architecture['hx'] else 0
self.input_to_memory_size = self.input_size if self.architecture['ux'] else 0
# Construct and initialize u
self.W_uxh = nn.Linear(self.input_to_memory_size + self.hidden_size, self.memory_size,
bias=self.architecture['bias'])
# nn.init.zeros_(self.W_uxh.bias)
if 'uxh' in self.initializers:
get_initializer(self.initializers['uxh'], self.memory_activation)(self.W_uxh.weight)
if 'ux' in self.initializers: # Re-init if passed in
get_initializer(self.initializers['ux'], self.memory_activation)(self.W_uxh.weight[:, :self.input_size])
if 'uh' in self.initializers: # Re-init if passed in
get_initializer(self.initializers['uh'], self.memory_activation)(self.W_uxh.weight[:, self.input_size:])
# Construct and initialize h
self.memory_to_hidden_size = self.memory_size * self.memory_order if self.architecture['hm'] else 0
preact_ctor = Linear_
preact_args = [self.input_to_hidden_size + self.memory_to_hidden_size, self.hidden_size,
self.architecture['bias']]
self.W_hxm = preact_ctor(*preact_args)
if self.initializers.get('hxm', None) is not None: # Re-init if passed in
get_initializer(self.initializers['hxm'], self.hidden_activation)(self.W_hxm.weight)
if self.initializers.get('hx', None) is not None: # Re-init if passed in
get_initializer(self.initializers['hx'], self.hidden_activation)(self.W_hxm.weight[:, :self.input_size])
if self.initializers.get('hm', None) is not None: # Re-init if passed in
get_initializer(self.initializers['hm'], self.hidden_activation)(self.W_hxm.weight[:, self.input_size:])
if self.architecture['um']:
# No bias here because the implementation is awkward otherwise, but probably doesn't matter
self.W_um = nn.Parameter(torch.Tensor(self.memory_size, self.memory_order))
get_initializer(self.initializers['um'], self.memory_activation)(self.W_um)
if self.architecture['hh']:
self.reset_hidden_to_hidden()
else:
self.W_hh = None
if self.gate is not None:
if self.architecture['hh']:
print("input to hidden size, memory to hidden size, hidden size:", self.input_to_hidden_size, self.memory_to_hidden_size, self.hidden_size)
preact_ctor = Linear_
preact_args = [self.input_to_hidden_size + self.memory_to_hidden_size + self.hidden_size, self.hidden_size,
self.architecture['bias']]
self.W_gxm = Gate(self.hidden_size, preact_ctor, preact_args, mechanism=self.gate)
def reset_parameters(self):
# super().reset_parameters()
self.hidden_activation_fn = get_activation(self.hidden_activation, self.hidden_size) # TODO figure out how to remove this duplication
self.memory_activation_fn = get_activation(self.memory_activation, self.memory_size)
def forward(self, input, state):
h, m, time_step = state
input_to_hidden = input if self.architecture['hx'] else input.new_empty((0,))
input_to_memory = input if self.architecture['ux'] else input.new_empty((0,))
# Construct the update features
memory_preact = self.W_uxh(torch.cat((input_to_memory, h), dim=-1)) # (batch, memory_size)
if self.architecture['um']:
memory_preact = memory_preact + (m * self.W_um).sum(dim=-1)
u = self.memory_activation_fn(memory_preact) # (batch, memory_size)
# Update the memory
m = self.update_memory(m, u, time_step) # (batch, memory_size, memory_order)
# Update hidden state from memory
if self.architecture['hm']:
memory_to_hidden = m.view(input.shape[0], self.memory_size*self.memory_order)
else:
memory_to_hidden = input.new_empty((0,))
m_inputs = (torch.cat((input_to_hidden, memory_to_hidden), dim=-1),)
hidden_preact = self.W_hxm(*m_inputs)
if self.architecture['hh']:
hidden_preact = hidden_preact + self.W_hh(h)
hidden = self.hidden_activation_fn(hidden_preact)
# Construct gate if necessary
if self.gate is None:
h = hidden
else:
if self.architecture['hh']:
m_inputs = torch.cat((m_inputs[0], h), -1),
g = self.W_gxm(*m_inputs)
h = (1.-g) * h + g * hidden
next_state = (h, m, time_step + 1)
output = self.output(next_state)
return output, next_state
def update_memory(self, m, u, time_step):
"""
m: (B, M, N) [batch size, memory size, memory order]
u: (B, M)
Output: (B, M, N)
"""
raise NotImplementedError
def default_state(self, input, batch_size=None):
batch_size = input.size(0) if batch_size is None else batch_size
return (input.new_zeros(batch_size, self.hidden_size, requires_grad=False),
input.new_zeros(batch_size, self.memory_size, self.memory_order, requires_grad=False),
0)
def output(self, state):
""" Converts a state into a single output (tensor) """
h, m, time_step = state
if self.memory_output:
hm = torch.cat((h, m.view(m.shape[0], self.memory_size*self.memory_order)), dim=-1)
return hm
else:
return h
def state_size(self):
return self.hidden_size + self.memory_size*self.memory_order
def output_size(self):
if self.memory_output:
return self.hidden_size + self.memory_size*self.memory_order
else:
return self.hidden_size
class LTICell(MemoryCell):
""" A cell implementing Linear Time Invariant dynamics: c' = Ac + Bf. """
def __init__(self, input_size, hidden_size, memory_size, memory_order,
A, B,
trainable_scale=0., # how much to scale LR on A and B
dt=0.01,
discretization='zoh',
**kwargs
):
super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
C = np.ones((1, memory_order))
D = np.zeros((1,))
dA, dB, _, _, _ = signal.cont2discrete((A, B, C, D), dt=dt, method=discretization)
dA = dA - np.eye(memory_order) # puts into form: x += Ax
self.trainable_scale = np.sqrt(trainable_scale)
if self.trainable_scale <= 0.:
self.register_buffer('A', torch.Tensor(dA))
self.register_buffer('B', torch.Tensor(dB))
else:
self.A = nn.Parameter(torch.Tensor(dA / self.trainable_scale), requires_grad=True)
self.B = nn.Parameter(torch.Tensor(dB / self.trainable_scale), requires_grad=True)
# TODO: proper way to implement LR scale is a preprocess() function that occurs once per unroll
# also very useful for orthogonal params
def update_memory(self, m, u, time_step):
u = u.unsqueeze(-1) # (B, M, 1)
if self.trainable_scale <= 0.:
return m + F.linear(m, self.A) + F.linear(u, self.B)
else:
return m + F.linear(m, self.A * self.trainable_scale) + F.linear(u, self.B * self.trainable_scale)
class LSICell(MemoryCell):
""" A cell implementing Linear 'Scale' Invariant dynamics: c' = 1/t (Ac + Bf). """
def __init__(self, input_size, hidden_size, memory_size, memory_order,
A, B,
init_t = 0, # 0 for special case at t=0 (new code), else old code without special case
max_length=1024,
discretization='bilinear',
**kwargs
):
"""
# TODO: make init_t start at arbitrary time (instead of 0 or 1)
"""
# B should have shape (N, 1)
assert len(B.shape) == 2 and B.shape[1] == 1
super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
assert isinstance(init_t, int)
self.init_t = init_t
self.max_length = max_length
A_stacked = np.empty((max_length, memory_order, memory_order), dtype=A.dtype)
B_stacked = np.empty((max_length, memory_order), dtype=B.dtype)
B = B[:,0]
N = memory_order
for t in range(1, max_length + 1):
At = A / t
Bt = B / t
if discretization in forward_aliases:
A_stacked[t - 1] = np.eye(N) + At
B_stacked[t - 1] = Bt
elif discretization in backward_aliases:
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, np.eye(N), lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, Bt, lower=True)
elif discretization in bilinear_aliases:
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, Bt, lower=True)
elif discretization in zoh_aliases:
A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
B_stacked[t - 1] = la.solve_triangular(A, A_stacked[t - 1] @ B - B, lower=True)
B_stacked = B_stacked[:, :, None]
A_stacked -= np.eye(memory_order) # puts into form: x += Ax
self.register_buffer('A', torch.Tensor(A_stacked))
self.register_buffer('B', torch.Tensor(B_stacked))
def update_memory(self, m, u, time_step):
u = u.unsqueeze(-1) # (B, M, 1)
t = time_step - 1 + self.init_t
if t < 0:
return F.pad(u, (0, self.memory_order - 1))
else:
if t >= self.max_length: t = self.max_length - 1
return m + F.linear(m, self.A[t]) + F.linear(u, self.B[t])
class TimeMemoryCell(MemoryCell):
""" MemoryCell with timestamped data """
def __init__(self, input_size, hidden_size, memory_size, memory_order, **kwargs):
super().__init__(input_size-1, hidden_size, memory_size, memory_order, **kwargs)
def forward(self, input, state):
h, m, time_step = state
timestamp, input = input[:, 0], input[:, 1:]
input_to_hidden = input if self.architecture['hx'] else input.new_empty((0,))
input_to_memory = input if self.architecture['ux'] else input.new_empty((0,))
# Construct the update features
memory_preact = self.W_uxh(torch.cat((input_to_memory, h), dim=-1)) # (batch, memory_size)
if self.architecture['um']:
memory_preact = memory_preact + (m * self.W_um).sum(dim=-1)
u = self.memory_activation_fn(memory_preact) # (batch, memory_size)
# Update the memory
m = self.update_memory(m, u, time_step, timestamp) # (batch, memory_size, memory_order)
# Update hidden state from memory
if self.architecture['hm']:
memory_to_hidden = m.view(input.shape[0], self.memory_size*self.memory_order)
else:
memory_to_hidden = input.new_empty((0,))
m_inputs = (torch.cat((input_to_hidden, memory_to_hidden), dim=-1),)
hidden_preact = self.W_hxm(*m_inputs)
if self.architecture['hh']:
hidden_preact = hidden_preact + self.W_hh(h)
hidden = self.hidden_activation_fn(hidden_preact)
# Construct gate if necessary
if self.gate is None:
h = hidden
else:
if self.architecture['hh']:
m_inputs = torch.cat((m_inputs[0], h), -1),
g = self.W_gxm(*m_inputs)
h = (1.-g) * h + g * hidden
next_state = (h, m, timestamp)
output = self.output(next_state)
return output, next_state
class TimeLSICell(TimeMemoryCell):
""" A cell implementing "Linear Scale Invariant" dynamics: c' = Ac + Bf with timestamped inputs. """
name = 'tlsi'
def __init__(self, input_size, hidden_size, memory_size=1, memory_order=-1,
measure='legs',
measure_args={},
method='manual',
discretization='bilinear',
**kwargs
):
if memory_order < 0:
memory_order = hidden_size
super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
assert measure in ['legs', 'lagt', 'tlagt', 'legt']
assert method in ['manual', 'linear', 'toeplitz']
if measure == 'legs':
if method == 'manual':
self.transition = LegSAdaptiveTransitionManual(self.memory_order)
kwargs = {'precompute': False}
if measure == 'legt':
if method == 'manual':
self.transition = LegTAdaptiveTransitionManual(self.memory_order)
kwargs = {'precompute': False}
elif measure == 'lagt':
if method == 'manual':
self.transition = LagTAdaptiveTransitionManual(self.memory_order)
kwargs = {'precompute': False}
elif measure == 'tlagt':
if method == 'manual':
self.transition = TLagTAdaptiveTransitionManual(self.memory_order, **measure_args)
kwargs = {'precompute': False}
if discretization in forward_aliases:
self.transition_fn = partial(self.transition.forward_diff, **kwargs)
elif discretization in backward_aliases:
self.transition_fn = partial(self.transition.backward_diff, **kwargs)
elif discretization in bilinear_aliases:
self.transition_fn = partial(self.transition.bilinear, **kwargs)
else: assert False
def update_memory(self, m, u, t0, t1):
"""
m: (B, M, N) [batch, memory_size, memory_order]
u: (B, M)
t0: (B,) previous time
t1: (B,) current time
"""
if torch.eq(t1, 0.).any():
return F.pad(u.unsqueeze(-1), (0, self.memory_order - 1))
else:
dt = ((t1-t0)/t1).unsqueeze(-1)
m = self.transition_fn(dt, m, u)
return m
class TimeLTICell(TimeLSICell):
""" A cell implementing Linear Time Invariant dynamics: c' = Ac + Bf with timestamped inputs. """
name = 'tlti'
def __init__(self, input_size, hidden_size, memory_size=1, memory_order=-1,
dt=1.0,
**kwargs
):
if memory_order < 0:
memory_order = hidden_size
self.dt = dt
super().__init__(input_size, hidden_size, memory_size, memory_order, **kwargs)
def update_memory(self, m, u, t0, t1):
"""
m: (B, M, N) [batch, memory_size, memory_order]
u: (B, M)
t0: (B,) previous time
t1: (B,) current time
"""
dt = self.dt*(t1-t0).unsqueeze(-1)
m = self.transition_fn(dt, m, u)
return m
| hippo-code-master | model/memory.py |
import torch
import torch.nn as nn
from model.exprnn.orthogonal import Orthogonal
from model.exprnn.trivializations import expm, cayley_map
from model.exprnn.initialization import henaff_init_, cayley_init_
from model.components import Modrelu
param_name_to_param = {'cayley': cayley_map, 'expm': expm}
init_name_to_init = {'henaff': henaff_init_, 'cayley': cayley_init_}
class OrthogonalLinear(Orthogonal):
def __init__(self, input_size, output_size, method='exprnn', init='cayley', K=100):
""" Wrapper around expRNN's Orthogonal class taking care of parameter names """
if method == "exprnn":
mode = "static"
param = 'expm'
elif method == "dtriv":
# We use 100 as the default to project back to the manifold.
# This parameter does not really affect the convergence of the algorithms, even for K=1
mode = ("dynamic", ortho_args['K'], 100) # TODO maybe K=30? check exprnn codebase
param = 'expm'
elif method == "cayley":
mode = "static"
param = 'cayley'
else:
assert False, f"OrthogonalLinear: orthogonal method {method} not supported"
param = param_name_to_param[param]
init_A = init_name_to_init[init]
super().__init__(input_size, output_size, init_A, mode, param)
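# Usage sketch for OrthogonalLinear (an illustration with assumed sizes, not part
# of the original docs):
#   layer = OrthogonalLinear(16, 16, method='exprnn', init='cayley')
#   y = layer(torch.randn(4, 16)) # multiply by an orthogonal matrix parametrized via expm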
class OrthogonalCell(nn.Module):
""" Replacement for expRNN's OrthogonalRNN class
initializer_skew (str): either 'henaff' or 'cayley'
    param (str): a parametrization in terms of skew-symmetric matrices, either 'cayley' or 'expm'
"""
def __init__(self, input_size, hidden_size, **ortho_args):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.recurrent_kernel = OrthogonalLinear(hidden_size, hidden_size, **ortho_args)
self.input_kernel = nn.Linear(in_features=self.input_size, out_features=self.hidden_size, bias=False)
self.nonlinearity = Modrelu(hidden_size)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_normal_(self.input_kernel.weight.data, nonlinearity="relu")
def forward(self, input, hidden):
input = self.input_kernel(input)
hidden = self.recurrent_kernel(hidden)
out = input + hidden
out = self.nonlinearity(out)
return out, out
def default_state(self, input, batch_size=None):
return input.new_zeros(input.size(0) if batch_size is None else batch_size,
self.hidden_size, requires_grad=False)
def output(self, h):
return h
def state_size(self):
return self.hidden_size
def output_size(self):
return self.hidden_size
    def initial_state(self, trainable=False):
        """ Return initial state of the RNN
        This should not need to see the input as it should be batch size agnostic and automatically broadcasted
        # TODO Currently not used
        """
        if trainable:
            # Cache a trainable initial state on the instance and return it
            self.initial_state = torch.zeros(self.hidden_size, requires_grad=True)
            return self.initial_state
        else:
            return torch.zeros(self.hidden_size, requires_grad=True)
| hippo-code-master | model/orthogonalcell.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy import signal
import math
# from model.toeplitz import triangular_toeplitz_multiply, triangular_toeplitz_multiply_padded
# from toeplitz import triangular_toeplitz_multiply, triangular_toeplitz_multiply_padded
### Utilities
# TODO bitreversal is duplicated in dataset utils
def bitreversal_po2(n):
m = int(math.log(n)/math.log(2))
perm = np.arange(n).reshape(n,1)
for i in range(m):
n1 = perm.shape[0]//2
perm = np.hstack((perm[:n1],perm[n1:]))
return perm.squeeze(0)
def bitreversal_permutation(n):
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
perm = bitreversal_po2(N)
return np.extract(perm < n, perm)
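# Example (computed by hand): bitreversal_po2(8) returns [0, 4, 2, 6, 1, 5, 3, 7],
# i.e. index i maps to the integer whose 3-bit binary representation is i's bits reversed.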
def shift_up(a, s=None, drop=True, dim=0):
assert dim == 0
if s is None:
s = torch.zeros_like(a[0, ...])
s = s.unsqueeze(dim)
if drop:
a = a[:-1, ...]
return torch.cat((s, a), dim=dim)
def interleave(a, b, uneven=False, dim=0):
""" Interleave two tensors of same shape """
# assert(a.shape == b.shape)
assert dim == 0 # TODO temporary to make handling uneven case easier
if dim < 0:
        dim = len(a.shape) + dim
if uneven:
a_ = a[-1:, ...]
a = a[:-1, ...]
c = torch.stack((a, b), dim+1)
out_shape = list(a.shape)
out_shape[dim] *= 2
c = c.view(out_shape)
if uneven:
c = torch.cat((c, a_), dim=dim)
return c
def batch_mult(A, u, has_batch=None):
""" Matrix mult A @ u with special case to save memory if u has additional batch dim
The batch dimension is assumed to be the second dimension
A : (L, ..., N, N)
u : (L, [B], ..., N)
has_batch: True, False, or None. If None, determined automatically
Output:
x : (L, [B], ..., N)
A @ u broadcasted appropriately
"""
if has_batch is None:
has_batch = len(u.shape) >= len(A.shape)
if has_batch:
u = u.permute([0] + list(range(2, len(u.shape))) + [1])
else:
u = u.unsqueeze(-1)
v = (A @ u)
if has_batch:
v = v.permute([0] + [len(u.shape)-1] + list(range(1, len(u.shape)-1)))
else:
v = v[..., 0]
return v
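# Shape sketch for batch_mult (hypothetical sizes):
#   A = torch.randn(10, 4, 4) # (L, N, N)
#   u = torch.randn(10, 3, 4) # (L, B, N), extra batch dim B=3
#   v = batch_mult(A, u) # (10, 3, 4); has_batch is inferred because u has as many dims as A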
### Main unrolling functions
# @profile
def unroll(A, u):
"""
A : (..., N, N)
u : (L, ..., N)
output : x (..., N)
x[i, ...] = A^{i} @ u[0, ...] + ... + A @ u[i-1, ...] + u[i, ...]
"""
m = u.new_zeros(u.shape[1:])
outputs = []
for u_ in torch.unbind(u, dim=0):
m = F.linear(m, A) + u_
outputs.append(m)
output = torch.stack(outputs, dim=0)
return output
# @profile
def parallel_unroll_recursive(A, u):
""" Bottom-up divide-and-conquer version of unroll. """
# Main recursive function
# @profile
def parallel_unroll_recursive_(A, u):
if u.shape[0] == 1:
return u
u_evens = u[0::2, ...]
u_odds = u[1::2, ...]
u2 = F.linear(u_evens, A) + u_odds
A2 = A @ A
x_odds = parallel_unroll_recursive_(A2, u2)
x_evens = F.linear(shift_up(x_odds), A) + u_evens
x = interleave(x_evens, x_odds, dim=0)
return x
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
return parallel_unroll_recursive_(A, u)[:n, ...]
# @profile
def parallel_unroll_recursive_br(A, u):
""" Same as parallel_unroll_recursive but uses bit reversal for locality. """
# Main recursive function
def parallel_unroll_recursive_br_(A, u):
n = u.shape[0]
if n == 1:
return u
m = n//2
u_0 = u[:m, ...]
u_1 = u[m:, ...]
u2 = F.linear(u_0, A) + u_1
A2 = A @ A
x_1 = parallel_unroll_recursive_br_(A2, u2)
x_0 = F.linear(shift_up(x_1), A) + u_0
# x = torch.cat((x_0, x_1), dim=0) # is there a way to do this with cat?
x = interleave(x_0, x_1, dim=0)
return x
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
# Apply bit reversal
br = bitreversal_po2(N)
u = u[br, ...]
x = parallel_unroll_recursive_br_(A, u)
return x[:n, ...]
# @profile
def parallel_unroll_iterative(A, u):
""" Bottom-up divide-and-conquer version of unroll, implemented iteratively """
# Pad u to power of 2
n = u.shape[0]
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
u = torch.cat((u, u.new_zeros((N-u.shape[0],) + u.shape[1:] )), dim=0)
# Apply bit reversal
br = bitreversal_po2(N)
u = u[br, ...]
# Main recursive loop, flattened
us = [] # stores the u_0 terms in the recursive version
N_ = N
As = [] # stores the A matrices
for l in range(m):
N_ = N_ // 2
As.append(A)
u_0 = u[:N_, ...]
us.append(u_0)
u = F.linear(u_0, A) + u[N_:, ...]
A = A @ A
x_0 = []
x = u # x_1
for l in range(m-1, -1, -1):
x_0 = F.linear(shift_up(x), As[l]) + us[l]
x = interleave(x_0, x, dim=0)
return x[:n, ...]
# @profile
def variable_unroll_sequential(A, u, s=None, variable=True):
""" Unroll with variable (in time/length) transitions A.
A : ([L], ..., N, N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (..., N)
x[i, ...] = A[i]..A[0] @ s + A[i..1] @ u[0] + ... + A[i] @ u[i-1] + u[i]
"""
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
has_batch = len(u.shape) >= len(A.shape)
outputs = []
for (A_, u_) in zip(torch.unbind(A, dim=0), torch.unbind(u, dim=0)):
# s = F.linear(s, A_) + u_
# print("shapes", A_.shape, s.shape, has_batch)
s = batch_mult(A_.unsqueeze(0), s.unsqueeze(0), has_batch)[0]
outputs.append(s)
output = torch.stack(outputs, dim=0)
return output
# @profile
def variable_unroll(A, u, s=None, variable=True, recurse_limit=16):
""" Bottom-up divide-and-conquer version of variable_unroll. """
if u.shape[0] <= recurse_limit:
return variable_unroll_sequential(A, u, s, variable)
if s is None:
s = torch.zeros_like(u[0])
uneven = u.shape[0] % 2 == 1
has_batch = len(u.shape) >= len(A.shape)
u_0 = u[0::2, ...]
u_1 = u[1::2, ...]
if variable:
A_0 = A[0::2, ...]
A_1 = A[1::2, ...]
else:
A_0 = A
A_1 = A
u_0_ = u_0
A_0_ = A_0
if uneven:
u_0_ = u_0[:-1, ...]
if variable:
A_0_ = A_0[:-1, ...]
u_10 = batch_mult(A_1, u_0_, has_batch)
u_10 = u_10 + u_1
A_10 = A_1 @ A_0_
# Recursive call
x_1 = variable_unroll(A_10, u_10, s, variable, recurse_limit)
x_0 = shift_up(x_1, s, drop=not uneven)
x_0 = batch_mult(A_0, x_0, has_batch)
x_0 = x_0 + u_0
x = interleave(x_0, x_1, uneven, dim=0) # For some reason this interleave is slower than in the (non-multi) unroll_recursive
return x
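# Consistency sketch (a standalone check under assumed sizes): for L above
# recurse_limit, the divide-and-conquer unroll should match the sequential
# recurrence up to floating-point error.
#   A = torch.eye(4) + 0.01 * torch.randn(32, 4, 4)
#   u = torch.randn(32, 4)
#   assert torch.allclose(variable_unroll_sequential(A, u), variable_unroll(A, u), atol=1e-4)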
# @profile
def variable_unroll_general_sequential(A, u, s, op, variable=True):
""" Unroll with variable (in time/length) transitions A with general associative operation
A : ([L], ..., N, N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (..., N)
x[i, ...] = A[i]..A[0] s + A[i..1] u[0] + ... + A[i] u[i-1] + u[i]
"""
if not variable:
A = A.expand((u.shape[0],) + A.shape)
outputs = []
for (A_, u_) in zip(torch.unbind(A, dim=0), torch.unbind(u, dim=0)):
s = op(A_, s)
s = s + u_
outputs.append(s)
output = torch.stack(outputs, dim=0)
return output
# @profile
def variable_unroll_matrix_sequential(A, u, s=None, variable=True):
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
# has_batch = len(u.shape) >= len(A.shape)
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0))[0]
return variable_unroll_general_sequential(A, u, s, op, variable=True)
# @profile
def variable_unroll_toeplitz_sequential(A, u, s=None, variable=True, pad=False):
if s is None:
s = torch.zeros_like(u[0])
if not variable:
A = A.expand((u.shape[0],) + A.shape)
# has_batch = len(u.shape) >= len(A.shape)
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
# op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0))[0]
if pad:
n = A.shape[-1]
# print("shapes", A.shape, u.shape)
A = F.pad(A, (0, n))
u = F.pad(u, (0, n))
s = F.pad(s, (0, n))
# print("shapes", A.shape, u.shape)
ret = variable_unroll_general_sequential(A, u, s, triangular_toeplitz_multiply_padded, variable=True)
ret = ret[..., :n]
return ret
return variable_unroll_general_sequential(A, u, s, triangular_toeplitz_multiply, variable=True)
### General parallel scan functions with generic binary composition operators
# @profile
def variable_unroll_general(A, u, s, op, compose_op=None, sequential_op=None, variable=True, recurse_limit=16):
""" Bottom-up divide-and-conquer version of variable_unroll.
compose is an optional function that defines how to compose A without multiplying by a leaf u
"""
if u.shape[0] <= recurse_limit:
if sequential_op is None:
sequential_op = op
return variable_unroll_general_sequential(A, u, s, sequential_op, variable)
if compose_op is None:
compose_op = op
uneven = u.shape[0] % 2 == 1
has_batch = len(u.shape) >= len(A.shape)
u_0 = u[0::2, ...]
u_1 = u[1::2, ...]
if variable:
A_0 = A[0::2, ...]
A_1 = A[1::2, ...]
else:
A_0 = A
A_1 = A
u_0_ = u_0
A_0_ = A_0
if uneven:
u_0_ = u_0[:-1, ...]
if variable:
A_0_ = A_0[:-1, ...]
u_10 = op(A_1, u_0_) # batch_mult(A_1, u_0_, has_batch)
u_10 = u_10 + u_1
A_10 = compose_op(A_1, A_0_)
# Recursive call
x_1 = variable_unroll_general(A_10, u_10, s, op, compose_op, sequential_op, variable=variable, recurse_limit=recurse_limit)
x_0 = shift_up(x_1, s, drop=not uneven)
x_0 = op(A_0, x_0) # batch_mult(A_0, x_0, has_batch)
x_0 = x_0 + u_0
x = interleave(x_0, x_1, uneven, dim=0) # For some reason this interleave is slower than in the (non-multi) unroll_recursive
return x
# @profile
def variable_unroll_matrix(A, u, s=None, variable=True, recurse_limit=16):
if s is None:
s = torch.zeros_like(u[0])
has_batch = len(u.shape) >= len(A.shape)
op = lambda x, y: batch_mult(x, y, has_batch)
sequential_op = lambda x, y: batch_mult(x.unsqueeze(0), y.unsqueeze(0), has_batch)[0]
matmul = lambda x, y: x @ y
return variable_unroll_general(A, u, s, op, compose_op=matmul, sequential_op=sequential_op, variable=variable, recurse_limit=recurse_limit)
# @profile
def variable_unroll_toeplitz(A, u, s=None, variable=True, recurse_limit=8, pad=False):
""" Unroll with variable (in time/length) transitions A with general associative operation
A : ([L], ..., N) dimension L should exist iff variable is True
u : (L, [B], ..., N) updates
s : ([B], ..., N) start state
output : x (L, [B], ..., N) same shape as u
x[i, ...] = A[i]..A[0] s + A[i..1] u[0] + ... + A[i] u[i-1] + u[i]
"""
# Add the batch dimension to A if necessary
A_batch_dims = len(A.shape) - int(variable)
u_batch_dims = len(u.shape)-1
if u_batch_dims > A_batch_dims:
assert u_batch_dims == A_batch_dims + 1
if variable:
A = A.unsqueeze(1)
else:
A = A.unsqueeze(0)
if s is None:
s = torch.zeros_like(u[0])
if pad:
n = A.shape[-1]
# print("shapes", A.shape, u.shape)
A = F.pad(A, (0, n))
u = F.pad(u, (0, n))
s = F.pad(s, (0, n))
# print("shapes", A.shape, u.shape)
op = triangular_toeplitz_multiply_padded
ret = variable_unroll_general(A, u, s, op, compose_op=op, variable=variable, recurse_limit=recurse_limit)
ret = ret[..., :n]
return ret
op = triangular_toeplitz_multiply
ret = variable_unroll_general(A, u, s, op, compose_op=op, variable=variable, recurse_limit=recurse_limit)
return ret
### Testing
def test_correctness():
print("Testing Correctness\n====================")
# Test sequential unroll
L = 3
A = torch.Tensor([[1, 1], [1, 0]])
u = torch.ones((L, 2))
x = unroll(A, u)
assert torch.isclose(x, torch.Tensor([[1., 1.], [3., 2.], [6., 4.]])).all()
# Test utilities
assert torch.isclose(shift_up(x), torch.Tensor([[0., 0.], [1., 1.], [3., 2.]])).all()
assert torch.isclose(interleave(x, x), torch.Tensor([[1., 1.], [1., 1.], [3., 2.], [3., 2.], [6., 4.], [6., 4.]])).all()
# Test parallel unroll
x = parallel_unroll_recursive(A, u)
assert torch.isclose(x, torch.Tensor([[1., 1.], [3., 2.], [6., 4.]])).all()
# Powers
L = 12
A = torch.Tensor([[1, 0, 0], [2, 1, 0], [3, 3, 1]])
u = torch.ones((L, 3))
x = parallel_unroll_recursive(A, u)
print("recursive", x)
x = parallel_unroll_recursive_br(A, u)
print("recursive_br", x)
x = parallel_unroll_iterative(A, u)
print("iterative_br", x)
A = A.repeat((L, 1, 1))
s = torch.zeros(3)
print("A shape", A.shape)
x = variable_unroll_sequential(A, u, s)
print("variable_unroll", x)
x = variable_unroll(A, u, s)
print("parallel_variable_unroll", x)
def generate_data(L, N, B=None, cuda=True):
A = torch.eye(N) + torch.normal(0, 1, size=(N, N)) / (N**.5) / L
u = torch.normal(0, 1, size=(L, B, N))
# device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
device = torch.device('cuda:0') if cuda else torch.device('cpu')
A = A.to(device)
u = u.to(device)
return A, u
def test_stability():
print("Testing Stability\n====================")
L = 256
N = L // 2
B = 100
A, u = generate_data(L, N, B)
x = unroll(A, u)
x1 = parallel_unroll_recursive(A, u)
x2 = parallel_unroll_recursive_br(A, u)
x3 = parallel_unroll_iterative(A, u)
print("norm error", torch.norm(x-x1))
print("norm error", torch.norm(x-x2))
print("norm error", torch.norm(x-x3))
# print(x-x1)
# print(x-x2)
# print(x-x3)
print("max error", torch.max(torch.abs(x-x1)))
print("max error", torch.max(torch.abs(x-x2)))
print("max error", torch.max(torch.abs(x-x3)))
A = A.repeat((L, 1, 1))
x = variable_unroll_sequential(A, u)
x_ = variable_unroll(A, u)
# x_ = variable_unroll_matrix_sequential(A, u)
x_ = variable_unroll_matrix(A, u)
print(x-x_)
abserr = torch.abs(x-x_)
relerr = abserr/(torch.abs(x)+1e-8)
print("norm abs error", torch.norm(abserr))
print("max abs error", torch.max(abserr))
print("norm rel error", torch.norm(relerr))
print("max rel error", torch.max(relerr))
def test_toeplitz():
from model.toeplitz import krylov_toeplitz_fast
def summarize(name, x, x_, showdiff=False):
print(name, "stats")
if showdiff:
print(x-x_)
abserr = torch.abs(x-x_)
relerr = abserr/(torch.abs(x)+1e-8)
print(" norm abs error", torch.norm(abserr))
print(" max abs error", torch.max(abserr))
print(" norm rel error", torch.norm(relerr))
print(" max rel error", torch.max(relerr))
print("Testing Toeplitz\n====================")
L = 512
N = L // 2
B = 100
A, u = generate_data(L, N, B)
A = A[..., 0]
A = krylov_toeplitz_fast(A)
# print("SHAPES", A.shape, u.shape)
# Static A
x = unroll(A, u)
x_ = variable_unroll(A, u, variable=False)
summarize("nonvariable matrix original", x, x_, showdiff=False)
x_ = variable_unroll_matrix(A, u, variable=False)
summarize("nonvariable matrix general", x, x_, showdiff=False)
x_ = variable_unroll_toeplitz(A[..., 0], u, variable=False)
summarize("nonvariable toeplitz", x, x_, showdiff=False)
# Sequential
A = A.repeat((L, 1, 1))
for _ in range(1):
x_ = variable_unroll_sequential(A, u)
summarize("variable unroll sequential", x, x_, showdiff=False)
x_ = variable_unroll_matrix_sequential(A, u)
summarize("variable matrix sequential", x, x_, showdiff=False)
x_ = variable_unroll_toeplitz_sequential(A[..., 0], u, pad=True)
summarize("variable toeplitz sequential", x, x_, showdiff=False)
# Parallel
for _ in range(1):
x_ = variable_unroll(A, u)
summarize("variable matrix original", x, x_, showdiff=False)
x_ = variable_unroll_matrix(A, u)
summarize("variable matrix general", x, x_, showdiff=False)
x_ = variable_unroll_toeplitz(A[..., 0], u, pad=True, recurse_limit=8)
summarize("variable toeplitz", x, x_, showdiff=False)
# @profile
def test_speed(variable=False, it=1):
print("Testing Speed\n====================")
N = 256
L = 1024
B = 100
A, u = generate_data(L, N, B)
As = A.repeat((L, 1, 1))
u.requires_grad=True
As.requires_grad=True
for _ in range(it):
x = unroll(A, u)
x = torch.sum(x)
x.backward()
x = parallel_unroll_recursive(A, u)
x = torch.sum(x)
x.backward()
# parallel_unroll_recursive_br(A, u)
# parallel_unroll_iterative(A, u)
for _ in range(it):
if variable:
x = variable_unroll_sequential(As, u, variable=True, recurse_limit=16)
x = torch.sum(x)
x.backward()
x = variable_unroll(As, u, variable=True, recurse_limit=16)
x = torch.sum(x)
x.backward()
else:
variable_unroll_sequential(A, u, variable=False, recurse_limit=16)
variable_unroll(A, u, variable=False, recurse_limit=16)
if __name__ == '__main__':
# test_correctness()
test_stability()
# test_toeplitz()
# test_speed(variable=True, it=100)
| hippo-code-master | model/unroll.py |
import torch
import torch.nn as nn
from functools import partial
from model.rnn import RNN, RNNWrapper, LSTMWrapper
from model import rnncell, opcell # TODO: this is just to force cell_registry to update. There is probably a better programming pattern for this
from model.rnncell import CellBase
from model.orthogonalcell import OrthogonalCell
class Model(nn.Module):
def __init__(
self,
input_size,
output_size,
output_len=0,
cell='lstm',
cell_args={},
output_hiddens=[],
embed_args=None,
preprocess=None,
ff=False,
dropout=0.0,
split=0,
):
super(Model, self).__init__()
# Save arguments needed for forward pass
self.input_size = input_size
self.output_size = output_size
self.output_len = output_len
assert output_len >= 0, f"output_len {output_len} should be 0 to return just the state or >0 to return the last output tokens"
self.dropout = dropout
self.split = split
cell_args['input_size'] = input_size
if embed_args is not None:
self.embed_dim = embed_args['embed_dim']
self.embedding = nn.Embedding(input_size, self.embed_dim)
cell_args['input_size'] = self.embed_dim
### Handle optional Hippo preprocessing
self.preprocess = preprocess
if self.preprocess is not None:
assert isinstance(self.preprocess, dict)
assert 'order' in self.preprocess
assert 'measure' in self.preprocess
            self.hippo = VariableMemoryProjection(**self.preprocess) # NOTE: not imported here; assumed to be provided by model.memory
cell_args['input_size'] *= (self.preprocess['order']+1) # will append this output to original channels
### Construct main RNN
if ff: # feedforward model
cell_args['input_size'] = input_size
            self.rnn = QRNN(**cell_args) # NOTE: QRNN is not imported in this file; this branch assumes it is defined elsewhere
else:
# Initialize proper cell type
if cell == 'lstm':
self.rnn = LSTMWrapper(**cell_args, dropout=self.dropout)
else:
if cell in CellBase.registry:
cell_ctor = CellBase.registry[cell]
elif cell == 'orthogonal':
cell_ctor = OrthogonalCell
else:
assert False, f"cell {cell} not supported"
self.rnn = RNN(cell_ctor(**cell_args), dropout=self.dropout)
if self.split > 0:
self.initial_rnn = RNN(cell_ctor(**cell_args), dropout=self.dropout)
### Construct output head
sizes = [self.rnn.output_size()] + output_hiddens + [output_size]
self.output_mlp = nn.Sequential(*[nn.Linear(sizes[i], sizes[i+1]) for i in range(len(sizes)-1)])
# @profile
def forward(self, inputs, len_batch=None):
B, L, C = inputs.shape
inputs = inputs.transpose(0, 1) # .unsqueeze(-1) # (seq_length, batch, channels)
# Apply Hippo preprocessing if necessary
if self.preprocess is not None:
p = self.hippo(inputs)
p = p.reshape(L, B, self.input_size * self.preprocess['order'])
inputs = torch.cat([inputs, p], dim=-1)
# Handle embedding
if hasattr(self, 'embedding'):
inputs = self.embedding(inputs)
if len_batch is not None:
inputs = nn.utils.rnn.pack_padded_sequence(inputs, len_batch, enforce_sorted=False)
# Option to have separate RNN for head of sequence, mostly for debugging gradients etc
if self.split > 0:
initial_inputs, inputs = inputs[:self.split], inputs[self.split:]
_, initial_state = self.initial_rnn(initial_inputs, return_output=False)
else:
initial_state = None
# Apply main RNN
if self.output_len > 0:
outputs, _ = self.rnn(inputs, init_state=initial_state, return_output=True)
# get last output tokens
outputs = outputs[-self.output_len:,:,:]
outputs = outputs.transpose(0, 1)
return self.output_mlp(outputs)
else:
_, state = self.rnn(inputs, init_state=initial_state, return_output=False)
state = self.rnn.output(state)
return self.output_mlp(state)
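# Construction sketch (hypothetical hyperparameters, not a prescribed configuration):
#   model = Model(input_size=1, output_size=10, cell='gru', cell_args={'hidden_size': 256})
#   x = torch.randn(32, 784, 1) # (batch, length, channels), e.g. sequential MNIST
#   logits = model(x) # (batch, output_size), read off the final RNN state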
| hippo-code-master | model/model.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy import signal
from scipy import linalg as la
from scipy import special as ss
def transition(measure, N, **measure_args):
""" A, B transition matrices for different measures.
measure: the type of measure
legt - Legendre (translated)
legs - Legendre (scaled)
glagt - generalized Laguerre (translated)
lagt, tlagt - previous versions of (tilted) Laguerre with slightly different normalization
"""
# Laguerre (translated)
if measure == 'lagt':
b = measure_args.get('beta', 1.0)
A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
    elif measure == 'tlagt':
# beta = 1 corresponds to no tilt
b = measure_args.get('beta', 1.0)
A = (1.-b)/2 * np.eye(N) - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
# Generalized Laguerre
# alpha 0, beta small is most stable (limits to the 'lagt' measure)
# alpha 0, beta 1 has transition matrix A = [lower triangular 1]
    elif measure == 'glagt':
alpha = measure_args.get('alpha', 0.0)
beta = measure_args.get('beta', 0.01)
A = -np.eye(N) * (1 + beta) / 2 - np.tril(np.ones((N, N)), -1)
B = ss.binom(alpha + np.arange(N), np.arange(N))[:, None]
L = np.exp(.5 * (ss.gammaln(np.arange(N)+alpha+1) - ss.gammaln(np.arange(N)+1)))
A = (1./L[:, None]) * A * L[None, :]
B = (1./L[:, None]) * B * np.exp(-.5 * ss.gammaln(1-alpha)) * beta**((1-alpha)/2)
# Legendre (translated)
elif measure == 'legt':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1) ** .5
j, i = np.meshgrid(Q, Q)
A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :]
B = R[:, None]
A = -A
# LMU: equivalent to LegT up to normalization
elif measure == 'lmu':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1)[:, None] # / theta
j, i = np.meshgrid(Q, Q)
A = np.where(i < j, -1, (-1.)**(i-j+1)) * R
B = (-1.)**Q[:, None] * R
# Legendre (scaled)
elif measure == 'legs':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
        B = np.diag(T)[:, None]
    else:
        raise NotImplementedError(f"transition: measure '{measure}' not supported")
    return A, B
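# Shape sketch (standalone check): every supported measure yields a state matrix A
# of shape (N, N) and an input matrix B of shape (N, 1), e.g.
#   A, B = transition('legs', 4)
#   assert A.shape == (4, 4) and B.shape == (4, 1)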
class AdaptiveTransition(nn.Module):
def precompute_forward(self):
raise NotImplementedError
def precompute_backward(self):
raise NotImplementedError
def forward_mult(self, u, delta):
""" Computes (I + delta A) u
A: (n, n)
u: (..., n)
delta: (...) or scalar
output: (..., n)
"""
raise NotImplementedError
def inverse_mult(self, u, delta): # TODO swap u, delta everywhere
""" Computes (I - d A)^-1 u """
raise NotImplementedError
# @profile
def forward_diff(self, d, u, v, **kwargs):
""" Computes the 'forward diff' or Euler update rule: (I - d A)^-1 u + d B v
d: (...)
u: (..., n)
v: (...)
"""
# TODO F.linear should be replaced by broadcasting, self.B shouldl be shape (n) instead of (n, 1)
# x = self.forward_mult(u, d) + dt * F.linear(v.unsqueeze(-1), self.B)
v = d * v
v = v.unsqueeze(-1) * self.B
x = self.forward_mult(u, d, **kwargs)
x = x + v
return x
# @profile
def backward_diff(self, d, u, v, **kwargs):
""" Computes the 'forward diff' or Euler update rule: (I - d A)^-1 u + d (I - d A)^-1 B v
d: (...)
u: (..., n)
v: (...)
"""
v = d * v
v = v.unsqueeze(-1) * self.B
x = u + v
x = self.inverse_mult(x, d, **kwargs)
return x
# @profile
def bilinear(self, dt, u, v, alpha=.5, **kwargs):
""" Computes the bilinear (aka trapezoid or Tustin's) update rule.
(I - d/2 A)^-1 (I + d/2 A) u + d B (I - d/2 A)^-1 B v
"""
x = self.forward_mult(u, (1-alpha)*dt, **kwargs)
v = dt * v
v = v.unsqueeze(-1) * self.B
x = x + v
x = self.inverse_mult(x, (alpha)*dt, **kwargs)
return x
def zoh(self, dt, u, v):
raise NotImplementedError
def precompute(self, deltas):
""" deltas: list of step sizes """
for delta in deltas:
# self.forward_cache[delta] = self.precompute_forward(delta)
# self.backward_cache[delta] = self.precompute_backward(delta)
# TODO being lazy here; should check whether bilinear rule is being used
self.forward_cache[delta/2] = self.precompute_forward(delta/2)
self.backward_cache[delta/2] = self.precompute_backward(delta/2)
class ManualAdaptiveTransition(AdaptiveTransition):
def __init__(self, N, **kwargs):
""" Slow (n^3, or n^2 if step sizes are cached) version via manual matrix mult/inv
delta: optional list of step sizes to cache the transitions for
"""
super().__init__()
A, B = transition(type(self).measure, N, **kwargs)
self.N = N
self.register_buffer('A', torch.Tensor(A))
self.register_buffer('B', torch.Tensor(B[:, 0]))
self.register_buffer('I', torch.eye(self.N))
# Precompute stacked A, B matrix for zoh computation
AB = torch.cat((self.A, self.B.unsqueeze(-1)), dim=-1)
AB = torch.cat((AB, torch.zeros((1, N+1))), dim=0)
self.register_buffer('AB', AB)
self.forward_cache = {}
self.backward_cache = {}
print(f"ManualAdaptiveTransition:\n A {self.A}\nB {self.B}")
def precompute_forward(self, delta):
return self.I + delta*self.A
    def precompute_backward(self, delta):
        # NOTE: treats I - delta*A as lower triangular, which holds for e.g. the 'legs' and 'lagt' transitions
        return torch.triangular_solve(self.I, self.I - delta*self.A, upper=False)[0]
def precompute_exp(self, delta):
# NOTE this does not work because torch has no matrix exponential yet, support ongoing:
# https://github.com/pytorch/pytorch/issues/9983
e = torch.expm(delta * self.AB)
return e[:-1, :-1], e[:-1, -1]
# @profile
def forward_mult(self, u, delta, precompute=True):
""" Computes (I + d A) u
A: (n, n)
u: (b1* d, n) d represents memory_size
delta: (b2*, d) or scalar
Assume len(b2) <= len(b1)
output: (broadcast(b1, b2)*, d, n)
"""
# For forward Euler, precompute materializes the matrix
if precompute:
if isinstance(delta, torch.Tensor):
delta = delta.unsqueeze(-1).unsqueeze(-1)
# print(delta, isinstance(delta, float), delta in self.forward_cache)
if isinstance(delta, float) and delta in self.forward_cache:
mat = self.forward_cache[delta]
else:
mat = self.precompute_forward(delta)
if len(u.shape) >= len(mat.shape):
# For memory efficiency, leverage extra batch dimensions
s = len(u.shape)
# TODO can make the permutation more efficient by just permuting the last 2 or 3 dim, but need to do more casework)
u = u.permute(list(range(1, s)) + [0])
x = mat @ u
x = x.permute([s-1] + list(range(s-1)))
else:
x = (mat @ u.unsqueeze(-1))[..., 0]
# x = F.linear(u, mat)
else:
if isinstance(delta, torch.Tensor):
delta = delta.unsqueeze(-1)
x = F.linear(u, self.A)
x = u + delta * x
return x
# @profile
def inverse_mult(self, u, delta, precompute=True):
""" Computes (I - d A)^-1 u """
if isinstance(delta, torch.Tensor):
delta = delta.unsqueeze(-1).unsqueeze(-1)
if precompute:
if isinstance(delta, float) and delta in self.backward_cache:
mat = self.backward_cache[delta]
else:
mat = self.precompute_backward(delta) # (n, n) or (..., n, n)
if len(u.shape) >= len(mat.shape):
# For memory efficiency, leverage extra batch dimensions
s = len(u.shape)
# TODO can make the permutation more efficient by just permuting the last 2 or 3 dim, but need to do more casework
u = u.permute(list(range(1, s)) + [0])
x = mat @ u
x = x.permute([s-1] + list(range(s-1)))
else:
x = (mat @ u.unsqueeze(-1))[..., 0]
else:
_A = self.I - delta*self.A
x = torch.triangular_solve(u.unsqueeze(-1), _A, upper=False)[0]
x = x[..., 0]
return x
def zoh(self, dt, u, v):
dA, dB = self.precompute_exp(dt)
return F.linear(u, dA) + dB * v.unsqueeze(-1)
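# Usage sketch for the manual transitions (assumed sizes; bilinear discretization):
#   trans = LegSAdaptiveTransitionManual(8) # defined just below
#   u = torch.randn(5, 1, 8) # (batch, memory_size, memory_order)
#   v = torch.randn(5, 1) # scalar input per memory unit
#   x = trans.bilinear(0.1, u, v) # one discretized step of c' = Ac + Bf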
class LegSAdaptiveTransitionManual(ManualAdaptiveTransition):
measure = 'legs'
class LegTAdaptiveTransitionManual(ManualAdaptiveTransition):
measure = 'legt'
class LagTAdaptiveTransitionManual(ManualAdaptiveTransition):
measure = 'lagt'
class TLagTAdaptiveTransitionManual(ManualAdaptiveTransition):
measure = 'tlagt'
| hippo-code-master | model/op.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
import math
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from scipy import linalg as la
from scipy import special as ss
import nengo
from model import unroll
from model.op import transition
"""
The HiPPO_LegT and HiPPO_LegS modules satisfy the HiPPO interface:
The forward() method takes an input sequence f of length L to an output sequence c of shape (L, N) where N is the order of the HiPPO operator.
c[k] can be thought of as representing all of f[:k] via coefficients of a polynomial approximation.
The reconstruct() method takes the coefficients and turns each coefficient into a reconstruction of the original input.
Note that each coefficient c[k] turns into an approximation of the entire input f, so this reconstruction has shape (L, L),
and the last element of this reconstruction (which has shape (L,)) is the most accurate reconstruction of the original input.
Both of these two methods construct approximations according to different measures, defined in the HiPPO paper.
The first one is the "Translated Legendre" (which is up to scaling equal to the LMU matrix),
and the second one is the "Scaled Legendre".
Each method comprises an exact recurrence c_k = A_k c_{k-1} + B_k f_k, and an exact reconstruction formula based on the corresponding polynomial family.
"""
class HiPPO_LegT(nn.Module):
def __init__(self, N, dt=1.0, discretization='bilinear'):
"""
N: the order of the HiPPO projection
dt: discretization step size - should be roughly inverse to the length of the sequence
"""
super().__init__()
self.N = N
A, B = transition('lmu', N)
C = np.ones((1, N))
D = np.zeros((1,))
# dt, discretization options
A, B, _, _, _ = signal.cont2discrete((A, B, C, D), dt=dt, method=discretization)
B = B.squeeze(-1)
self.register_buffer('A', torch.Tensor(A)) # (N, N)
self.register_buffer('B', torch.Tensor(B)) # (N,)
# vals = np.linspace(0.0, 1.0, 1./dt)
vals = np.arange(0.0, 1.0, dt)
self.eval_matrix = torch.Tensor(ss.eval_legendre(np.arange(N)[:, None], 1 - 2 * vals).T)
def forward(self, inputs):
"""
inputs : (length, ...)
output : (length, ..., N) where N is the order of the HiPPO projection
"""
inputs = inputs.unsqueeze(-1)
u = inputs * self.B # (length, ..., N)
        c = u.new_zeros(u.shape[1:]) # keep the state on the same device/dtype as the inputs
cs = []
for f in inputs:
c = F.linear(c, self.A) + self.B * f
cs.append(c)
return torch.stack(cs, dim=0)
def reconstruct(self, c):
return (self.eval_matrix @ c.unsqueeze(-1)).squeeze(-1)
class HiPPO_LegS(nn.Module):
""" Vanilla HiPPO-LegS model (scale invariant instead of time invariant) """
def __init__(self, N, max_length=1024, measure='legs', discretization='bilinear'):
"""
max_length: maximum sequence length
"""
super().__init__()
self.N = N
A, B = transition(measure, N)
B = B.squeeze(-1)
A_stacked = np.empty((max_length, N, N), dtype=A.dtype)
B_stacked = np.empty((max_length, N), dtype=B.dtype)
for t in range(1, max_length + 1):
At = A / t
Bt = B / t
if discretization == 'forward':
A_stacked[t - 1] = np.eye(N) + At
B_stacked[t - 1] = Bt
elif discretization == 'backward':
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, np.eye(N), lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At, Bt, lower=True)
elif discretization == 'bilinear':
A_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, np.eye(N) + At / 2, lower=True)
B_stacked[t - 1] = la.solve_triangular(np.eye(N) - At / 2, Bt, lower=True)
else: # ZOH
A_stacked[t - 1] = la.expm(A * (math.log(t + 1) - math.log(t)))
B_stacked[t - 1] = la.solve_triangular(A, A_stacked[t - 1] @ B - B, lower=True)
self.A_stacked = torch.Tensor(A_stacked) # (max_length, N, N)
self.B_stacked = torch.Tensor(B_stacked) # (max_length, N)
# print("B_stacked shape", B_stacked.shape)
vals = np.linspace(0.0, 1.0, max_length)
self.eval_matrix = torch.Tensor((B[:, None] * ss.eval_legendre(np.arange(N)[:, None], 2 * vals - 1)).T)
def forward(self, inputs, fast=False):
"""
inputs : (length, ...)
output : (length, ..., N) where N is the order of the HiPPO projection
"""
L = inputs.shape[0]
inputs = inputs.unsqueeze(-1)
u = torch.transpose(inputs, 0, -2)
u = u * self.B_stacked[:L]
u = torch.transpose(u, 0, -2) # (length, ..., N)
if fast:
result = unroll.variable_unroll_matrix(self.A_stacked[:L], u)
else:
result = unroll.variable_unroll_matrix_sequential(self.A_stacked[:L], u)
return result
def reconstruct(self, c):
a = self.eval_matrix @ c.unsqueeze(-1)
return a.squeeze(-1)
class FunctionApprox(data.TensorDataset):
def __init__(self, length, dt, nbatches, freq=10.0, seed=0):
rng = np.random.RandomState(seed=seed)
process = nengo.processes.WhiteSignal(length * dt, high=freq, y0=0)
X = np.empty((nbatches, length, 1))
for i in range(nbatches):
X[i, :] = process.run_steps(length, dt=dt, rng=rng)
# X[i, :] /= np.max(np.abs(X[i, :]))
X = torch.Tensor(X)
super().__init__(X, X)
def test():
N = 256
L = 128
hippo = HiPPO_LegT(N, dt=1./L)
x = torch.randn(L, 1)
y = hippo(x)
print(y.shape)
z = hippo.reconstruct(y)
print(z.shape)
# mse = torch.mean((z[-1,0,:L].flip(-1) - x.squeeze(-1))**2)
mse = torch.mean((z[-1,0,:L] - x.squeeze(-1))**2)
print(mse)
# print(y.shape)
hippo_legs = HiPPO_LegS(N, max_length=L)
y = hippo_legs(x)
# print(y.shape)
z = hippo_legs(x, fast=True)
print(hippo_legs.reconstruct(z).shape)
# print(y-z)
def plot():
T = 10000
dt = 1e-3
N = 256
nbatches = 10
train = FunctionApprox(T, dt, nbatches, freq=1.0, seed=0)
test = FunctionApprox(T, dt, nbatches, freq=1.0, seed=1)
test_loader = torch.utils.data.DataLoader(test, batch_size=1, shuffle=False)
it = iter(test_loader)
f, _ = next(it)
f, _ = next(it)
f = f.squeeze(0).squeeze(-1)
legt = HiPPO_LegT(N, 1./T)
f_legt = legt.reconstruct(legt(f))[-1]
legs = HiPPO_LegS(N, T)
f_legs = legs.reconstruct(legs(f))[-1]
print(F.mse_loss(f, f_legt))
print(F.mse_loss(f, f_legs))
vals = np.linspace(0.0, 1.0, T)
plt.figure(figsize=(6, 2))
plt.plot(vals, f+0.1, 'k', linewidth=1.0)
plt.plot(vals[:T//1], f_legt[:T//1])
plt.plot(vals[:T//1], f_legs[:T//1])
plt.xlabel('Time (normalized)', labelpad=-10)
plt.xticks([0, 1])
plt.legend(['f', 'legt', 'legs'])
plt.savefig(f'function_approx_whitenoise.pdf', bbox_inches='tight')
# plt.show()
plt.close()
if __name__ == '__main__':
plot()
| hippo-code-master | model/hippo.py |
""" Baseline RNN cells such as the vanilla RNN and GRU. """
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.components import Gate, Linear_, Modrelu, get_activation, get_initializer
from model.orthogonalcell import OrthogonalLinear
class CellBase(nn.Module):
""" Abstract class for our recurrent cell interface.
Passes input through
"""
registry = {}
# https://www.python.org/dev/peps/pep-0487/#subclass-registration
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
# Only register classes with @name attribute
if hasattr(cls, 'name') and cls.name is not None:
cls.registry[cls.name] = cls
name = 'id'
valid_keys = []
def default_initializers(self):
return {}
def default_architecture(self):
return {}
def __init__(self, input_size, hidden_size, initializers=None, architecture=None):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.architecture = self.default_architecture()
self.initializers = self.default_initializers()
if initializers is not None:
self.initializers.update(initializers)
print("Initializers:", initializers)
if architecture is not None:
self.architecture.update(architecture)
assert set(self.initializers.keys()).issubset(self.valid_keys)
assert set(self.architecture.keys()).issubset(self.valid_keys)
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, input, hidden):
return input, input
def default_state(self, input, batch_size=None):
return input.new_zeros(input.size(0) if batch_size is None else batch_size,
self.hidden_size, requires_grad=False)
def output(self, h):
return h
def state_size(self):
return self.hidden_size
def output_size(self):
return self.hidden_size
    def initial_state(self, trainable=False):
        """ Return initial state of the RNN
        This should not need to see the input as it should be batch size agnostic and automatically broadcasted
        # TODO Currently not used
        """
        if trainable:
            # Cache a trainable initial state on the instance and return it
            self.initial_state = torch.zeros(self.hidden_size, requires_grad=True)
            return self.initial_state
        else:
            return torch.zeros(self.hidden_size, requires_grad=True)
class RNNCell(CellBase):
name = 'rnn'
valid_keys = ['hx', 'hh', 'bias']
def default_initializers(self):
return {
'hx': 'xavier',
'hh': 'xavier',
}
def default_architecture(self):
return {
'bias': True,
}
def __init__(self, input_size, hidden_size,
hidden_activation='tanh',
orthogonal=False,
ortho_args=None,
zero_bias_init=False,
**kwargs
):
self.hidden_activation = hidden_activation
self.orthogonal = orthogonal
self.ortho_args = ortho_args
self.zero_bias_init=zero_bias_init
super().__init__(input_size, hidden_size,
**kwargs,
)
def reset_parameters(self):
self.W_hx = Linear_(self.input_size, self.hidden_size, bias=self.architecture['bias'], zero_bias_init=self.zero_bias_init)
get_initializer(self.initializers['hx'], self.hidden_activation)(self.W_hx.weight)
self.hidden_activation_fn = get_activation(self.hidden_activation, self.hidden_size)
self.reset_hidden_to_hidden()
def reset_hidden_to_hidden(self):
if self.orthogonal:
if self.ortho_args is None:
self.ortho_args = {}
self.ortho_args['input_size'] = self.hidden_size
self.ortho_args['output_size'] = self.hidden_size
self.W_hh = OrthogonalLinear(**self.ortho_args)
else:
self.W_hh = nn.Linear(self.hidden_size, self.hidden_size, bias=self.architecture['bias'])
get_initializer(self.initializers['hh'], self.hidden_activation)(self.W_hh.weight)
def forward(self, input, h):
### Update hidden state
hidden_preact = self.W_hx(input) + self.W_hh(h)
hidden = self.hidden_activation_fn(hidden_preact)
return hidden, hidden
class GatedRNNCell(RNNCell):
name = 'gru'
def __init__(self, input_size, hidden_size,
gate='G', # 'N' | 'G'
reset='N',
**kwargs
):
self.gate = gate
self.reset = reset
super().__init__(input_size, hidden_size, **kwargs)
def reset_parameters(self):
super().reset_parameters()
preact_ctor = Linear_
preact_args = [self.input_size + self.hidden_size, self.hidden_size, self.architecture['bias']]
self.W_g = Gate(self.hidden_size, preact_ctor, preact_args, mechanism=self.gate)
self.W_reset = Gate(self.hidden_size, preact_ctor, preact_args, mechanism=self.reset)
def forward(self, input, h):
hx = torch.cat((input, h), dim=-1)
reset = self.W_reset(hx)
_, update = super().forward(input, reset*h)
g = self.W_g(hx)
h = (1.-g) * h + g * update
return h, h
class MinimalRNNCell(CellBase):
name = 'mrnn'
valid_keys = ['hx', 'bias']
def default_initializers(self):
return {
'hx': 'xavier',
}
def default_architecture(self):
return {
'bias': True,
}
def __init__(self, input_size, hidden_size,
hidden_activation='tanh',
orthogonal=False,
ortho_args=None,
zero_bias_init=False,
**kwargs
):
self.hidden_activation = hidden_activation
self.zero_bias_init=zero_bias_init
super().__init__(input_size, hidden_size,
**kwargs,
)
def reset_parameters(self):
self.W_hx = Linear_(self.input_size, self.hidden_size, bias=self.architecture['bias'], zero_bias_init=self.zero_bias_init)
get_initializer(self.initializers['hx'], self.hidden_activation)(self.W_hx.weight)
self.hidden_activation_fn = get_activation(self.hidden_activation, self.hidden_size)
preact_ctor = Linear_
preact_args = [self.input_size + self.hidden_size, self.hidden_size, self.architecture['bias']]
self.W_g = Gate(self.hidden_size, preact_ctor, preact_args, mechanism='G')
def forward(self, input, h):
### Update hidden state
hidden_preact = self.W_hx(input)
hidden = self.hidden_activation_fn(hidden_preact)
hx = torch.cat((input, h), dim=-1)
g = self.W_g(hx)
h = (1.-g) * h + g * hidden
return h, h
class GatedSRNNCell(GatedRNNCell):
name = 'grus'
def __init__(self, input_size, hidden_size,
**kwargs
):
super().__init__(input_size, hidden_size, **kwargs)
def reset_parameters(self):
super().reset_parameters()
def forward(self, input, hidden):
hidden, t = hidden
hx = torch.cat((input, hidden), dim=-1)
reset = self.W_reset(hx)
_, update = super().forward(input, reset*hidden)
g = self.W_g(hx)
g = g * 1. / (t+1)
h = (1.-g) * hidden + g * update
return h, (h, t+1)
def default_state(self, input, batch_size=None):
batch_size = input.size(0) if batch_size is None else batch_size
return (input.new_zeros(batch_size, self.hidden_size, requires_grad=False),
0)
def output(self, state):
""" Converts a state into a single output (tensor) """
h, t = state
return h
class ExpRNNCell(RNNCell):
""" Note: there is a subtle distinction between this and the ExpRNN original cell (now implemented as orthogonalcell.OrthogonalCell) in the initialization of hx, but this shouldn't matter """
name = 'exprnn'
def __init__(self, input_size, hidden_size, **kwargs):
super().__init__(input_size, hidden_size, orthogonal=True, hidden_activation='modrelu', **kwargs)
| hippo-code-master | model/rnncell.py |
from functools import partial
import torch
import torch.nn as nn
from model.exprnn.orthogonal import modrelu
def get_activation(activation, size):
if activation == 'id':
return nn.Identity()
elif activation == 'tanh':
return torch.tanh
elif activation == 'relu':
return torch.relu
elif activation == 'sigmoid':
return torch.sigmoid
elif activation == 'modrelu':
return Modrelu(size)
else:
raise NotImplementedError("hidden activation '{}' is not implemented".format(activation))
def get_initializer(name, activation):
if activation in ['id', 'identity', 'linear', 'modrelu']:
nonlinearity = 'linear'
elif activation in ['relu', 'tanh', 'sigmoid']:
nonlinearity = activation
else:
assert False, f"get_initializer: activation {activation} not supported"
if name == 'uniform':
initializer = partial(torch.nn.init.kaiming_uniform_, nonlinearity=nonlinearity)
elif name == 'normal':
initializer = partial(torch.nn.init.kaiming_normal_, nonlinearity=nonlinearity)
elif name == 'xavier':
initializer = torch.nn.init.xavier_normal_
elif name == 'zero':
initializer = partial(torch.nn.init.constant_, val=0)
elif name == 'one':
initializer = partial(torch.nn.init.constant_, val=1)
else:
assert False, f"get_initializer: initializer type {name} not supported"
return initializer
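# Usage sketch (hypothetical): look up an initializer by name and apply it in place:
#   w = torch.empty(4, 4)
#   get_initializer('xavier', 'tanh')(w)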
class Modrelu(modrelu):
def reset_parameters(self):
        self.b.data.zero_() # initialize the modReLU bias to zero (the parent class draws from U(-0.01, 0.01))
def Linear_(input_size, output_size, bias, init='normal', zero_bias_init=False, **kwargs):
""" Returns a nn.Linear module with initialization options """
l = nn.Linear(input_size, output_size, bias=bias, **kwargs)
get_initializer(init, 'linear')(l.weight)
if bias and zero_bias_init:
nn.init.zeros_(l.bias)
return l
class Gate(nn.Module):
""" Implements gating mechanisms.
Mechanisms:
N - No gate
G - Standard sigmoid gate
"""
def __init__(self, size, preact_ctor, preact_args, mechanism='N'):
super().__init__()
self.size = size
self.mechanism = mechanism
if self.mechanism == 'N':
pass
elif self.mechanism == 'G':
self.W_g = preact_ctor(*preact_args)
else:
assert False, f'Gating type {self.mechanism} is not supported.'
def forward(self, *inputs):
if self.mechanism == 'N':
return 1.0
if self.mechanism == 'G':
g_preact = self.W_g(*inputs)
g = torch.sigmoid(g_preact)
return g
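# Usage sketch for Gate (assumed sizes): a standard sigmoid gate over concatenated inputs:
#   gate = Gate(8, Linear_, [12, 8, True], mechanism='G')
#   g = gate(torch.randn(2, 12)) # (2, 8), entries in (0, 1); mechanism='N' returns the constant 1.0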
| hippo-code-master | model/components.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
def apply_tuple(tup, fn):
"""Apply a function to a Tensor or a tuple of Tensor
"""
if isinstance(tup, tuple):
return tuple((fn(x) if isinstance(x, torch.Tensor) else x) for x in tup)
else:
return fn(tup)
def concat_tuple(tups, dim=0):
"""Concat a list of Tensors or a list of tuples of Tensor
"""
if isinstance(tups[0], tuple):
return tuple((torch.cat(xs, dim) if isinstance(xs[0], torch.Tensor) else xs[0]) for xs in zip(*tups))
else:
return torch.cat(tups, dim)
class RNN(nn.Module):
def __init__(self, cell, dropout=0.0):
super().__init__()
self.cell = cell
if dropout > 0.0:
self.use_dropout = True
self.drop_prob = dropout
self.dropout = nn.Dropout(p=dropout)
else:
self.use_dropout = False
def forward(self, inputs, init_state=None, return_output=False):
"""
cell.forward : (input, state) -> (output, state)
inputs : [length, batch, dim]
"""
# Similar implementation to https://github.com/pytorch/pytorch/blob/9e94e464535e768ad3444525aecd78893504811f/torch/nn/modules/rnn.py#L202
is_packed = isinstance(inputs, nn.utils.rnn.PackedSequence)
if is_packed:
inputs, batch_sizes, sorted_indices, unsorted_indices = inputs
max_batch_size = int(batch_sizes[0])
else:
batch_sizes = None
max_batch_size = inputs.size(1)
sorted_indices = None
unsorted_indices = None
# Construct initial state
if init_state is None:
state = self.cell.default_state(inputs[0], max_batch_size)
else:
state = apply_tuple(init_state, lambda x: x[sorted_indices] if sorted_indices is not None else x)
# Construct recurrent dropout masks
if self.use_dropout:
input_dropout = self.dropout(torch.ones(max_batch_size, self.cell.input_size, device=inputs.device))
recurrent_dropout = self.dropout(torch.ones(max_batch_size, self.cell.hidden_size, device=inputs.device))
output_dropout = self.dropout(torch.ones(max_batch_size, self.output_size(), device=inputs.device))
outputs = []
if not is_packed:
for input in torch.unbind(inputs, dim=0):
if self.use_dropout:
## Recurrent Dropout
input = input * input_dropout
output, new_state = self.cell.forward(input, state)
if self.use_dropout:
output = output * output_dropout
try:
state = (self.dropout(new_state[0]),) + new_state[1:] # TODO not general
except:
state = self.dropout(new_state)
else:
state = new_state
if return_output:
outputs.append(output)
return torch.stack(outputs) if return_output else None, state
else:
# Following implementation at https://github.com/pytorch/pytorch/blob/9e94e464535e768ad3444525aecd78893504811f/aten/src/ATen/native/RNN.cpp#L621
# Batch sizes is a sequence of decreasing lengths, which are offsets
# into a 1D list of inputs. At every step we slice out batch_size elements,
# and possibly account for the decrease in the batch size since the last step,
# which requires us to slice the hidden state (since some sequences
# are completed now). The sliced parts are also saved, because we will need
# to return a tensor of final hidden state.
batch_sizes_og = batch_sizes
batch_sizes = batch_sizes.detach().cpu().numpy()
input_offset = 0
last_batch_size = batch_sizes[0]
saved_states = []
for batch_size in batch_sizes:
step_input = inputs[input_offset:input_offset + batch_size]
input_offset += batch_size
dec = last_batch_size - batch_size
if (dec > 0):
saved_state = apply_tuple(state, lambda x: x[batch_size:])
state = apply_tuple(state, lambda x: x[:batch_size])
saved_states.append(saved_state)
last_batch_size = batch_size
if self.use_dropout:
step_input = step_input * input_dropout[:batch_size]
output, new_state = self.cell.forward(step_input, state)
if self.use_dropout:
output = output * output_dropout[:batch_size]
try:
state = (self.dropout(new_state[0]),) + new_state[1:] # TODO not general
except:
state = self.dropout(new_state)
else:
state = new_state
if return_output:
outputs.append(output)
saved_states.append(state)
saved_states.reverse()
state = concat_tuple(saved_states)
state = apply_tuple(state, lambda x: x[unsorted_indices] if unsorted_indices is not None else x)
if return_output:
outputs = nn.utils.rnn.PackedSequence(torch.cat(outputs, dim=0), batch_sizes_og, sorted_indices, unsorted_indices)
else:
outputs = None
return outputs, state
def state_size(self):
return self.cell.state_size()
def output_size(self):
return self.cell.output_size()
def output(self, state):
return self.cell.output(state)
class RNNWrapper(nn.RNN):
def forward(self, inputs, h_0=None):
output, h_n = super().forward(inputs, h_0)
return output, h_n.squeeze(0)
class LSTMWrapper(nn.LSTM):
# return_output is only here to absorb the argument, making the interface compatible with RNN
def forward(self, inputs, return_output=None, init_state=None):
# init_state is just to absorb the extra argument that can be passed into our custom RNNs. Replaces (h_0, c_0) argument of nn.LSTM
output, (h_n, c_n) = super().forward(inputs, init_state)
return output, (h_n.squeeze(0), c_n.squeeze(0))
def state_size(self):
return self.hidden_size
def output_size(self):
return self.hidden_size
def output(self, state):
return state[0]
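# Usage sketch (assumes a registered cell such as model.rnncell.RNNCell):
#   cell = RNNCell(input_size=3, hidden_size=8)
#   rnn = RNN(cell, dropout=0.1)
#   x = torch.randn(10, 4, 3) # (length, batch, channels)
#   outputs, state = rnn(x, return_output=True) # outputs: (10, 4, 8)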
| hippo-code-master | model/rnn.py |
# Downloaded from https://github.com/Lezcano/expRNN
"""
Adaptation of expm and expm_frechet in numpy for torch
"""
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import torch
import scipy.special
def _onenorm_matrix_power_nnm(A, p):
"""
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
Parameters
----------
A : a square ndarray or matrix or sparse matrix
Input matrix with non-negative entries.
p : non-negative integer
The power to which the matrix is to be raised.
Returns
-------
out : float
The 1-norm of the matrix power p of A.
"""
# check input
if int(p) != p or p < 0:
raise ValueError('expected non-negative integer p')
p = int(p)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
# Explicitly make a column vector so that this works when A is a
# numpy matrix (in addition to ndarray and sparse matrix).
v = torch.ones((A.shape[0], 1), dtype=A.dtype, device=A.device)
M = A.t()
for _ in range(p):
v = M.mm(v)
return torch.max(v).item()
def _onenorm(A):
return torch.norm(A, 1).item()
def _ident_like(A):
return torch.eye(A.shape[0], A.shape[1], dtype=A.dtype, device=A.device)
class _ExpmPadeHelper(object):
"""
Help lazily evaluate a matrix exponential.
The idea is to not do more work than we need for high expm precision,
so we lazily compute matrix powers and store or precompute
other properties of the matrix.
"""
def __init__(self, A):
"""
Initialize the object.
Parameters
----------
A : a dense or sparse square numpy matrix or ndarray
The matrix to be exponentiated.
"""
self.A = A
self._A2 = None
self._A4 = None
self._A6 = None
self._A8 = None
self._A10 = None
self._d4_exact = None
self._d6_exact = None
self._d8_exact = None
self._d10_exact = None
self._d4_approx = None
self._d6_approx = None
self._d8_approx = None
self._d10_approx = None
self.ident = _ident_like(A)
@property
def A2(self):
if self._A2 is None:
self._A2 = self.A.mm(self.A)
return self._A2
@property
def A4(self):
if self._A4 is None:
self._A4 = self.A2.mm(self.A2)
return self._A4
@property
def A6(self):
if self._A6 is None:
self._A6 = self.A4.mm(self.A2)
return self._A6
@property
def A8(self):
if self._A8 is None:
self._A8 = self.A6.mm(self.A2)
return self._A8
@property
def A10(self):
if self._A10 is None:
self._A10 = self.A4.mm(self.A6)
return self._A10
@property
def d4_tight(self):
if self._d4_exact is None:
self._d4_exact = _onenorm(self.A4)**(1/4.)
return self._d4_exact
@property
def d6_tight(self):
if self._d6_exact is None:
self._d6_exact = _onenorm(self.A6)**(1/6.)
return self._d6_exact
@property
def d8_tight(self):
if self._d8_exact is None:
self._d8_exact = _onenorm(self.A8)**(1/8.)
return self._d8_exact
@property
def d10_tight(self):
if self._d10_exact is None:
self._d10_exact = _onenorm(self.A10)**(1/10.)
return self._d10_exact
@property
def d4_loose(self):
return self.d4_tight
@property
def d6_loose(self):
return self.d6_tight
@property
def d8_loose(self):
return self.d8_tight
@property
def d10_loose(self):
return self.d10_tight
def pade3(self):
b = (120., 60., 12., 1.)
U = self.A.mm(b[3]*self.A2 + b[1]*self.ident)
V = b[2]*self.A2 + b[0]*self.ident
return U, V
def pade5(self):
b = (30240., 15120., 3360., 420., 30., 1.)
U = self.A.mm(b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident)
V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
return U, V
def pade7_scaled(self, s):
b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
B = self.A * 2**-s
B2 = self.A2 * 2**(-2*s)
B4 = self.A4 * 2**(-4*s)
B6 = self.A6 * 2**(-6*s)
U = B.mm(b[7]*B6 + b[5]*B4 + b[3]*B2 + b[1]*self.ident)
V = b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
return U, V
def expm32(A):
"""
Compute the matrix exponential using Pade approximation.
Parameters
----------
A : (M,M) array_like or sparse matrix
2D Array or Matrix (sparse or dense) to be exponentiated
Returns
-------
expA : (M,M) ndarray
Matrix exponential of `A`
Notes
-----
This is algorithm (6.1) which is a simplification of algorithm (5.1).
.. versionadded:: 0.12.0
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
"A New Scaling and Squaring Algorithm for the Matrix Exponential."
SIAM Journal on Matrix Analysis and Applications.
31 (3). pp. 970-989. ISSN 1095-7162
"""
return _expm(A)
def _expm(A):
# Core of expm, separated to allow testing exact and approximate
# algorithms.
# Avoid indiscriminate asarray() to allow sparse or other strange arrays.
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
# Trivial case
if A.shape == (1, 1):
return torch.exp(A)
# Track functions of A to help compute the matrix exponential.
h = _ExpmPadeHelper(A)
# Try Pade order 3.
eta_1 = max(h.d4_loose, h.d6_loose)
theta3 = 4.2587300348979312e-001
if eta_1 < theta3 and _ell(h.A, 3) == 0:
U, V = h.pade3()
return _solve_P_Q(U, V)
# Try Pade order 5.
eta_2 = max(h.d4_tight, h.d6_loose)
theta5 = 1.8801526985337688e+000
if eta_2 < theta5 and _ell(h.A, 5) == 0:
U, V = h.pade5()
return _solve_P_Q(U, V)
theta_7 = 3.9257248464332842e+000
eta_3 = max(h.d6_tight, h.d8_loose)
s = max(int(np.ceil(np.log2(eta_3 / theta_7))), 0)
s += _ell(2**-s * h.A, 7)
U, V = h.pade7_scaled(s)
X = _solve_P_Q(U, V)
return torch.matrix_power(X, 2**s)
def _solve_P_Q(U, V):
P = U + V
Q = -U + V
    # torch.solve was removed in newer PyTorch; torch.linalg.solve(Q, P) is the modern equivalent
    return torch.solve(P, Q)[0]
def _ell(A, m):
"""
A helper function for expm_2009.
Parameters
----------
A : linear operator
A linear operator whose norm of power we care about.
m : int
The power of the linear operator
Returns
-------
value : int
A value related to a bound.
"""
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
p = 2*m + 1
# The c_i are explained in (2.2) and (2.6) of the 2005 expm paper.
# They are coefficients of terms of a generating function series expansion.
choose_2p_p = scipy.special.comb(2*p, p, exact=True)
abs_c_recip = float(choose_2p_p * math.factorial(2*p + 1))
    # This is explained after Eq. (1.2) of the 2009 expm paper.
    # It is the "unit roundoff" of IEEE single precision arithmetic
    # (2^-24; this file is the float32 adaptation of the double precision algorithm).
    u = 2.**-24
# Compute the one-norm of matrix power p of abs(A).
A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), p)
# Treat zero norm as a special case.
if not A_abs_onenorm:
return 0
alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
return max(int(np.ceil(np.log2(alpha/u) / (2 * m))), 0)
def differential(f, A, E):
""" Computes the differential of f at A when acting on E: (df)_A(E) """
n = A.size(0)
M = torch.zeros(2*n, 2*n, dtype=A.dtype, device=A.device, requires_grad=False)
M[:n, :n] = A
M[n:, n:] = A
M[:n, n:] = E
return f(M)[:n, n:]
| hippo-code-master | model/exprnn/expm32.py |
# Downloaded from https://github.com/Lezcano/expRNN
import torch
import numpy as np
import scipy.linalg as la
def henaff_init_(A):
size = A.size(0) // 2
diag = A.new(size).uniform_(-np.pi, np.pi)
return create_diag_(A, diag)
def cayley_init_(A):
size = A.size(0) // 2
diag = A.new(size).uniform_(0., np.pi / 2.)
diag = -torch.sqrt((1. - torch.cos(diag))/(1. + torch.cos(diag)))
return create_diag_(A, diag)
# We include a few more initializations that could be useful for other problems
def haar_init_(A):
""" Haar initialization on SO(n) """
torch.nn.init.orthogonal_(A)
with torch.no_grad():
if A.det() < 0.:
# Go bijectively from O^-(n) to O^+(n) \iso SO(n)
idx = np.random.randint(0, A.size(0))
A[idx] *= -1.
An = la.logm(A.data.cpu().numpy()).real
An = .5 * (An - An.T)
A.copy_(torch.tensor(An))
return A
def haar_diag_init_(A):
""" Block-diagonal skew-symmetric matrix with eigenvalues distributed as those from a Haar """
haar_init_(A)
with torch.no_grad():
An = A.data.cpu().numpy()
eig = la.eigvals(An).imag
eig = eig[::2]
if A.size(0) % 2 == 1:
eig = eig[:-1]
eig = torch.tensor(eig)
return create_diag_(A, eig)
def normal_squeeze_diag_init_(A):
size = A.size(0) // 2
diag = A.new(size).normal_(0, 1).fmod_(np.pi/8.)
return create_diag_(A, diag)
def normal_diag_init_(A):
size = A.size(0) // 2
diag = A.new(size).normal_(0, 1).fmod_(np.pi)
return create_diag_(A, diag)
def create_diag_(A, diag):
n = A.size(0)
diag_z = torch.zeros(n-1)
diag_z[::2] = diag
A_init = torch.diag(diag_z, diagonal=1)
A_init = A_init - A_init.T
with torch.no_grad():
A.copy_(A_init)
return A
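# Illustrative usage (not part of the original file): every initializer above
# fills A with a skew-symmetric matrix, the starting point expected by the
# orthogonal parametrizations in this package.
#
#     _A = torch.empty(6, 6)
#     henaff_init_(_A)
#     assert torch.allclose(_A, -_A.t())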
| hippo-code-master | model/exprnn/initialization.py |
# Adapted from https://github.com/Lezcano/expRNN
import torch
import torch.nn as nn
from .parametrization import Parametrization
class Orthogonal(Parametrization):
""" Class that implements optimization restricted to the Stiefel manifold """
def __init__(self, input_size, output_size, initializer_skew, mode, param):
"""
mode: "static" or a tuple such that:
mode[0] == "dynamic"
mode[1]: int, K, the number of steps after which we should change the basis of the dyn triv
mode[2]: int, M, the number of changes of basis after which we should project back onto the manifold the basis. This is particularly helpful for small values of K.
        param: A parametrization in terms of skew-symmetric matrices
"""
max_size = max(input_size, output_size)
A = torch.empty(max_size, max_size)
base = torch.empty(input_size, output_size)
super(Orthogonal, self).__init__(A, base, mode)
self.input_size = input_size
self.output_size = output_size
self.param = param
self.init_A = initializer_skew
self.init_base = nn.init.eye_
self.reset_parameters()
def reset_parameters(self):
self.init_A(self.A)
self.init_base(self.base)
def forward(self, input):
return input.matmul(self.B)
def retraction(self, A, base):
# This could be any parametrization of a tangent space
A = A.triu(diagonal=1)
A = A - A.t()
B = base.mm(self.param(A))
if self.input_size != self.output_size:
B = B[:self.input_size, :self.output_size]
return B
def project(self, base):
try:
# Compute the projection using the thin SVD decomposition
U, _, V = torch.svd(base, some=True)
return U.mm(V.t())
except RuntimeError:
# If the svd does not converge, fallback to the (thin) QR decomposition
x = base
if base.size(0) < base.size(1):
x = base.t()
ret = torch.qr(x, some=True).Q
if base.size(0) < base.size(1):
ret = ret.t()
return ret
class modrelu(nn.Module):
def __init__(self, features):
# For now we just support square layers
super(modrelu, self).__init__()
self.features = features
self.b = nn.Parameter(torch.Tensor(self.features))
self.reset_parameters()
def reset_parameters(self):
self.b.data.uniform_(-0.01, 0.01)
def forward(self, inputs):
norm = torch.abs(inputs)
biased_norm = norm + self.b
magnitude = nn.functional.relu(biased_norm)
phase = torch.sign(inputs)
return phase * magnitude
class OrthogonalRNN(nn.Module):
def __init__(self, input_size, hidden_size, initializer_skew, mode, param):
super(OrthogonalRNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.recurrent_kernel = Orthogonal(hidden_size, hidden_size, initializer_skew, mode, param=param)
self.input_kernel = nn.Linear(in_features=self.input_size, out_features=self.hidden_size, bias=False)
self.nonlinearity = modrelu(hidden_size)
self.reset_parameters()
def reset_parameters(self):
nn.init.kaiming_normal_(self.input_kernel.weight.data, nonlinearity="relu")
def default_hidden(self, input):
return input.new_zeros(input.size(0), self.hidden_size, requires_grad=False)
def forward(self, input, hidden):
input = self.input_kernel(input)
hidden = self.recurrent_kernel(hidden)
out = input + hidden
out = self.nonlinearity(out)
return out, out
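# Minimal construction sketch (illustrative, not part of the original file),
# assuming the sibling modules of this package:
#
#     from .initialization import henaff_init_
#     from .trivializations import expm
#
#     cell = OrthogonalRNN(input_size=1, hidden_size=128,
#                          initializer_skew=henaff_init_,
#                          mode=("dynamic", 100, 100), param=expm)
#     h = cell.default_hidden(torch.zeros(16, 1))
#     out, h = cell(torch.zeros(16, 1), h)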
| hippo-code-master | model/exprnn/orthogonal.py |
# Downloaded from https://github.com/Lezcano/expRNN
import torch
# from model.exprnn.expm32 import expm32, differential
from .expm32 import expm32, differential
def cayley_map(X):
n = X.size(0)
Id = torch.eye(n, dtype=X.dtype, device=X.device)
return torch.solve(Id - X, Id + X)[0]
class expm_class(torch.autograd.Function):
@staticmethod
def forward(ctx, A):
ctx.save_for_backward(A)
return expm32(A)
@staticmethod
def backward(ctx, G):
(A,) = ctx.saved_tensors
return differential(expm32, A.t(), G)
expm = expm_class.apply
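# Illustrative gradient check (not part of the original file): `expm` is
# differentiable end to end, with the backward pass given by the Frechet
# derivative computed via `differential` above.
#
#     _A = torch.randn(4, 4, requires_grad=True)
#     expm(_A).trace().backward()
#     assert _A.grad is not None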
| hippo-code-master | model/exprnn/trivializations.py |
# Downloaded from https://github.com/Lezcano/expRNN
import torch
import torch.nn as nn
def get_parameters(model):
parametrized_params = []
def get_parametrized_params(mod):
nonlocal parametrized_params
if isinstance(mod, Parametrization):
parametrized_params.append(mod.A)
def not_in(elem, l):
return all(elem is not x for x in l)
model.apply(get_parametrized_params)
unconstrained_params = (param for param in model.parameters() if not_in(param, parametrized_params))
return unconstrained_params, parametrized_params
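# Typical usage sketch (illustrative, not part of the original file):
# expRNN-style training keeps two optimizers, one for the unconstrained
# weights and one, usually with a smaller learning rate, for the
# skew-symmetric parameters A of each Parametrization module.
#
#     unconstrained, parametrized = get_parameters(model)
#     optim = torch.optim.RMSprop(unconstrained, lr=1e-3)
#     optim_orth = torch.optim.RMSprop(parametrized, lr=1e-4)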
class Parametrization(nn.Module):
"""
Implements the parametrization of a manifold in terms of a Euclidean space
It gives the parametrized matrix through the attribute `B`
To use it, subclass it and implement the method `retraction` and the method `forward` (and optionally `project`). See the documentation in these methods for details
You can find an example in the file `orthogonal.py` where we implement the Orthogonal class to optimize over the Stiefel manifold using an arbitrary retraction
"""
def __init__(self, A, base, mode):
"""
mode: "static" or a tuple such that:
mode[0] == "dynamic"
mode[1]: int, K, the number of steps after which we should change the basis of the dyn triv
mode[2]: int, M, the number of changes of basis after which we should project back onto the manifold the basis. This is particularly helpful for small values of K.
"""
super(Parametrization, self).__init__()
assert mode == "static" or (isinstance(mode, tuple) and len(mode) == 3 and mode[0] == "dynamic")
self.A = nn.Parameter(A)
self.register_buffer("_B", None)
self.register_buffer('base', base)
# This is necessary, as it will be generated again the first time that self.B is called
# We still need to register the buffer though
if mode == "static":
self.mode = mode
else:
self.mode = mode[0]
self.K = mode[1]
self.M = mode[2]
self.k = 0
self.m = 0
# This implements the parametrization trick in a rather slick way.
# We put a hook on A, such that, whenever its gradients are computed, we
# get rid of self._B so that it has to be recomputed the next time that
# self.B is accessed
def hook(grad):
nonlocal self
self._B = None
self.A.register_hook(hook)
def rebase(self):
with torch.no_grad():
self.base.data.copy_(self._B.data)
self.A.data.zero_()
@property
def B(self):
not_B = self._B is None
if not_B or (not self._B.grad_fn and torch.is_grad_enabled()):
self._B = self.retraction(self.A, self.base)
# Just to be safe
self._B.requires_grad_()
            # self._B is not a leaf tensor, so we explicitly ask autograd to retain its grad
self._B.retain_grad()
# Increment the counters for the dyntriv algorithm if we have generated B
if self.mode == "dynamic" and not_B:
if self.k == 0:
self.rebase()
# Project the base back to the manifold every M changes of base
# Increment the counter before as we don't project the first time
self.m = (self.m + 1) % self.M
# It's optional to implement this method
if self.m == 0 and hasattr(self, "project"):
with torch.no_grad():
self.base = self.project(self.base)
# Change the basis after K optimization steps
# Increment the counter afterwards as we change the basis in the first iteration
if self.K != "infty":
self.k = (self.k + 1) % self.K
else:
# Make sure that we just update the base once
if self.k == 0:
self.k = 1
return self._B
def retraction(self, A, base):
"""
It computes r_{base}(A).
Notice that A will not always be in the tangent space of our manifold
For this reason, we first have to use A to parametrize the tangent space,
and then compute the retraction
        When dealing with Lie groups, A is always projected into the Lie algebra, as an optimization (cf. Section E in the paper)
"""
raise NotImplementedError
def project(self, base):
"""
This method is OPTIONAL
It returns the projected base back into the manifold
"""
raise NotImplementedError
def forward(self, input):
"""
It uses the attribute self.B to implement the layer itself (e.g. Linear, CNN, ...)
"""
raise NotImplementedError
| hippo-code-master | model/exprnn/parametrization.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gym
import torch
from collections import deque, defaultdict
from gym import spaces
import numpy as np
from gym_minigrid.minigrid import OBJECT_TO_IDX, COLOR_TO_IDX
# Helper functions and wrappers
def _format_observation(obs):
obs = torch.tensor(obs)
return obs.view((1, 1) + obs.shape) # (...) -> (T,B,...).
class Minigrid2Image(gym.ObservationWrapper):
"""Get MiniGrid observation to ignore language instruction."""
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = env.observation_space.spaces['image']
def observation(self, observation):
return observation['image']
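# Usage sketch (illustrative, not part of the original file), assuming the
# MiniGrid environments have been registered (e.g. via `import gym_minigrid`):
#
#     env = Minigrid2Image(gym.make("MiniGrid-Empty-8x8-v0"))
#     obs = env.reset()  # the (7, 7, 3) image only; the mission string is dropped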
class Observation_WrapperSetup:
"""Environment wrapper to format observation items into torch."""
def __init__(self, gym_env, fix_seed=False, env_seed=1):
self.gym_env = gym_env
self.episode_return = None
self.episode_step = None
self.episode_win = None
self.fix_seed = fix_seed
self.env_seed = env_seed
def initial(self):
initial_reward = torch.zeros(1, 1)
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
self.episode_win = torch.zeros(1, 1, dtype=torch.int32)
initial_done = torch.ones(1, 1, dtype=torch.uint8)
if self.fix_seed:
self.gym_env.seed(seed=self.env_seed)
initial_frame = _format_observation(self.gym_env.reset())
if self.gym_env.carrying:
carried_col, carried_obj = torch.LongTensor([[COLOR_TO_IDX[self.gym_env.carrying.color]]]), torch.LongTensor([[OBJECT_TO_IDX[self.gym_env.carrying.type]]])
else:
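            # Defaults when the agent carries nothing:
            # COLOR_TO_IDX['grey'] == 5 and OBJECT_TO_IDX['empty'] == 1.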
carried_col, carried_obj = torch.LongTensor([[5]]), torch.LongTensor([[1]])
return dict(
frame=initial_frame,
reward=initial_reward,
done=initial_done,
episode_return=self.episode_return,
episode_step=self.episode_step,
episode_win=self.episode_win,
carried_col = carried_col,
carried_obj = carried_obj)
def step(self, action):
frame, reward, done, _ = self.gym_env.step(action.item())
self.episode_step += 1
episode_step = self.episode_step
self.episode_return += reward
episode_return = self.episode_return
if done and reward > 0:
self.episode_win[0][0] = 1
else:
self.episode_win[0][0] = 0
episode_win = self.episode_win
if done:
if self.fix_seed:
self.gym_env.seed(seed=self.env_seed)
frame = self.gym_env.reset()
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
self.episode_win = torch.zeros(1, 1, dtype=torch.int32)
frame = _format_observation(frame)
reward = torch.tensor(reward).view(1, 1)
done = torch.tensor(done).view(1, 1)
if self.gym_env.carrying:
carried_col, carried_obj = torch.LongTensor([[COLOR_TO_IDX[self.gym_env.carrying.color]]]), torch.LongTensor([[OBJECT_TO_IDX[self.gym_env.carrying.type]]])
else:
carried_col, carried_obj = torch.LongTensor([[5]]), torch.LongTensor([[1]])
return dict(
frame=frame,
reward=reward,
done=done,
episode_return=episode_return,
episode_step = episode_step,
episode_win = episode_win,
carried_col = carried_col,
carried_obj = carried_obj
)
def get_full_obs(self):
env = self.gym_env.unwrapped
full_grid = env.grid.encode()
full_grid[env.agent_pos[0]][env.agent_pos[1]] = np.array([
OBJECT_TO_IDX['agent'],
COLOR_TO_IDX['red'],
env.agent_dir
])
return full_grid
def close(self):
self.gym_env.close()
class FrameStack(gym.Wrapper):
def __init__(self, env, k):
"""Stack k last frames.
Returns lazy array, which is much more memory efficient."""
gym.Wrapper.__init__(self, env)
self.k = k
self.frames = deque([], maxlen=k)
shp = env.observation_space.shape
self.observation_space = spaces.Box(
low=0,
high=255,
shape=(shp[:-1] + (shp[-1] * k,)),
dtype=env.observation_space.dtype)
def reset(self):
ob = self.env.reset()
for _ in range(self.k):
self.frames.append(ob)
return self._get_ob()
def step(self, action):
ob, reward, done, info = self.env.step(action)
self.frames.append(ob)
return self._get_ob(), reward, done, info
def _get_ob(self):
assert len(self.frames) == self.k
return LazyFrames(list(self.frames))
class LazyFrames(object):
def __init__(self, frames):
"""This object ensures that common frames between the observations are only stored once.
It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay
buffers.
This object should only be converted to numpy array before being passed to the model."""
self._frames = frames
self._out = None
def _force(self):
if self._out is None:
self._out = np.concatenate(self._frames, axis=-1)
self._frames = None
return self._out
def __array__(self, dtype=None):
out = self._force()
if dtype is not None:
out = out.astype(dtype)
return out
def __len__(self):
return len(self._force())
def __getitem__(self, i):
return self._force()[i]
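# Illustrative example (not part of the original file): stacking 4 MiniGrid
# frames concatenates along the channel axis, so a (7, 7, 3) observation
# becomes (7, 7, 12); LazyFrames defers the concatenation until the
# observation is actually converted to an array.
#
#     stacked = FrameStack(Minigrid2Image(gym.make("MiniGrid-Empty-8x8-v0")), 4)
#     assert np.asarray(stacked.reset()).shape == (7, 7, 12)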
| adversarially-motivated-intrinsic-goals-main | env_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Naive profiling using timeit."""
import collections
import timeit
class Timings:
"""Not thread-safe."""
def __init__(self):
self._means = collections.defaultdict(int)
self._vars = collections.defaultdict(int)
self._counts = collections.defaultdict(int)
self.reset()
def reset(self):
self.last_time = timeit.default_timer()
def time(self, name):
"""Save an update for event `name`.
Nerd alarm: We could just store a
collections.defaultdict(list)
and compute means and standard deviations at the end. But thanks to the
clever math in Sutton-Barto
(http://www.incompleteideas.net/book/first/ebook/node19.html) and
https://math.stackexchange.com/a/103025/5051 we can update both the
means and the stds online. O(1) FTW!
"""
now = timeit.default_timer()
x = now - self.last_time
self.last_time = now
n = self._counts[name]
mean = self._means[name] + (x - self._means[name]) / (n + 1)
var = (
n * self._vars[name] + n * (self._means[name] - mean) ** 2 + (x - mean) ** 2
) / (n + 1)
self._means[name] = mean
self._vars[name] = var
self._counts[name] += 1
def means(self):
return self._means
def vars(self):
return self._vars
def stds(self):
return {k: v ** 0.5 for k, v in self._vars.items()}
def summary(self, prefix=""):
means = self.means()
stds = self.stds()
total = sum(means.values())
result = prefix
for k in sorted(means, key=means.get, reverse=True):
result += f"\n %s: %.6fms +- %.6fms (%.2f%%) " % (
k,
1000 * means[k],
1000 * stds[k],
100 * means[k] / total,
)
result += "\nTotal: %.6fms" % (1000 * total)
return result
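# Illustrative usage (not part of the original file); `do_work()` stands in
# for whatever is being profiled:
#
#     timings = Timings()
#     for _ in range(100):
#         do_work()
#         timings.time("work")
#     print(timings.summary())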
| adversarially-motivated-intrinsic-goals-main | torchbeast/core/prof.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import datetime
import csv
import json
import logging
import os
import time
from typing import Dict
import git
def gather_metadata() -> Dict:
date_start = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
# gathering git metadata
try:
repo = git.Repo(search_parent_directories=True)
git_sha = repo.commit().hexsha
git_data = dict(
commit=git_sha,
branch=None if repo.head.is_detached else repo.active_branch.name,
is_dirty=repo.is_dirty(),
path=repo.git_dir,
)
except git.InvalidGitRepositoryError:
git_data = None
# gathering slurm metadata
if "SLURM_JOB_ID" in os.environ:
slurm_env_keys = [k for k in os.environ if k.startswith("SLURM")]
slurm_data = {}
for k in slurm_env_keys:
d_key = k.replace("SLURM_", "").replace("SLURMD_", "").lower()
slurm_data[d_key] = os.environ[k]
else:
slurm_data = None
return dict(
date_start=date_start,
date_end=None,
successful=False,
git=git_data,
slurm=slurm_data,
env=os.environ.copy(),
)
class FileWriter:
def __init__(
self,
xpid: str = None,
xp_args: dict = None,
rootdir: str = "~/palaas",
symlink_to_latest: bool = True,
):
if not xpid:
# make unique id
xpid = "{proc}_{unixtime}".format(
proc=os.getpid(), unixtime=int(time.time())
)
self.xpid = xpid
self._tick = 0
# metadata gathering
if xp_args is None:
xp_args = {}
self.metadata = gather_metadata()
# we need to copy the args, otherwise when we close the file writer
# (and rewrite the args) we might have non-serializable objects (or
# other nasty stuff).
self.metadata["args"] = copy.deepcopy(xp_args)
self.metadata["xpid"] = self.xpid
formatter = logging.Formatter("%(message)s")
self._logger = logging.getLogger("palaas/out")
# to stdout handler
shandle = logging.StreamHandler()
shandle.setFormatter(formatter)
self._logger.addHandler(shandle)
self._logger.setLevel(logging.INFO)
rootdir = os.path.expandvars(os.path.expanduser(rootdir))
# to file handler
self.basepath = os.path.join(rootdir, self.xpid)
if not os.path.exists(self.basepath):
self._logger.info("Creating log directory: %s", self.basepath)
os.makedirs(self.basepath, exist_ok=True)
else:
self._logger.info("Found log directory: %s", self.basepath)
if symlink_to_latest:
            # Add 'latest' as symlink unless it exists and is not a symlink.
symlink = os.path.join(rootdir, "latest")
try:
if os.path.islink(symlink):
os.remove(symlink)
if not os.path.exists(symlink):
os.symlink(self.basepath, symlink)
self._logger.info("Symlinked log directory: %s", symlink)
except OSError:
# os.remove() or os.symlink() raced. Don't do anything.
pass
self.paths = dict(
msg="{base}/out.log".format(base=self.basepath),
logs="{base}/logs.csv".format(base=self.basepath),
fields="{base}/fields.csv".format(base=self.basepath),
meta="{base}/meta.json".format(base=self.basepath),
)
self._logger.info("Saving arguments to %s", self.paths["meta"])
if os.path.exists(self.paths["meta"]):
self._logger.warning(
"Path to meta file already exists. " "Not overriding meta."
)
else:
self._save_metadata()
self._logger.info("Saving messages to %s", self.paths["msg"])
if os.path.exists(self.paths["msg"]):
self._logger.warning(
"Path to message file already exists. " "New data will be appended."
)
fhandle = logging.FileHandler(self.paths["msg"])
fhandle.setFormatter(formatter)
self._logger.addHandler(fhandle)
self._logger.info("Saving logs data to %s", self.paths["logs"])
self._logger.info("Saving logs' fields to %s", self.paths["fields"])
if os.path.exists(self.paths["logs"]):
self._logger.warning(
"Path to log file already exists. " "New data will be appended."
)
with open(self.paths["fields"], "r") as csvfile:
reader = csv.reader(csvfile)
self.fieldnames = list(reader)[0]
else:
self.fieldnames = ["_tick", "_time"]
self._fieldfile = open(self.paths["fields"], "w")
self._fieldwriter = csv.writer(self._fieldfile)
self._logfile = open(self.paths["logs"], "a")
self._logwriter = csv.DictWriter(self._logfile, fieldnames=self.fieldnames)
def log(self, to_log: Dict, tick: int = None, verbose: bool = False) -> None:
if tick is not None:
raise NotImplementedError
else:
to_log["_tick"] = self._tick
self._tick += 1
to_log["_time"] = time.time()
old_len = len(self.fieldnames)
for k in to_log:
if k not in self.fieldnames:
self.fieldnames.append(k)
if old_len != len(self.fieldnames):
self._fieldwriter.writerow(self.fieldnames)
self._logger.info("Updated log fields: %s", self.fieldnames)
if to_log["_tick"] == 0:
self._logfile.write("# %s\n" % ",".join(self.fieldnames))
if verbose:
self._logger.info(
"LOG | %s",
", ".join(["{}: {}".format(k, to_log[k]) for k in sorted(to_log)]),
)
self._logwriter.writerow(to_log)
self._logfile.flush()
def close(self, successful: bool = True) -> None:
self.metadata["date_end"] = datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S.%f"
)
self.metadata["successful"] = successful
self._save_metadata()
for f in [self._logfile, self._fieldfile]:
f.close()
def _save_metadata(self) -> None:
with open(self.paths["meta"], "w") as jsonfile:
json.dump(self.metadata, jsonfile, indent=4, sort_keys=True)
| adversarially-motivated-intrinsic-goals-main | torchbeast/core/file_writer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This file taken from
# https://github.com/deepmind/scalable_agent/blob/
# cd66d00914d56c8ba2f0615d9cdeefcb169a8d70/vtrace.py
# and modified.
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compute V-trace off-policy actor critic targets.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
"""
import collections
import torch
import torch.nn.functional as F
VTraceFromLogitsReturns = collections.namedtuple(
"VTraceFromLogitsReturns",
[
"vs",
"pg_advantages",
"log_rhos",
"behavior_action_log_probs",
"target_action_log_probs",
],
)
VTraceReturns = collections.namedtuple("VTraceReturns", "vs pg_advantages")
def action_log_probs(policy_logits, actions):
return -F.nll_loss(
F.log_softmax(torch.flatten(policy_logits, 0, -2), dim=-1),
torch.flatten(actions),
reduction="none",
).view_as(actions)
def from_logits(
behavior_policy_logits,
target_policy_logits,
actions,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace for softmax policies."""
target_action_log_probs = action_log_probs(target_policy_logits, actions)
behavior_action_log_probs = action_log_probs(behavior_policy_logits, actions)
log_rhos = target_action_log_probs - behavior_action_log_probs
vtrace_returns = from_importance_weights(
log_rhos=log_rhos,
discounts=discounts,
rewards=rewards,
values=values,
bootstrap_value=bootstrap_value,
clip_rho_threshold=clip_rho_threshold,
clip_pg_rho_threshold=clip_pg_rho_threshold,
)
return VTraceFromLogitsReturns(
log_rhos=log_rhos,
behavior_action_log_probs=behavior_action_log_probs,
target_action_log_probs=target_action_log_probs,
**vtrace_returns._asdict(),
)
@torch.no_grad()
def from_importance_weights(
log_rhos,
discounts,
rewards,
values,
bootstrap_value,
clip_rho_threshold=1.0,
clip_pg_rho_threshold=1.0,
):
"""V-trace from log importance weights."""
with torch.no_grad():
rhos = torch.exp(log_rhos)
if clip_rho_threshold is not None:
clipped_rhos = torch.clamp(rhos, max=clip_rho_threshold)
else:
clipped_rhos = rhos
cs = torch.clamp(rhos, max=1.0)
# Append bootstrapped value to get [v1, ..., v_t+1]
values_t_plus_1 = torch.cat(
[values[1:], torch.unsqueeze(bootstrap_value, 0)], dim=0
)
deltas = clipped_rhos * (rewards + discounts * values_t_plus_1 - values)
acc = torch.zeros_like(bootstrap_value)
result = []
for t in range(discounts.shape[0] - 1, -1, -1):
acc = deltas[t] + discounts[t] * cs[t] * acc
result.append(acc)
result.reverse()
vs_minus_v_xs = torch.stack(result)
# Add V(x_s) to get v_s.
vs = torch.add(vs_minus_v_xs, values)
# Advantage for policy gradient.
broadcasted_bootstrap_values = torch.ones_like(vs[0]) * bootstrap_value
vs_t_plus_1 = torch.cat(
[vs[1:], broadcasted_bootstrap_values.unsqueeze(0)], dim=0
)
if clip_pg_rho_threshold is not None:
clipped_pg_rhos = torch.clamp(rhos, max=clip_pg_rho_threshold)
else:
clipped_pg_rhos = rhos
pg_advantages = clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values)
# Make sure no gradients backpropagated through the returned values.
return VTraceReturns(vs=vs, pg_advantages=pg_advantages)
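# Tiny worked example (illustrative, not part of the original file): with
# log_rhos == 0 (on-policy) and zero values, the V-trace targets reduce to
# plain discounted returns bootstrapped from `bootstrap_value`.
#
#     T, B = 3, 1
#     out = from_importance_weights(
#         log_rhos=torch.zeros(T, B),
#         discounts=torch.full((T, B), 0.9),
#         rewards=torch.ones(T, B),
#         values=torch.zeros(T, B),
#         bootstrap_value=torch.zeros(B),
#     )
#     # out.vs[:, 0] == [1 + 0.9 * 1.9, 1 + 0.9 * 1.0, 1.0] == [2.71, 1.9, 1.0]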
| adversarially-motivated-intrinsic-goals-main | torchbeast/core/vtrace.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""The environment class."""
import torch
def _format_frame(frame):
frame = torch.from_numpy(frame)
return frame.view((1, 1) + frame.shape) # (...) -> (T,B,...).
class Environment:
def __init__(self, gym_env):
self.gym_env = gym_env
self.episode_return = None
self.episode_step = None
def initial(self):
initial_reward = torch.zeros(1, 1)
# This supports only single-tensor actions ATM.
initial_last_action = torch.zeros(1, 1, dtype=torch.int64)
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
initial_done = torch.ones(1, 1, dtype=torch.bool)
initial_frame = _format_frame(self.gym_env.reset())
return dict(
frame=initial_frame,
reward=initial_reward,
done=initial_done,
episode_return=self.episode_return,
episode_step=self.episode_step,
last_action=initial_last_action,
)
def step(self, action):
frame, reward, done, unused_info = self.gym_env.step(action.item())
self.episode_step += 1
self.episode_return += reward
episode_step = self.episode_step
episode_return = self.episode_return
if done:
frame = self.gym_env.reset()
self.episode_return = torch.zeros(1, 1)
self.episode_step = torch.zeros(1, 1, dtype=torch.int32)
frame = _format_frame(frame)
reward = torch.tensor(reward).view(1, 1)
done = torch.tensor(done).view(1, 1)
return dict(
frame=frame,
reward=reward,
done=done,
episode_return=episode_return,
episode_step=episode_step,
last_action=action,
)
def close(self):
self.gym_env.close()
| adversarially-motivated-intrinsic-goals-main | torchbeast/core/environment.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| adversarially-motivated-intrinsic-goals-main | monobeast/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Must be run with OMP_NUM_THREADS=1
import random
import argparse
import logging
import os
import threading
import time
import timeit
import traceback
import pprint
import typing
import torch
from torch import multiprocessing as mp
from torch import nn
from torch.nn import functional as F
import gym
import gym_minigrid.wrappers as wrappers
from torch.distributions.normal import Normal
from torchbeast.core import environment
from torchbeast.core import file_writer
from torchbeast.core import prof
from torchbeast.core import vtrace
from env_utils import Observation_WrapperSetup, FrameStack
# Some Global Variables
# We start t* at 7 steps.
generator_batch = dict()
generator_batch_aux = dict()
generator_current_target = 7.0
generator_count = 0
# yapf: disable
parser = argparse.ArgumentParser(description='PyTorch Scalable Agent')
parser.add_argument('--env', type=str, default='MiniGrid-Empty-8x8-v0',
help='Gym environment.')
parser.add_argument('--mode', default='train',
choices=['train', 'test', 'test_render'],
help='Training or test mode.')
parser.add_argument('--xpid', default=None,
help='Experiment id (default: None).')
# Training settings.
parser.add_argument('--disable_checkpoint', action='store_true',
help='Disable saving checkpoint.')
parser.add_argument('--savedir', default='./experimentsMinigrid',
help='Root dir where experiment data will be saved.')
parser.add_argument('--total_frames', default=600000000, type=int, metavar='T',
help='Total environment frames to train for.')
parser.add_argument('--num_actors', default=4, type=int, metavar='N',
help='Number of actors (default: 4).')
parser.add_argument('--num_buffers', default=None, type=int,
metavar='N', help='Number of shared-memory buffers.')
parser.add_argument('--num_threads', default=4, type=int,
                    metavar='N', help='Number of learner threads.')
parser.add_argument('--disable_cuda', action='store_true',
help='Disable CUDA.')
# Loss settings.
parser.add_argument('--entropy_cost', default=0.0005, type=float,
help='Entropy cost/multiplier.')
parser.add_argument('--generator_entropy_cost', default=0.05, type=float,
help='Entropy cost/multiplier.')
parser.add_argument('--baseline_cost', default=0.5, type=float,
help='Baseline cost/multiplier.')
parser.add_argument('--discounting', default=0.99, type=float,
help='Discounting factor.')
parser.add_argument('--reward_clipping', default='abs_one',
choices=['abs_one', 'soft_asymmetric', 'none'],
help='Reward clipping.')
# Optimizer settings.
parser.add_argument('--learning_rate', default=0.001, type=float,
metavar='LR', help='Learning rate.')
parser.add_argument('--generator_learning_rate', default=0.002, type=float,
metavar='LR', help='Learning rate.')
parser.add_argument('--alpha', default=0.99, type=float,
help='RMSProp smoothing constant.')
parser.add_argument('--momentum', default=0, type=float,
help='RMSProp momentum.')
parser.add_argument('--epsilon', default=0.01, type=float,
help='RMSProp epsilon.')
# Other Hyperparameters
parser.add_argument('--batch_size', default=8, type=int, metavar='B',
                    help='Learner batch size (default: 8).')
parser.add_argument('--generator_batch_size', default=32, type=int, metavar='BB',
                    help='Generator batch size (default: 32).')
parser.add_argument('--unroll_length', default=100, type=int, metavar='T',
                    help='The unroll length (time dimension; default: 100).')
parser.add_argument('--goal_dim', default=10, type=int,
help='Size of Goal Embedding')
parser.add_argument('--state_embedding_dim', default=256, type=int,
help='Dimension of the state embedding representation used in the student')
parser.add_argument('--generator_reward_negative', default= -0.1, type=float,
                    help='Negative baseline reward for the generator; offset by +1 when the goal is reached after at least t* steps')
parser.add_argument('--generator_threshold', default=-0.5, type=float,
                    help='Threshold mean reward for which the scheduler increases difficulty')
parser.add_argument('--generator_counts', default=10, type=int,
                    help='Number of times before the generator increases difficulty')
parser.add_argument('--generator_maximum', default=100, type=float,
help='Maximum difficulty')
parser.add_argument('--generator_reward_coef', default=1.0, type=float,
help='Coefficient for the generator reward')
# Map Layout
parser.add_argument('--fix_seed', action='store_true',
help='Fix the environment seed so that it is \
no longer procedurally generated but rather a layout every time.')
parser.add_argument('--env_seed', default=1, type=int,
help='The seed to set for the env if we are using a single fixed seed.')
parser.add_argument('--inner', action='store_true',
                    help='Exclude the outer wall')
parser.add_argument('--num_input_frames', default=1, type=int,
help='Number of input frames to the model and state embedding including the current frame \
When num_input_frames > 1, it will also take the previous num_input_frames - 1 frames as input.')
# Ablations and other settings
parser.add_argument("--use_lstm", action="store_true",
help="Use LSTM in agent model.")
parser.add_argument('--num_lstm_layers', default=1, type=int,
help='Lstm layers.')
parser.add_argument('--disable_use_embedding', action='store_true',
help='Disable embeddings.')
parser.add_argument('--no_extrinsic_rewards', action='store_true',
help='Only intrinsic rewards.')
parser.add_argument('--no_generator', action='store_true',
                    help='Use vanilla policy (deprecated)')
parser.add_argument('--intrinsic_reward_coef', default=1.0, type=float,
help='Coefficient for the intrinsic reward')
parser.add_argument('--random_agent', action='store_true',
help='Use a random agent to test the env.')
parser.add_argument('--novelty', action='store_true',
help='Discount rewards based on times goal has been proposed.')
parser.add_argument('--novelty_bonus', default=0.1, type=float,
help='Bonus you get for proposing objects if novelty')
parser.add_argument('--novelty_coef', default=0.3, type=float,
help='Modulates novelty bonus if novelty')
parser.add_argument('--restart_episode', action='store_true',
help='Restart Episode when reaching intrinsic goal.')
parser.add_argument('--modify', action='store_true',
help='Modify Goal instead of having to reach the goal')
parser.add_argument('--no_boundary_awareness', action='store_true',
help='Remove Episode Boundary Awareness')
parser.add_argument('--generator_loss_form', type=str, default='threshold',
                    help='[threshold, dummy, gaussian, linear]')
parser.add_argument('--generator_target', default=5.0, type=float,
                    help='Mean target for Gaussian and Linear rewards')
parser.add_argument('--target_variance', default=15.0, type=float,
help='Variance for the Gaussian Reward')
# yapf: enable
logging.basicConfig(
format=(
"[%(levelname)s:%(process)d %(module)s:%(lineno)d %(asctime)s] " "%(message)s"
),
level=0,
)
Buffers = typing.Dict[str, typing.List[torch.Tensor]]
def compute_baseline_loss(advantages):
# Take the mean over batch, sum over time.
return 0.5 * torch.sum(torch.mean(advantages ** 2, dim=1))
def compute_entropy_loss(logits):
# Regularizing Entropy Loss
policy = F.softmax(logits, dim=-1)
log_policy = F.log_softmax(logits, dim=-1)
entropy_per_timestep = torch.sum(-policy * log_policy, dim=-1)
return -torch.sum(torch.mean(entropy_per_timestep, dim=1))
def compute_policy_gradient_loss(logits, actions, advantages):
# Main Policy Loss
cross_entropy = F.nll_loss(
F.log_softmax(torch.flatten(logits, 0, 1), dim=-1),
target=torch.flatten(actions, 0, 1),
reduction="none",
)
cross_entropy = cross_entropy.view_as(advantages)
advantages.requires_grad = False
policy_gradient_loss_per_timestep = cross_entropy * advantages
return torch.sum(torch.mean(policy_gradient_loss_per_timestep, dim=1))
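# Shape conventions for the three losses above (illustrative note, not part of
# the original file): policy logits are (T, B, num_actions); actions and
# advantages are (T, B).
#
#     logits = torch.randn(10, 4, 7)        # unroll length 10, batch 4, 7 actions
#     actions = torch.randint(7, (10, 4))
#     advantages = torch.randn(10, 4)
#     loss = compute_policy_gradient_loss(logits, actions, advantages)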
def act(
actor_index: int,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
model: torch.nn.Module,
generator_model,
buffers: Buffers,
initial_agent_state_buffers, flags):
"""Defines and generates IMPALA actors in multiples threads."""
try:
logging.info("Actor %i started.", actor_index)
timings = prof.Timings() # Keep track of how fast things are.
gym_env = create_env(flags)
seed = actor_index ^ int.from_bytes(os.urandom(4), byteorder="little")
gym_env.seed(seed)
#gym_env = wrappers.FullyObsWrapper(gym_env)
if flags.num_input_frames > 1:
gym_env = FrameStack(gym_env, flags.num_input_frames)
env = Observation_WrapperSetup(gym_env, fix_seed=flags.fix_seed, env_seed=flags.env_seed)
env_output = env.initial()
initial_frame = env_output['frame']
agent_state = model.initial_state(batch_size=1)
generator_output = generator_model(env_output)
goal = generator_output["goal"]
agent_output, unused_state = model(env_output, agent_state, goal)
while True:
index = free_queue.get()
if index is None:
break
# Write old rollout end.
for key in env_output:
buffers[key][index][0, ...] = env_output[key]
for key in agent_output:
buffers[key][index][0, ...] = agent_output[key]
for key in generator_output:
buffers[key][index][0, ...] = generator_output[key]
buffers["initial_frame"][index][0, ...] = initial_frame
for i, tensor in enumerate(agent_state):
initial_agent_state_buffers[index][i][...] = tensor
# Do new rollout
for t in range(flags.unroll_length):
aux_steps = 0
timings.reset()
if flags.modify:
new_frame = torch.flatten(env_output['frame'], 2, 3)
old_frame = torch.flatten(initial_frame, 2, 3)
ans = new_frame == old_frame
ans = torch.sum(ans, 3) != 3 # Reached if the three elements of the frame are not the same.
reached_condition = torch.squeeze(torch.gather(ans, 2, torch.unsqueeze(goal.long(),2)))
else:
agent_location = torch.flatten(env_output['frame'], 2, 3)
agent_location = agent_location[:,:,:,0]
                    agent_location = (agent_location == 10).nonzero() # 10 = OBJECT_TO_IDX['agent']
agent_location = agent_location[:,2]
agent_location = agent_location.view(agent_output["action"].shape)
reached_condition = goal == agent_location
if reached_condition: # Generate new goal when reached intrinsic goal
if flags.restart_episode:
env_output = env.initial()
else:
env.episode_step = 0
initial_frame = env_output['frame']
with torch.no_grad():
generator_output = generator_model(env_output)
goal = generator_output["goal"]
if env_output['done'][0] == 1: # Generate a New Goal when episode finished
initial_frame = env_output['frame']
with torch.no_grad():
generator_output = generator_model(env_output)
goal = generator_output["goal"]
with torch.no_grad():
agent_output, agent_state = model(env_output, agent_state, goal)
timings.time("model")
env_output = env.step(agent_output["action"])
timings.time("step")
for key in env_output:
buffers[key][index][t + 1, ...] = env_output[key]
for key in agent_output:
buffers[key][index][t + 1, ...] = agent_output[key]
for key in generator_output:
buffers[key][index][t + 1, ...] = generator_output[key]
buffers["initial_frame"][index][t + 1, ...] = initial_frame
timings.time("write")
full_queue.put(index)
if actor_index == 0:
logging.info("Actor %i: %s", actor_index, timings.summary())
except KeyboardInterrupt:
pass # Return silently.
except Exception as e:
logging.error("Exception in worker process %i", actor_index)
traceback.print_exc()
print()
raise e
def get_batch(
flags,
free_queue: mp.SimpleQueue,
full_queue: mp.SimpleQueue,
buffers: Buffers,
initial_agent_state_buffers,
timings,
lock=threading.Lock()):
"""Returns a Batch with the history."""
with lock:
timings.time("lock")
indices = [full_queue.get() for _ in range(flags.batch_size)]
timings.time("dequeue")
batch = {
key: torch.stack([buffers[key][m] for m in indices], dim=1) for key in buffers
}
initial_agent_state = (
torch.cat(ts, dim=1)
for ts in zip(*[initial_agent_state_buffers[m] for m in indices])
)
timings.time("batch")
for m in indices:
free_queue.put(m)
timings.time("enqueue")
batch = {k: t.to(device=flags.device, non_blocking=True) for k, t in batch.items()}
initial_agent_state = tuple(t.to(device=flags.device, non_blocking=True)
for t in initial_agent_state)
timings.time("device")
return batch, initial_agent_state
def reached_goal_func(frames, goals, initial_frames = None, done_aux = None):
"""Auxiliary function which evaluates whether agent has reached the goal."""
if flags.modify:
new_frame = torch.flatten(frames, 2, 3)
old_frame = torch.flatten(initial_frames, 2, 3)
ans = new_frame == old_frame
ans = torch.sum(ans, 3) != 3 # reached if the three elements are not the same
reached = torch.squeeze(torch.gather(ans, 2, torch.unsqueeze(goals.long(),2)))
if flags.no_boundary_awareness:
reached = reached.float() * (1 - done_aux.float())
return reached
else:
agent_location = torch.flatten(frames, 2, 3)
agent_location = agent_location[:,:,:,0]
        agent_location = (agent_location == 10).nonzero() # 10 = OBJECT_TO_IDX['agent']
agent_location = agent_location[:,2]
agent_location = agent_location.view(goals.shape)
return (goals == agent_location).float()
def learn(
actor_model, model, actor_generator_model, generator_model, batch, initial_agent_state, optimizer, generator_model_optimizer, scheduler, generator_scheduler, flags, max_steps=100.0, lock=threading.Lock()
):
"""Performs a learning (optimization) step for the policy, and for the generator whenever the generator batch is full."""
with lock:
# Loading Batch
next_frame = batch['frame'][1:].float().to(device=flags.device)
initial_frames = batch['initial_frame'][1:].float().to(device=flags.device)
done_aux = batch['done'][1:].float().to(device=flags.device)
reached_goal = reached_goal_func(next_frame, batch['goal'][1:].to(device=flags.device), initial_frames = initial_frames, done_aux = done_aux)
intrinsic_rewards = flags.intrinsic_reward_coef * reached_goal
reached = reached_goal.type(torch.bool)
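        # Shape the intrinsic reward by how quickly the goal was reached: it is
        # discounted by 0.9 * episode_step / max_steps, so later is worth less.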
intrinsic_rewards = intrinsic_rewards*(intrinsic_rewards - 0.9 * (batch["episode_step"][1:].float()/max_steps))
learner_outputs, unused_state = model(batch, initial_agent_state, batch['goal'])
bootstrap_value = learner_outputs["baseline"][-1]
batch = {key: tensor[1:] for key, tensor in batch.items()}
learner_outputs = {key: tensor[:-1] for key, tensor in learner_outputs.items()}
rewards = batch["reward"]
# Student Rewards
if flags.no_generator:
total_rewards = rewards
elif flags.no_extrinsic_rewards:
total_rewards = intrinsic_rewards
else:
total_rewards = rewards + intrinsic_rewards
if flags.reward_clipping == "abs_one":
clipped_rewards = torch.clamp(total_rewards, -1, 1)
elif flags.reward_clipping == "soft_asymmetric":
squeezed = torch.tanh(total_rewards / 5.0)
# Negative rewards are given less weight than positive rewards.
clipped_rewards = torch.where(total_rewards < 0, 0.3 * squeezed, squeezed) * 5.0
elif flags.reward_clipping == "none":
clipped_rewards = total_rewards
discounts = (~batch["done"]).float() * flags.discounting
clipped_rewards += 1.0 * (rewards>0.0).float()
vtrace_returns = vtrace.from_logits(
behavior_policy_logits=batch["policy_logits"],
target_policy_logits=learner_outputs["policy_logits"],
actions=batch["action"],
discounts=discounts,
rewards=clipped_rewards,
values=learner_outputs["baseline"],
bootstrap_value=bootstrap_value,
)
# Student Loss
# Compute loss as a weighted sum of the baseline loss, the policy
# gradient loss and an entropy regularization term.
pg_loss = compute_policy_gradient_loss(
learner_outputs["policy_logits"],
batch["action"],
vtrace_returns.pg_advantages,
)
baseline_loss = flags.baseline_cost * compute_baseline_loss(
vtrace_returns.vs - learner_outputs["baseline"]
)
entropy_loss = flags.entropy_cost * compute_entropy_loss(
learner_outputs["policy_logits"]
)
total_loss = pg_loss + baseline_loss + entropy_loss
episode_returns = batch["episode_return"][batch["done"]]
if torch.isnan(torch.mean(episode_returns)):
aux_mean_episode = 0.0
else:
aux_mean_episode = torch.mean(episode_returns).item()
stats = {
"episode_returns": tuple(episode_returns.cpu().numpy()),
"mean_episode_return": aux_mean_episode,
"total_loss": total_loss.item(),
"pg_loss": pg_loss.item(),
"baseline_loss": baseline_loss.item(),
"entropy_loss": entropy_loss.item(),
"gen_rewards": None,
"gg_loss": None,
"generator_baseline_loss": None,
"generator_entropy_loss": None,
"mean_intrinsic_rewards": None,
"mean_episode_steps": None,
"ex_reward": None,
"generator_current_target": None,
}
if flags.no_generator:
stats["gen_rewards"] = 0.0,
stats["gg_loss"] = 0.0,
stats["generator_baseline_loss"] = 0.0,
stats["generator_entropy_loss"] = 0.0,
stats["mean_intrinsic_rewards"] = 0.0,
stats["mean_episode_steps"] = 0.0,
stats["ex_reward"] = 0.0,
stats["generator_current_target"] = 0.0,
scheduler.step()
optimizer.zero_grad()
total_loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 40.0)
optimizer.step()
actor_model.load_state_dict(model.state_dict())
# Generator:
if not flags.no_generator:
global generator_batch
global generator_batch_aux
global generator_current_target
global generator_count
global goal_count_dict
# Loading Batch
is_done = batch['done']==1
reached = reached_goal.type(torch.bool)
if 'frame' in generator_batch.keys():
generator_batch['frame'] = torch.cat((generator_batch['frame'], batch['initial_frame'][is_done].float().to(device=flags.device)), 0)
generator_batch['goal'] = torch.cat((generator_batch['goal'], batch['goal'][is_done].to(device=flags.device)), 0)
generator_batch['episode_step'] = torch.cat((generator_batch['episode_step'], batch['episode_step'][is_done].float().to(device=flags.device)), 0)
generator_batch['generator_logits'] = torch.cat((generator_batch['generator_logits'], batch['generator_logits'][is_done].float().to(device=flags.device)), 0)
generator_batch['reached'] = torch.cat((generator_batch['reached'], torch.zeros(batch['goal'].shape)[is_done].float().to(device=flags.device)), 0)
generator_batch['ex_reward'] = torch.cat((generator_batch['ex_reward'], batch['reward'][is_done].float().to(device=flags.device)), 0)
generator_batch['carried_obj'] = torch.cat((generator_batch['carried_obj'], batch['carried_obj'][is_done].float().to(device=flags.device)), 0)
generator_batch['carried_col'] = torch.cat((generator_batch['carried_col'], batch['carried_col'][is_done].float().to(device=flags.device)), 0)
generator_batch['carried_obj'] = torch.cat((generator_batch['carried_obj'], batch['carried_obj'][reached].float().to(device=flags.device)), 0)
generator_batch['carried_col'] = torch.cat((generator_batch['carried_col'], batch['carried_col'][reached].float().to(device=flags.device)), 0)
generator_batch['ex_reward'] = torch.cat((generator_batch['ex_reward'], batch['reward'][reached].float().to(device=flags.device)), 0)
generator_batch['frame'] = torch.cat((generator_batch['frame'], batch['initial_frame'][reached].float().to(device=flags.device)), 0)
generator_batch['goal'] = torch.cat((generator_batch['goal'], batch['goal'][reached].to(device=flags.device)), 0)
generator_batch['episode_step'] = torch.cat((generator_batch['episode_step'], batch['episode_step'][reached].float().to(device=flags.device)), 0)
generator_batch['generator_logits'] = torch.cat((generator_batch['generator_logits'], batch['generator_logits'][reached].float().to(device=flags.device)), 0)
generator_batch['reached'] = torch.cat((generator_batch['reached'], torch.ones(batch['goal'].shape)[reached].float().to(device=flags.device)), 0)
else:
generator_batch['frame'] = (batch['initial_frame'][is_done]).float().to(device=flags.device) # Notice we use initial_frame from batch
generator_batch['goal'] = (batch['goal'][is_done]).to(device=flags.device)
generator_batch['episode_step'] = (batch['episode_step'][is_done]).float().to(device=flags.device)
generator_batch['generator_logits'] = (batch['generator_logits'][is_done]).float().to(device=flags.device)
generator_batch['reached'] = (torch.zeros(batch['goal'].shape)[is_done]).float().to(device=flags.device)
generator_batch['ex_reward'] = (batch['reward'][is_done]).float().to(device=flags.device)
generator_batch['carried_obj'] = (batch['carried_obj'][is_done]).float().to(device=flags.device)
generator_batch['carried_col'] = (batch['carried_col'][is_done]).float().to(device=flags.device)
generator_batch['carried_obj'] = torch.cat((generator_batch['carried_obj'], batch['carried_obj'][reached].float().to(device=flags.device)), 0)
generator_batch['carried_col'] = torch.cat((generator_batch['carried_col'], batch['carried_col'][reached].float().to(device=flags.device)), 0)
generator_batch['ex_reward'] = torch.cat((generator_batch['ex_reward'], batch['reward'][reached].float().to(device=flags.device)), 0)
generator_batch['frame'] = torch.cat((generator_batch['frame'], batch['initial_frame'][reached].float().to(device=flags.device)), 0)
generator_batch['goal'] = torch.cat((generator_batch['goal'], batch['goal'][reached].to(device=flags.device)), 0)
generator_batch['episode_step'] = torch.cat((generator_batch['episode_step'], batch['episode_step'][reached].float().to(device=flags.device)), 0)
generator_batch['generator_logits'] = torch.cat((generator_batch['generator_logits'], batch['generator_logits'][reached].float().to(device=flags.device)), 0)
generator_batch['reached'] = torch.cat((generator_batch['reached'], torch.ones(batch['goal'].shape)[reached].float().to(device=flags.device)), 0)
if generator_batch['frame'].shape[0] >= flags.generator_batch_size: # Run Gradient step, keep batch residual in batch_aux
for key in generator_batch:
generator_batch_aux[key] = generator_batch[key][flags.generator_batch_size:]
generator_batch[key] = generator_batch[key][:flags.generator_batch_size].unsqueeze(0)
generator_outputs = generator_model(generator_batch)
generator_bootstrap_value = generator_outputs["generator_baseline"][-1]
# Generator Reward
def distance2(episode_step, reached, targ=flags.generator_target):
aux = flags.generator_reward_negative * torch.ones(episode_step.shape).to(device=flags.device)
aux += (episode_step >= targ).float() * reached
return aux
if flags.generator_loss_form == 'gaussian':
generator_target = flags.generator_target * torch.ones(generator_batch['episode_step'].shape).to(device=flags.device)
gen_reward = Normal(generator_target, flags.target_variance*torch.ones(generator_target.shape).to(device=flags.device))
generator_rewards = flags.generator_reward_coef * (2 + gen_reward.log_prob(generator_batch['episode_step']) - gen_reward.log_prob(generator_target)) * generator_batch['reached'] -1
elif flags.generator_loss_form == 'linear':
generator_rewards = (generator_batch['episode_step']/flags.generator_target * (generator_batch['episode_step'] <= flags.generator_target).float() + \
torch.exp ((-generator_batch['episode_step'] + flags.generator_target)/20.0) * (generator_batch['episode_step'] > flags.generator_target).float()) * \
2*generator_batch['reached'] - 1
elif flags.generator_loss_form == 'dummy':
generator_rewards = torch.tensor(distance2(generator_batch['episode_step'], generator_batch['reached'])).to(device=flags.device)
elif flags.generator_loss_form == 'threshold':
generator_rewards = torch.tensor(distance2(generator_batch['episode_step'], generator_batch['reached'], targ=generator_current_target)).to(device=flags.device)
if torch.mean(generator_rewards).item() >= flags.generator_threshold:
generator_count += 1
else:
generator_count = 0
if generator_count >= flags.generator_counts and generator_current_target<=flags.generator_maximum:
generator_current_target += 1.0
generator_count = 0
goal_count_dict *= 0.0
if flags.novelty:
frames_aux = torch.flatten(generator_batch['frame'], 2, 3)
frames_aux = frames_aux[:,:,:,0]
object_ids =torch.zeros(generator_batch['goal'].shape).long()
for i in range(object_ids.shape[1]):
object_ids[0,i] = frames_aux[0,i,generator_batch['goal'][0,i]]
goal_count_dict[object_ids[0,i]] += 1
bonus = (object_ids>2).float().to(device=flags.device) * flags.novelty_bonus
generator_rewards += bonus
if flags.reward_clipping == "abs_one":
generator_clipped_rewards = torch.clamp(generator_rewards, -1, 1)
if not flags.no_extrinsic_rewards:
generator_clipped_rewards = 1.0 * (generator_batch['ex_reward'] > 0).float() + generator_clipped_rewards * (generator_batch['ex_reward'] <= 0).float()
generator_discounts = torch.zeros(generator_batch['episode_step'].shape).float().to(device=flags.device)
goals_aux = generator_batch["goal"]
if flags.inner:
goals_aux = goals_aux.float()
goals_aux -= 2 * (torch.floor(goals_aux/generator_model.height))
goals_aux -= generator_model.height -1
goals_aux = goals_aux.long()
generator_vtrace_returns = vtrace.from_logits(
behavior_policy_logits=generator_batch["generator_logits"],
target_policy_logits=generator_outputs["generator_logits"],
actions=goals_aux,
discounts=generator_discounts,
rewards=generator_clipped_rewards,
values=generator_outputs["generator_baseline"],
bootstrap_value=generator_bootstrap_value,
)
# Generator Loss
gg_loss = compute_policy_gradient_loss(
generator_outputs["generator_logits"],
goals_aux,
generator_vtrace_returns.pg_advantages,
)
generator_baseline_loss = flags.baseline_cost * compute_baseline_loss(
generator_vtrace_returns.vs - generator_outputs["generator_baseline"]
)
generator_entropy_loss = flags.generator_entropy_cost * compute_entropy_loss(
generator_outputs["generator_logits"]
)
                generator_total_loss = gg_loss + generator_entropy_loss + generator_baseline_loss
intrinsic_rewards_gen = generator_batch['reached']*(1- 0.9 * (generator_batch["episode_step"].float()/max_steps))
stats["gen_rewards"] = torch.mean(generator_clipped_rewards).item()
stats["gg_loss"] = gg_loss.item()
stats["generator_baseline_loss"] = generator_baseline_loss.item()
stats["generator_entropy_loss"] = generator_entropy_loss.item()
stats["mean_intrinsic_rewards"] = torch.mean(intrinsic_rewards_gen).item()
stats["mean_episode_steps"] = torch.mean(generator_batch["episode_step"]).item()
stats["ex_reward"] = torch.mean(generator_batch['ex_reward']).item()
stats["generator_current_target"] = generator_current_target
generator_scheduler.step()
generator_model_optimizer.zero_grad()
generator_total_loss.backward()
nn.utils.clip_grad_norm_(generator_model.parameters(), 40.0)
generator_model_optimizer.step()
actor_generator_model.load_state_dict(generator_model.state_dict())
if generator_batch_aux['frame'].shape[0]>0:
generator_batch = {key: tensor[:] for key, tensor in generator_batch_aux.items()}
else:
generator_batch = dict()
return stats
def create_buffers(obs_shape, num_actions, flags, width, height, logits_size) -> Buffers:
T = flags.unroll_length
specs = dict(
frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
reward=dict(size=(T + 1,), dtype=torch.float32),
done=dict(size=(T + 1,), dtype=torch.bool),
episode_return=dict(size=(T + 1,), dtype=torch.float32),
episode_step=dict(size=(T + 1,), dtype=torch.int32),
last_action=dict(size=(T + 1,), dtype=torch.int64),
policy_logits=dict(size=(T + 1, num_actions), dtype=torch.float32),
baseline=dict(size=(T + 1,), dtype=torch.float32),
generator_baseline=dict(size=(T + 1,), dtype=torch.float32),
action=dict(size=(T + 1,), dtype=torch.int64),
episode_win=dict(size=(T + 1,), dtype=torch.int32),
generator_logits=dict(size=(T + 1, logits_size), dtype=torch.float32),
goal=dict(size=(T + 1,), dtype=torch.int64),
initial_frame=dict(size=(T + 1, *obs_shape), dtype=torch.uint8),
carried_col =dict(size=(T + 1,), dtype=torch.int64),
carried_obj =dict(size=(T + 1,), dtype=torch.int64),
)
buffers: Buffers = {key: [] for key in specs}
for _ in range(flags.num_buffers):
for key in buffers:
buffers[key].append(torch.empty(**specs[key]).share_memory_())
return buffers
def train(flags):
"""Full training loop."""
if flags.xpid is None:
flags.xpid = "torchbeast-%s" % time.strftime("%Y%m%d-%H%M%S")
plogger = file_writer.FileWriter(
xpid=flags.xpid, xp_args=flags.__dict__, rootdir=flags.savedir
)
checkpointpath = os.path.expandvars(
os.path.expanduser("%s/%s/%s" % (flags.savedir, flags.xpid, "model.tar"))
)
if flags.num_buffers is None: # Set sensible default for num_buffers.
flags.num_buffers = max(2 * flags.num_actors, flags.batch_size)
if flags.num_actors >= flags.num_buffers:
raise ValueError("num_buffers should be larger than num_actors")
T = flags.unroll_length
B = flags.batch_size
flags.device = None
if not flags.disable_cuda and torch.cuda.is_available():
logging.info("Using CUDA.")
flags.device = torch.device("cuda")
else:
logging.info("Not using CUDA.")
flags.device = torch.device("cpu")
env = create_env(flags)
#env = wrappers.FullyObsWrapper(env)
if flags.num_input_frames > 1:
env = FrameStack(env, flags.num_input_frames)
generator_model = Generator(env.observation_space.shape, env.width, env.height, num_input_frames=flags.num_input_frames)
model = Net(env.observation_space.shape, env.action_space.n, state_embedding_dim=flags.state_embedding_dim, num_input_frames=flags.num_input_frames, use_lstm=flags.use_lstm, num_lstm_layers=flags.num_lstm_layers)
global goal_count_dict
goal_count_dict = torch.zeros(11).float().to(device=flags.device)
if flags.inner:
logits_size = (env.width-2)*(env.height-2)
else:
logits_size = env.width * env.height
buffers = create_buffers(env.observation_space.shape, model.num_actions, flags, env.width, env.height, logits_size)
model.share_memory()
generator_model.share_memory()
# Add initial RNN state.
initial_agent_state_buffers = []
for _ in range(flags.num_buffers):
state = model.initial_state(batch_size=1)
for t in state:
t.share_memory_()
initial_agent_state_buffers.append(state)
actor_processes = []
ctx = mp.get_context("fork")
free_queue = ctx.SimpleQueue()
full_queue = ctx.SimpleQueue()
for i in range(flags.num_actors):
actor = ctx.Process(
target=act,
args=(i, free_queue, full_queue, model, generator_model, buffers,
initial_agent_state_buffers, flags))
actor.start()
actor_processes.append(actor)
learner_model = Net(env.observation_space.shape, env.action_space.n, state_embedding_dim=flags.state_embedding_dim, num_input_frames=flags.num_input_frames, use_lstm=flags.use_lstm, num_lstm_layers=flags.num_lstm_layers).to(
device=flags.device
)
learner_generator_model = Generator(env.observation_space.shape, env.width, env.height, num_input_frames=flags.num_input_frames).to(device=flags.device)
optimizer = torch.optim.RMSprop(
learner_model.parameters(),
lr=flags.learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha,
)
generator_model_optimizer = torch.optim.RMSprop(
learner_generator_model.parameters(),
lr=flags.generator_learning_rate,
momentum=flags.momentum,
eps=flags.epsilon,
alpha=flags.alpha)
def lr_lambda(epoch):
return 1 - min(epoch * T * B, flags.total_frames) / flags.total_frames
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
generator_scheduler = torch.optim.lr_scheduler.LambdaLR(generator_model_optimizer, lr_lambda)
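    # Note: lr_lambda anneals the learning rate linearly from 1.0 to 0.0 over
    # flags.total_frames; each scheduler.step() corresponds to one learner batch
    # of T * B frames. For example (with hypothetical flags T=80, B=32,
    # total_frames=2_560_000), step 500 yields a multiplier of
    # 1 - (500 * 80 * 32) / 2_560_000 = 0.5.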
logger = logging.getLogger("logfile")
stat_keys = [
"total_loss",
"mean_episode_return",
"pg_loss",
"baseline_loss",
"entropy_loss",
"gen_rewards",
"gg_loss",
"generator_entropy_loss",
"generator_baseline_loss",
"mean_intrinsic_rewards",
"mean_episode_steps",
"ex_reward",
"generator_current_target",
]
logger.info("# Step\t%s", "\t".join(stat_keys))
frames, stats = 0, {}
def batch_and_learn(i, lock=threading.Lock()):
"""Thread target for the learning process."""
nonlocal frames, stats
timings = prof.Timings()
while frames < flags.total_frames:
timings.reset()
batch, agent_state = get_batch(flags, free_queue, full_queue, buffers,
initial_agent_state_buffers, timings)
stats = learn(model, learner_model, generator_model, learner_generator_model, batch, agent_state, optimizer, generator_model_optimizer, scheduler, generator_scheduler, flags, env.max_steps)
timings.time("learn")
with lock:
to_log = dict(frames=frames)
to_log.update({k: stats[k] for k in stat_keys})
plogger.log(to_log)
frames += T * B
if i == 0:
logging.info("Batch and learn: %s", timings.summary())
for m in range(flags.num_buffers):
free_queue.put(m)
threads = []
for i in range(flags.num_threads):
thread = threading.Thread(
target=batch_and_learn, name="batch-and-learn-%d" % i, args=(i,)
)
thread.start()
threads.append(thread)
def checkpoint():
if flags.disable_checkpoint:
return
logging.info("Saving checkpoint to %s", checkpointpath)
torch.save(
{
"model_state_dict": model.state_dict(),
"generator_model_state_dict": generator_model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"generator_model_optimizer_state_dict": generator_model_optimizer.state_dict(),
"scheduler_state_dict": scheduler.state_dict(),
"generator_scheduler_state_dict": generator_scheduler.state_dict(),
"flags": vars(flags),
},
checkpointpath,
)
timer = timeit.default_timer
try:
last_checkpoint_time = timer()
while frames < flags.total_frames:
start_frames = frames
start_time = timer()
time.sleep(5)
if timer() - last_checkpoint_time > 10 * 60: # Save every 10 min.
checkpoint()
last_checkpoint_time = timer()
fps = (frames - start_frames) / (timer() - start_time)
if stats.get("episode_returns", None):
mean_return = (
"Return per episode: %.1f. " % stats["mean_episode_return"]
)
else:
mean_return = ""
total_loss = stats.get("total_loss", float("inf"))
logging.info(
"After %i frames: loss %f @ %.1f fps. %sStats:\n%s",
frames,
total_loss,
fps,
mean_return,
pprint.pformat(stats),
)
except KeyboardInterrupt:
return # Try joining actors then quit.
else:
for thread in threads:
thread.join()
logging.info("Learning finished after %d frames.", frames)
finally:
for _ in range(flags.num_actors):
free_queue.put(None)
for actor in actor_processes:
actor.join(timeout=1)
checkpoint()
plogger.close()
def init(module, weight_init, bias_init, gain=1):
weight_init(module.weight.data, gain=gain)
bias_init(module.bias.data)
return module
class Generator(nn.Module):
"""Constructs the Teacher Policy which takes an initial observation and produces a goal."""
def __init__(self, observation_shape, width, height, num_input_frames, hidden_dim=256):
super(Generator, self).__init__()
self.observation_shape = observation_shape
self.height = height
self.width = width
self.env_dim = self.width * self.height
self.state_embedding_dim = 256
self.use_index_select = True
self.obj_dim = 5
self.col_dim = 3
self.con_dim = 2
self.num_channels = (self.obj_dim + self.col_dim + self.con_dim) * num_input_frames
if flags.disable_use_embedding:
print("not_using_embedding")
self.num_channels = 3*num_input_frames
self.embed_object = nn.Embedding(11, self.obj_dim)
self.embed_color = nn.Embedding(6, self.col_dim)
self.embed_contains = nn.Embedding(4, self.con_dim)
K = self.num_channels # number of input filters
F = 3 # filter dimensions
S = 1 # stride
P = 1 # padding
M = 16 # number of intermediate filters
Y = 8 # number of output filters
L = 4 # number of convnet layers
E = 1 # output of last layer
in_channels = [K] + [M] * 4
out_channels = [M] * 3 + [E]
conv_extract = [
nn.Conv2d(
in_channels=in_channels[i],
out_channels=out_channels[i],
kernel_size=(F, F),
stride=S,
padding=P,
)
for i in range(L)
]
def interleave(xs, ys):
return [val for pair in zip(xs, ys) for val in pair]
self.extract_representation = nn.Sequential(
*interleave(conv_extract, [nn.ELU()] * len(conv_extract))
)
self.out_dim = self.env_dim * 16 + self.obj_dim + self.col_dim
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0))
if flags.inner:
self.aux_env_dim = (self.height-2) * (self.width-2)
else:
self.aux_env_dim = self.env_dim
self.baseline_teacher = init_(nn.Linear(self.aux_env_dim, 1))
def _select(self, embed, x):
"""Efficient function to get embedding from an index."""
if self.use_index_select:
out = embed.weight.index_select(0, x.reshape(-1))
# handle reshaping x to 1-d and output back to N-d
return out.reshape(x.shape +(-1,))
else:
return embed(x)
def create_embeddings(self, x, id):
"""Generates compositional embeddings."""
if id == 0:
objects_emb = self._select(self.embed_object, x[:,:,:,id::3])
elif id == 1:
objects_emb = self._select(self.embed_color, x[:,:,:,id::3])
elif id == 2:
objects_emb = self._select(self.embed_contains, x[:,:,:,id::3])
embeddings = torch.flatten(objects_emb, 3, 4)
return embeddings
def convert_inner(self, goals):
"""Transform environment if using inner flag."""
goals = goals.float()
goals += 2*(1+torch.floor(goals/(self.height-2)))
goals += self.height - 1
goals = goals.long()
return goals
def agent_loc(self, frames):
"""Returns the location of an agent from an observation."""
T, B, height, width, *_ = frames.shape
agent_location = torch.flatten(frames, 2, 3)
agent_location = agent_location[:,:,:,0]
        agent_location = (agent_location == 10).nonzero()  # object id 10 is the agent in MiniGrid's encoding
agent_location = agent_location[:,2]
agent_location = agent_location.view(T,B,1)
return agent_location
def forward(self, inputs):
"""Main Function, takes an observation and returns a goal."""
x = inputs["frame"]
T, B, *_ = x.shape
carried_col = inputs["carried_col"]
carried_obj = inputs["carried_obj"]
x = torch.flatten(x, 0, 1) # Merge time and batch.
if flags.disable_use_embedding:
x = x.float()
carried_obj = carried_obj.float()
carried_col = carried_col.float()
else:
x = x.long()
carried_obj = carried_obj.long()
carried_col = carried_col.long()
x = torch.cat([self.create_embeddings(x, 0), self.create_embeddings(x, 1), self.create_embeddings(x, 2)], dim = 3)
carried_obj_emb = self._select(self.embed_object, carried_obj)
carried_col_emb = self._select(self.embed_color, carried_col)
x = x.transpose(1, 3)
carried_obj_emb = carried_obj_emb.view(T * B, -1)
carried_col_emb = carried_col_emb.view(T * B, -1)
x = self.extract_representation(x)
x = x.view(T * B, -1)
generator_logits = x.view(T*B, -1)
generator_baseline = self.baseline_teacher(generator_logits)
goal = torch.multinomial(F.softmax(generator_logits, dim=1), num_samples=1)
generator_logits = generator_logits.view(T, B, -1)
generator_baseline = generator_baseline.view(T, B)
goal = goal.view(T, B)
if flags.inner:
goal = self.convert_inner(goal)
return dict(goal=goal, generator_logits=generator_logits, generator_baseline=generator_baseline)
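    # Shape sketch for forward() (a reading aid; T = unroll length, B = batch size):
    #   inputs["frame"]: [T, B, height, width, channels] integer MiniGrid encodings
    #   generator_logits: [T, B, logits_size], one logit per candidate goal cell
    #   goal: [T, B] flat cell indices sampled from softmax(generator_logits),
    #   mapped onto full-grid coordinates by convert_inner when flags.inner is set.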
class MinigridNet(nn.Module):
"""Constructs the Student Policy which takes an observation and a goal and produces an action."""
def __init__(self, observation_shape, num_actions, state_embedding_dim=256, num_input_frames=1, use_lstm=False, num_lstm_layers=1):
super(MinigridNet, self).__init__()
self.observation_shape = observation_shape
self.num_actions = num_actions
self.state_embedding_dim = state_embedding_dim
self.use_lstm = use_lstm
self.num_lstm_layers = num_lstm_layers
self.use_index_select = True
self.obj_dim = 5
self.col_dim = 3
self.con_dim = 2
self.goal_dim = flags.goal_dim
self.agent_loc_dim = 10
self.num_channels = (self.obj_dim + self.col_dim + self.con_dim + 1) * num_input_frames
if flags.disable_use_embedding:
print("not_using_embedding")
self.num_channels = (3+1+1+1+1)*num_input_frames
self.embed_object = nn.Embedding(11, self.obj_dim)
self.embed_color = nn.Embedding(6, self.col_dim)
self.embed_contains = nn.Embedding(4, self.con_dim)
self.embed_goal = nn.Embedding(self.observation_shape[0]*self.observation_shape[1] + 1, self.goal_dim)
self.embed_agent_loc = nn.Embedding(self.observation_shape[0]*self.observation_shape[1] + 1, self.agent_loc_dim)
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0), nn.init.calculate_gain('relu'))
self.feat_extract = nn.Sequential(
init_(nn.Conv2d(in_channels=self.num_channels, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
init_(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=(3, 3), stride=2, padding=1)),
nn.ELU(),
)
self.fc = nn.Sequential(
init_(nn.Linear(32 + self.obj_dim + self.col_dim, self.state_embedding_dim)),
nn.ReLU(),
init_(nn.Linear(self.state_embedding_dim, self.state_embedding_dim)),
nn.ReLU(),
)
if use_lstm:
self.core = nn.LSTM(self.state_embedding_dim, self.state_embedding_dim, self.num_lstm_layers)
        init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0))
self.policy = init_(nn.Linear(self.state_embedding_dim, self.num_actions))
self.baseline = init_(nn.Linear(self.state_embedding_dim, 1))
def initial_state(self, batch_size):
"""Initializes LSTM."""
if not self.use_lstm:
return tuple()
return tuple(torch.zeros(self.core.num_layers, batch_size, self.core.hidden_size) for _ in range(2))
def create_embeddings(self, x, id):
"""Generates compositional embeddings."""
if id == 0:
objects_emb = self._select(self.embed_object, x[:,:,:,id::3])
elif id == 1:
objects_emb = self._select(self.embed_color, x[:,:,:,id::3])
elif id == 2:
objects_emb = self._select(self.embed_contains, x[:,:,:,id::3])
embeddings = torch.flatten(objects_emb, 3, 4)
return embeddings
def _select(self, embed, x):
"""Efficient function to get embedding from an index."""
if self.use_index_select:
out = embed.weight.index_select(0, x.reshape(-1))
# handle reshaping x to 1-d and output back to N-d
return out.reshape(x.shape +(-1,))
else:
return embed(x)
def agent_loc(self, frames):
"""Returns the location of an agent from an observation."""
T, B, *_ = frames.shape
agent_location = torch.flatten(frames, 2, 3)
agent_location = agent_location[:,:,:,0]
        agent_location = (agent_location == 10).nonzero()  # object id 10 is the agent in MiniGrid's encoding
agent_location = agent_location[:,2]
agent_location = agent_location.view(T,B,1)
return agent_location
def forward(self, inputs, core_state=(), goal=[]):
"""Main Function, takes an observation and a goal and returns and action."""
# -- [unroll_length x batch_size x height x width x channels]
x = inputs["frame"]
T, B, h, w, *_ = x.shape
# -- [unroll_length*batch_size x height x width x channels]
x = torch.flatten(x, 0, 1) # Merge time and batch.
goal = torch.flatten(goal, 0, 1)
# Creating goal_channel
goal_channel = torch.zeros_like(x, requires_grad=False)
goal_channel = torch.flatten(goal_channel, 1,2)[:,:,0]
for i in range(goal.shape[0]):
goal_channel[i,goal[i]] = 1.0
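        # The loop above one-hot encodes the goal cell per sample; an equivalent
        # vectorized form (a sketch, not used here) would be:
        #   goal_channel.scatter_(1, goal.long().view(-1, 1), 1.0)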
goal_channel = goal_channel.view(T*B, h, w, 1)
carried_col = inputs["carried_col"]
carried_obj = inputs["carried_obj"]
if flags.disable_use_embedding:
x = x.float()
goal = goal.float()
carried_obj = carried_obj.float()
carried_col = carried_col.float()
else:
x = x.long()
goal = goal.long()
carried_obj = carried_obj.long()
carried_col = carried_col.long()
        # -- [B x H x W x K]
        if flags.no_generator:
            # With the generator disabled, zero out the goal channel so the
            # policy receives no goal information.
            goal_channel = torch.zeros_like(goal_channel)
        x = torch.cat([self.create_embeddings(x, 0), self.create_embeddings(x, 1), self.create_embeddings(x, 2), goal_channel.float()], dim = 3)
        carried_obj_emb = self._select(self.embed_object, carried_obj)
        carried_col_emb = self._select(self.embed_color, carried_col)
x = x.transpose(1, 3)
x = self.feat_extract(x)
x = x.view(T * B, -1)
carried_obj_emb = carried_obj_emb.view(T * B, -1)
carried_col_emb = carried_col_emb.view(T * B, -1)
union = torch.cat([x, carried_obj_emb, carried_col_emb], dim=1)
core_input = self.fc(union)
if self.use_lstm:
core_input = core_input.view(T, B, -1)
core_output_list = []
notdone = (~inputs["done"]).float()
for input, nd in zip(core_input.unbind(), notdone.unbind()):
nd = nd.view(1, -1, 1)
core_state = tuple(nd * s for s in core_state)
output, core_state = self.core(input.unsqueeze(0), core_state)
core_output_list.append(output)
core_output = torch.flatten(torch.cat(core_output_list), 0, 1)
else:
core_output = core_input
core_state = tuple()
policy_logits = self.policy(core_output)
baseline = self.baseline(core_output)
if self.training:
action = torch.multinomial(F.softmax(policy_logits, dim=1), num_samples=1)
else:
action = torch.argmax(policy_logits, dim=1)
policy_logits = policy_logits.view(T, B, self.num_actions)
baseline = baseline.view(T, B)
action = action.view(T, B)
return dict(policy_logits=policy_logits, baseline=baseline, action=action), core_state
Net = MinigridNet
GeneratorNet = Generator
class Minigrid2Image(gym.ObservationWrapper):
"""Get MiniGrid observation to ignore language instruction."""
def __init__(self, env):
gym.ObservationWrapper.__init__(self, env)
self.observation_space = env.observation_space.spaces["image"]
def observation(self, observation):
return observation["image"]
def create_env(flags):
return Minigrid2Image(wrappers.FullyObsWrapper(gym.make(flags.env)))
def main(flags):
if flags.mode == "train":
train(flags)
else:
test(flags)
if __name__ == "__main__":
flags = parser.parse_args()
main(flags)
| adversarially-motivated-intrinsic-goals-main | monobeast/minigrid/monobeast_amigo.py |
import numpy as np
import pandas as pd
import statsmodels.api as sm
import gc
import operator
import networkx as nx
from tqdm import tqdm
G = nx.watts_strogatz_graph(2000000, 10, 0.5)
assignments = np.concatenate([[k]*10 for k in list(np.random.randint(0, 2, 2000000//10))])
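# Note: treatment is cluster-randomized in contiguous blocks of 10 node ids
# (each block shares one Bernoulli(0.5) draw), so ring-lattice neighbors --
# which have nearby ids before rewiring -- tend to share assignments.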
sample = np.random.choice(2000000, 100000)
print('computing motif features for the sampled egos')
data = []
for i in tqdm(sample):
neighbor = len(G[i])
bb_1 = np.sum([assignments[j] for j in G[i]])
bb_0 = neighbor - bb_1
bbb_0 = 0
bbb_1 = 0
bbb_2 = 0
bbn_0 = 0
bbn_1 = 0
bbn_2 = 0
open_square_0 = 0
open_square_1 = 0
open_square_2 = 0
open_square_3 = 0
for j in G[i]:
for k in G[i]:
if k > j:
                if np.abs(j-k) <= 5:  # simplistic proxy for whether two neighbors are themselves connected (to speed up computation)
if assignments[j] + assignments[k] == 0:
bbb_0 += 1
elif assignments[j] + assignments[k] == 1:
bbb_1 += 1
else:
bbb_2 += 1
else:
if assignments[j] + assignments[k] == 0:
bbn_0 += 1
elif assignments[j] + assignments[k] == 1:
bbn_1 += 1
else:
bbn_2 += 1
for l in G[i]:
if l > k and np.abs(l-k) > 5 and np.abs(l-j) > 5:
if assignments[j] + assignments[k] + assignments[l] == 0:
open_square_0 += 1
elif assignments[j] + assignments[k] + assignments[l]== 1:
open_square_1 += 1
elif assignments[j] + assignments[k] + assignments[l]== 2:
open_square_2 += 1
else:
open_square_3 += 1
data.append([i, assignments[i], neighbor, bb_0, bb_1, bbb_0, bbb_1, bbb_2, bbn_0, bbn_1, bbn_2,
open_square_0, open_square_1, open_square_2, open_square_3])
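# A gloss on the motif counts collected above, for each sampled ego i:
#   bb_1/bb_0 count treated/untreated neighbors; bbb_k counts neighbor pairs
#   with k treated members that are "connected" (approximated by id distance
#   <= 5 on the ring); bbn_k counts such pairs that are not connected; and
#   open_square_k counts mutually unconnected neighbor triples with k treated.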
data = pd.DataFrame.from_records(data)
data.columns = ['id', 'assignment', 'neighbor', 'bb_0', 'bb_1', 'bbb_0', 'bbb_1', 'bbb_2', 'bbn_0', 'bbn_1', 'bbn_2',
'open_square_0', 'open_square_1', 'open_square_2', 'open_square_3'
]
data['open_square_3_normalized'] = 1.0 * data['open_square_3']/(data['open_square_3']+data['open_square_2']+data['open_square_1']+data['open_square_0'])
data['open_square_2_normalized'] = 1.0 * data['open_square_2']/(data['open_square_3']+data['open_square_2']+data['open_square_1']+data['open_square_0'])
data['open_square_1_normalized'] = 1.0 * data['open_square_1']/(data['open_square_3']+data['open_square_2']+data['open_square_1']+data['open_square_0'])
data['open_square_0_normalized'] = 1.0 * data['open_square_0']/(data['open_square_3']+data['open_square_2']+data['open_square_1']+data['open_square_0'])
data['bbb_2_normalized'] = 1.0 * data['bbb_2']/(data['bbb_2']+data['bbb_1']+data['bbb_0'])
data['bbb_1_normalized'] = 1.0 * data['bbb_1']/(data['bbb_2']+data['bbb_1']+data['bbb_0'])
data['bbb_0_normalized'] = 1.0 * data['bbb_0']/(data['bbb_2']+data['bbb_1']+data['bbb_0'])
data['bbn_2_normalized'] = 1.0 * data['bbn_2']/(data['bbn_2']+data['bbn_1']+data['bbn_0'])
data['bbn_1_normalized'] = 1.0 * data['bbn_1']/(data['bbn_2']+data['bbn_1']+data['bbn_0'])
data['bbn_0_normalized'] = 1.0 * data['bbn_0']/(data['bbn_2']+data['bbn_1']+data['bbn_0'])
data['bb_0_normalized'] = 1.0 * data['bb_0']/(data['bb_0']+data['bb_1'])
data['bb_1_normalized'] = 1.0 * data['bb_1']/(data['bb_0']+data['bb_1'])
# compute structural diversity among treated neighbors and among all neighbors
print('computing structural diversity')
structural_diversity = []
c = 0
for uid in list(data['id']):
structural_diversity.append(
nx.number_connected_components(nx.subgraph(G, [j for j in nx.neighbors(G, uid) if assignments[j] == 1]))
)
c += 1
data['structural_diversity'] = structural_diversity
structural_diversity_1 = []
c = 0
for uid in list(data['id']):
structural_diversity_1.append(
nx.number_connected_components(nx.subgraph(G, [j for j in nx.neighbors(G, uid)]))
)
c += 1
data['structural_diversity_1'] = structural_diversity_1
data['gender'] = np.random.randint(0, 2, len(data))
# pure cutoff
data['y1'] = data['neighbor'] * 0.1 + data['gender'] * 1 + \
data['assignment'] * (data['bbb_2_normalized'] > 0.7).astype(float) * 2 + \
np.random.normal(0, 1, len(data))
# structural diversity is causal
data['y2'] = \
data['neighbor'] * 0.1 + data['gender'] * 1 + \
data['structural_diversity'] + \
data['assignment'] * data['structural_diversity'] * 1 + \
np.random.normal(0, 1, len(data))
# structural diversity is correlational
data['y3'] = \
data['neighbor'] * 0.1 + data['gender'] * 1 + \
data['structural_diversity_1'] + \
data['assignment'] * data['structural_diversity_1'] * 1 + \
np.random.normal(0, 1, len(data))
# irrelevant covariates
data['y4'] = data['neighbor'] + np.random.normal(0, 1, len(data))
data.to_csv('data_ws.csv')
# bootstrapping
print('bootstrapping')
probabilities = []
for replicate in tqdm(range(100)):
probabilities_mc = []
assignments = np.concatenate([[k]*10 for k in list(np.random.randint(0, 2, 2000000//10))])
r = np.random.randint(10)
assignments = np.concatenate([assignments[r:], assignments[:r]])
for i in sample:
neighbor = len(G[i])
bb_1 = np.sum([assignments[j] for j in G[i]])
bb_0 = neighbor - bb_1
bbb_0 = 0
bbb_1 = 0
bbb_2 = 0
bbn_0 = 0
bbn_1 = 0
bbn_2 = 0
open_square_0 = 0
open_square_1 = 0
open_square_2 = 0
open_square_3 = 0
for j in G[i]:
for k in G[i]:
if k > j:
if np.abs(j-k) <= 5:
if assignments[j] + assignments[k] == 0:
bbb_0 += 1
elif assignments[j] + assignments[k] == 1:
bbb_1 += 1
else:
bbb_2 += 1
else:
if assignments[j] + assignments[k] == 0:
bbn_0 += 1
elif assignments[j] + assignments[k] == 1:
bbn_1 += 1
else:
bbn_2 += 1
for l in G[i]:
if l > k and np.abs(l-k) > 5 and np.abs(l-j) > 5:
if assignments[j] + assignments[k] + assignments[l] == 0:
open_square_0 += 1
elif assignments[j] + assignments[k] + assignments[l]== 1:
open_square_1 += 1
elif assignments[j] + assignments[k] + assignments[l]== 2:
open_square_2 += 1
else:
open_square_3 += 1
probabilities_mc.append([bb_0, bb_1, bbb_0, bbb_1, bbb_2, bbn_0, bbn_1, bbn_2,
open_square_0, open_square_1, open_square_2, open_square_3, assignments[i]
])
probabilities.append(probabilities_mc)
probabilities = np.array(probabilities).T
np.save('probabilities_ws.npy', probabilities)
| CausalMotifs-master | generate_WS.py |
from causalPartition import causalPartition
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# load and process data
data = pd.read_csv('data_ws.csv')
probabilities = np.load('probabilities_ws.npy')
new_probabilities = {}
new_probabilities['bbb_2_normalized'] = 1.0 * probabilities[4]/(probabilities[4]+probabilities[3]+probabilities[2])
new_probabilities['bbb_1_normalized'] = 1.0 * probabilities[3]/(probabilities[4]+probabilities[3]+probabilities[2])
new_probabilities['bbb_0_normalized'] = 1.0 * probabilities[2]/(probabilities[4]+probabilities[3]+probabilities[2])
new_probabilities['bbn_2_normalized'] = 1.0 * probabilities[7]/(probabilities[7]+probabilities[6]+probabilities[5])
new_probabilities['bbn_1_normalized'] = 1.0 * probabilities[6]/(probabilities[7]+probabilities[6]+probabilities[5])
new_probabilities['bbn_0_normalized'] = 1.0 * probabilities[5]/(probabilities[7]+probabilities[6]+probabilities[5])
new_probabilities['bb_1_normalized'] = 1.0 * probabilities[1]/(probabilities[1]+probabilities[0])
new_probabilities['bb_0_normalized'] = 1.0 * probabilities[0]/(probabilities[1]+probabilities[0])
new_probabilities['open_square_0_normalized'] = 1.0 * probabilities[8]/(probabilities[8]+probabilities[9]+probabilities[10]+probabilities[11])
new_probabilities['open_square_1_normalized'] = 1.0 * probabilities[9]/(probabilities[8]+probabilities[9]+probabilities[10]+probabilities[11])
new_probabilities['open_square_2_normalized'] = 1.0 * probabilities[10]/(probabilities[8]+probabilities[9]+probabilities[10]+probabilities[11])
new_probabilities['open_square_3_normalized'] = 1.0 * probabilities[11]/(probabilities[8]+probabilities[9]+probabilities[10]+probabilities[11])
new_probabilities['assignment'] = probabilities[-1]
# to satisfy positivity
idx = np.logical_and(np.logical_and(data['bbb_0'] + data['bbb_1'] + data['bbb_2'] > 0,
data['bbn_0'] + data['bbn_1'] + data['bbn_2'] > 0),
data['open_square_0']+data['open_square_1']+data['open_square_2']+data['open_square_3'] > 0)
data_ = data[idx]
probabilities_ = {}
input_features = ['assignment',
'bb_1_normalized', 'bb_0_normalized',
'bbb_0_normalized', 'bbb_1_normalized', 'bbb_2_normalized',
'bbn_0_normalized', 'bbn_1_normalized', 'bbn_2_normalized',
'open_square_0_normalized', 'open_square_1_normalized', 'open_square_2_normalized',
'open_square_3_normalized'
]
for key in ['assignment']+input_features:
probabilities_[key] = new_probabilities[key][idx]
# train the tree (separate=True treats the ego's assignment variable as a splitting dimension); adjust the parameters as needed
outcome = 'y2'
partition = causalPartition(data_, probabilities_, 'assignment')
train_result_separate = partition.split_exposure_hajek(True, outcome, input_features,
max_attempt=10, eps=0.001,
delta=0.01,
criteria={'non_trivial_reduction': 0,
'min_leaf_size': 4000})
partition.plot_tree(train_result_separate)
est_result_separate = partition.estimate_exposure_hajek(train_result_separate,
input_features, outcome, eps=0.001, separate=True)
partition.plot_tree(est_result_separate)
# train the tree (separate=False examines heterogeneous indirect/spillover effects); adjust the parameters as needed
outcome = 'y2'
input_features = [
# 'assignment',
'bb_1_normalized', 'bb_0_normalized',
'bbb_0_normalized', 'bbb_1_normalized', 'bbb_2_normalized',
'bbn_0_normalized', 'bbn_1_normalized', 'bbn_2_normalized',
'open_square_0_normalized', 'open_square_1_normalized', 'open_square_2_normalized', 'open_square_3_normalized'
]
partition = causalPartition(data_, probabilities_, 'assignment')
train_result_nonseparate = partition.split_exposure_hajek(False, outcome, input_features,
max_attempt=10, eps=0.001,
delta=0.01,
criteria={'non_trivial_reduction': 0,
'min_leaf_size': 4000})
partition.plot_tree(train_result_nonseparate)
est_result_separate = partition.estimate_exposure_hajek(train_result_nonseparate,
input_features, outcome, eps=0.01, separate=False)
partition.plot_tree(est_result_separate)
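# Note: split_exposure_hajek grows the tree on the training half of the sample,
# and estimate_exposure_hajek re-estimates the leaf means and standard errors on
# the held-out half, in the spirit of honest tree estimation.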
| CausalMotifs-master | example.py |
import numpy as np
import pandas as pd
import statsmodels.api as sm
import gc
import operator
import networkx as nx
class causalPartition:
df = None # the whole dataset
probabilities = None # the Monte Carlo probabilities, a dict, each element represents a dimension of the intervention vector
# each element is a matrix [num_nodes * num_bootstrap]
result_separate = None
result_eht = None
treatment = None # the name of treatment feature (should belong to the dict probabilities)
df_train = None
df_est = None
def __init__(self, df, probabilities, treatment, ratio_train=0.5):
"""
        ratio_train is the fraction of the sample placed in the training set; the
        sample is split up front to support honest estimation.
        By default it is 50% vs 50%, so the training and estimation sets yield
        confidence intervals of roughly equal width.
"""
self.df = df
self.probabilities = probabilities
self.treatment = treatment
self.idx_tr = np.random.random(len(df)) < ratio_train # sample the training set
self.idx_est = np.logical_not(self.idx_tr) # sample the estimation set
self.df_train = df[self.idx_tr]
self.df_est = df[np.logical_not(self.idx_tr)]
self.result_separate = None
self.est_result_separate_eht = None
    # For each observation: flag it when it has only a small probability of
    # belonging to the partition defined by `rules`.
    def _contain_zero(self, probabilities, rules, eps, delta, treated=None):
        """
        For each observation (an element of the returned vector), whether it has
        <= eps probability of belonging to the partition implied by [rules].
        treated: 1/0 to additionally condition on the ego's treatment variable,
        None otherwise.
        """
        if treated is None:
            return np.mean(np.product([probabilities[key] <= th for key, sign, th in rules if sign == 0] + \
                                      [probabilities[key] > th for key, sign, th in rules if sign == 1],
                                      axis=0) > 0, axis=1
                           ) <= eps
        else:
            # Also condition on the ego's treatment status; in separate trees the
            # treatment condition for egos must hold as well.
            return np.mean(np.product([probabilities[key] <= th for key, sign, th in rules if sign == 0] + \
                                      [probabilities[key] > th for key, sign, th in rules if sign == 1] + \
                                      [probabilities[self.treatment] == treated],
                                      axis=0) > 0, axis=1
                           ) <= eps
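    # Minimal usage sketch (hypothetical rule; `probabilities` maps feature names
    # to [num_units x num_bootstrap] matrices of Monte Carlo draws):
    #   rules = [('bbb_2_normalized', 1, 0.5)]          # partition: feature > 0.5
    #   mask = self._contain_zero(probs, rules, eps=0.001, delta=0.01)
    #   mask[i] is True when unit i lands in the partition in <= eps of the draws.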
def _hajek_se(self, d, p, outcome):
"""
        Taylor-linearized standard error for the Hajek estimator.
        (Non-separate trees use WLS standard errors directly, which tend to be
        overestimated.)
"""
average_hajek_var_up = np.sum( ((d[outcome]/p) ** 2) * (1 - p) ) # numerator
average_hajek_var_down = np.sum( ((1.0/p) ** 2) * (1 - p) ) # denominator
average_hajek_cov = np.sum( ((1.0/p) ** 2) * d[outcome] * (1 - p) )
average_hajek_sum_up = np.sum(d[outcome]/p) # numerator
average_hajek_sum_down = np.sum(1.0/p) # denominator
se = np.sqrt(1.0 / (average_hajek_sum_down**2) * average_hajek_var_up + \
(average_hajek_sum_up**2) / (average_hajek_sum_down**4) * average_hajek_var_down + \
- 2.0 * average_hajek_sum_up / (average_hajek_sum_down**3) * average_hajek_cov)
        # Taylor linearization (Sarndal, Swensson and Wretman, 1992, pp. 172-174)
return se
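    # A worked form of the variance above (a reading aid): with
    #   A = sum_i y_i / p_i  (numerator)  and  B = sum_i 1 / p_i  (denominator),
    # the Hajek estimate is A / B, and the delta method gives
    #   Var(A/B) ~= Var(A)/B^2 + A^2 * Var(B)/B^4 - 2 * A * Cov(A, B)/B^3,
    # which is exactly the combination of the three terms under the square root.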
def _plot_tree(self, est_result_separate, node_id, prefix):
if node_id > 1 and node_id % 2 == 0:
sign = '<='
elif node_id > 1 and node_id % 2 == 1:
sign = '> '
else:
sign = ''
if 'left_result' in est_result_separate:
print('%s%s(%d) split %s at %f, n=%d, avg=%f, se=%f' % (prefix, sign, node_id, est_result_separate['feature'],
est_result_separate['threshold'], est_result_separate['N'], est_result_separate['hajek'], est_result_separate['hajek_se']))
self._plot_tree(est_result_separate['left_result'], node_id*2, prefix+'\t')
self._plot_tree(est_result_separate['right_result'], node_id*2+1, prefix+'\t')
else:
print('%s%s(%d) terminate, n=%d, avg=%f, se=%f' % (prefix, sign, node_id,
est_result_separate['N'], est_result_separate['hajek'], est_result_separate['hajek_se']))
def plot_tree(self, result=None):
if 1 in result:
print('treated')
self._plot_tree(result[1], 1, '')
print('non-treated')
self._plot_tree(result[0], 1, '')
else:
self._plot_tree(result, 1, '')
def _split_exposure_hajek(self, node_id, df, probabilities, feature_set, max_attempt, eps, delta,
outcome, rules, N, current_mse, criteria={'non_trivial_reduction': 0},
first_split_treatment=True):
"""
        The recursive splitting implementation for separate trees.
"""
b_feature = ''
b_threshold = 0
b_left = None
b_right = None
b_average_left_hajek = 0
b_average_right_hajek = 0
b_mse = 10000000000.0 # a very large mse
ranges = {}
# enumerate each feature
for feature in feature_set:
gc.collect()
# find a more compact region
upper = 1.
lower = 0.
for rule in rules:
# rules: list of tuples to describe the decision rules
# tuples(feature, 0/1: lower or upper bound, value)
if rule[0] == feature:
if rule[1] == 0:
lower = np.maximum(rule[2], lower)
else:
upper = np.minimum(rule[2], upper)
if lower >= upper:
continue
for k in range(max_attempt):
if first_split_treatment and node_id == 1:
if feature != self.treatment or k != 0:
continue
                threshold = np.random.uniform(lower, upper)  # randomly select a threshold: left <=, right >
                # make sure it is a valid split --- each observation should have a
                # non-trivial (> eps) probability of belonging to each partition
cz_l = self._contain_zero(probabilities, rules+[(feature, 0, threshold)], eps, delta)
cz_r = self._contain_zero(probabilities, rules+[(feature, 1, threshold)], eps, delta)
if np.mean(cz_l) > delta or np.mean(cz_r) > delta:
continue
# if (almost) positivity can't be satisfied
idxs_left = np.product([df[key] <= th for key, sign, th in rules if sign == 0] + \
[df[key] > th for key, sign, th in rules if sign == 1] + \
[df[feature] <= threshold],
axis=0) > 0
idxs_right = np.product([df[key] <= th for key, sign, th in rules if sign == 0] + \
[df[key] > th for key, sign, th in rules if sign == 1] + \
[df[feature] > threshold],
axis=0) > 0
left = df[idxs_left]
right = df[idxs_right]
# generalized propensity score (probability of belonging in an exposure condition)
propensities_left = np.mean(np.product([probabilities[key][idxs_left] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_left] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_left] <= threshold],
axis=0) > 0, axis=1)
# generalized propensity score (probability of belonging in an exposure condition)
propensities_right = np.mean(np.product([probabilities[key][idxs_right] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_right] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_right] > threshold],
axis=0) > 0, axis=1)
# again, filter small propensities data points (usually should not filter or filter very few)
if len(left) == 0 or len(right) == 0:
continue
filter_left = propensities_left > 0
left = left[filter_left]
propensities_left = propensities_left[filter_left]
filter_right = propensities_right > 0
right = right[filter_right]
propensities_right = propensities_right[filter_right]
mod_left = sm.WLS(left[outcome], np.ones(len(left)), weights=1.0 / propensities_left)
mod_right = sm.WLS(right[outcome], np.ones(len(right)), weights=1.0 / propensities_right)
res_left = mod_left.fit()
res_right = mod_right.fit()
average_left_hajek = res_left.params[0]
average_right_hajek = res_right.params[0]
average_left_hajek_se = self._hajek_se(left, propensities_left, outcome)
average_right_hajek_se = self._hajek_se(right, propensities_right, outcome)
mse_left = np.sum((1.0 / propensities_left) * ((res_left.resid) ** 2))
mse_right = np.sum((1.0 / propensities_right) * ((res_right.resid) ** 2))
mse = mse_left * len(left)/(len(left)+len(right)) + mse_right * len(right)/(len(left)+len(right))
if mse < b_mse:
flag = True
assert len(criteria) > 0
if 'non_trivial_reduction' in criteria:
if not (mse < current_mse - criteria['non_trivial_reduction']):
flag = False
if 'reasonable_propensity' in criteria:
if not (np.abs(np.sum(1.0 / propensities_left)/len(df) - 1.0) <= criteria['reasonable_propensity'] \
and \
np.abs(np.sum(1.0 / propensities_right)/len(df) - 1.0) <= criteria['reasonable_propensity'] \
):
flag = False
if 'separate_reduction' in criteria:
if not (mse_left < current_mse and mse_right < current_mse):
flag = False
if 'min_leaf_size' in criteria:
if not (len(left) >= criteria['min_leaf_size'] and len(right) >= criteria['min_leaf_size']):
flag = False
if flag:
b_feature = feature
b_mse = mse
b_mse_left = mse_left
b_mse_right = mse_right
b_threshold = threshold
b_average_left_hajek = average_left_hajek
b_average_right_hajek = average_right_hajek
b_average_left_hajek_se = average_left_hajek_se
b_average_right_hajek_se = average_right_hajek_se
b_left_den = np.sum(1.0 / propensities_left)
b_right_den = np.sum(1.0 / propensities_right)
b_left = left
b_right = right
b_left_rules = rules + [(feature, 0, threshold)]
b_right_rules = rules + [(feature, 1, threshold)]
result = {}
if b_feature != '':
# if find a valid partition
result_left = self._split_exposure_hajek(node_id*2, df, probabilities, feature_set, max_attempt, eps, delta,
outcome, b_left_rules, len(b_left), b_mse_left, criteria)
result_right = self._split_exposure_hajek(node_id*2+1, df, probabilities, feature_set, max_attempt, eps, delta,
outcome, b_right_rules, len(b_right), b_mse_right, criteria)
result['mse'] = result_left['mse'] * 1.0 * len(b_left)/(len(b_left)+len(b_right)) + \
result_right['mse'] * 1.0 * len(b_right)/(len(b_left)+len(b_right))
result['feature'] = b_feature
result['threshold'] = b_threshold
result_left['hajek'] = b_average_left_hajek
result_right['hajek'] = b_average_right_hajek
result_left['hajek_se'] = b_average_left_hajek_se
result_right['hajek_se'] = b_average_right_hajek_se
result_left['N'] = len(b_left)
result_right['N'] = len(b_right)
result_left['den'] = b_left_den
result_right['den'] = b_right_den
result['left_result'] = result_left
result['right_result'] = result_right
return result
else:
result['mse'] = current_mse
return result
def _split_exposure_validate_eht(self, node_id, df_est, result, probabilities_est, rules, outcome, eps=0.005):
"""
        Re-estimate the trained non-separate tree on the held-out estimation set.
"""
est_result = {}
if 'left_result' in result:
est_result['feature'] = result['feature']
est_result['threshold'] = result['threshold']
est_result['left_result'] = self._split_exposure_validate_eht(node_id*2, df_est, result['left_result'], probabilities_est,
rules+[(result['feature'], 0, result['threshold'])], outcome, eps)
est_result['right_result'] = self._split_exposure_validate_eht(node_id*2+1, df_est, result['right_result'], probabilities_est,
rules+[(result['feature'], 1, result['threshold'])], outcome, eps)
if rules:
# if this is not the root
idxs = np.product([df_est[key] <= th for key, sign, th in rules if sign == 0] + \
[df_est[key] > th for key, sign, th in rules if sign == 1],
axis=0) > 0
dff = df_est[idxs]
else:
idxs = np.ones(len(df_est)).astype(bool)
dff = df_est
propensities_1 = np.mean(np.product([probabilities_est[key][idxs] <= th for key, sign, th in rules if sign == 0] + \
[probabilities_est[key][idxs] > th for key, sign, th in rules if sign == 1]+\
[probabilities_est[self.treatment][idxs] == 1],
axis=0), axis=1)
propensities_0 = np.mean(np.product([probabilities_est[key][idxs] <= th for key, sign, th in rules if sign == 0] + \
[probabilities_est[key][idxs] > th for key, sign, th in rules if sign == 1]+\
[probabilities_est[self.treatment][idxs] == 0],
axis=0), axis=1)
idxs_filter = np.logical_and(propensities_1 > 0, propensities_0 > 0)
dff = dff[idxs_filter]
propensities_1 = propensities_1[idxs_filter]
propensities_0 = propensities_0[idxs_filter]
mod = sm.WLS(dff[outcome], sm.add_constant(dff[self.treatment]),
weights=1.0 / propensities_1 * dff[self.treatment] + 1.0 / propensities_0 * (1-dff[self.treatment]))
res = mod.fit()
mse = np.sum((res.resid ** 2) * (1.0 / propensities_1 * dff[self.treatment] + 1.0 / propensities_0 * (1-dff[self.treatment])))
average_hajek = res.params[1]
average_hajek_se = res.bse[1] # dff[outcome].std() / np.sqrt(len(dff)-1)
est_result['hajek'] = average_hajek
est_result['hajek_se'] = average_hajek_se
est_result['mse'] = mse
est_result['N'] = len(dff)
return est_result
def _split_exposure_validate(self, node_id, df_est, result,
probabilities_est, rules, outcome, eps=0.005):
est_result = {}
if 'left_result' in result:
est_result['feature'] = result['feature']
est_result['threshold'] = result['threshold']
est_result['left_result'] = self._split_exposure_validate(node_id*2, df_est, result['left_result'], probabilities_est,
rules+[(result['feature'], 0, result['threshold'])], outcome, eps)
est_result['right_result'] = self._split_exposure_validate(node_id*2+1, df_est, result['right_result'], probabilities_est,
rules+[(result['feature'], 1, result['threshold'])], outcome, eps)
if rules:
idxs = np.product([df_est[key] <= th for key, sign, th in rules if sign == 0] + \
[df_est[key] > th for key, sign, th in rules if sign == 1], axis=0) > 0
dff = df_est[idxs]
propensities = np.mean(np.product([probabilities_est[key][idxs] <= th for key, sign, th in rules if sign == 0] + \
[probabilities_est[key][idxs] > th for key, sign, th in rules if sign == 1],
axis=0), axis=1)
idxs_filter = propensities > eps
dff = dff[idxs_filter]
propensities = propensities[idxs_filter]
else:
dff = df_est
propensities = np.ones(len(dff))
mod = sm.OLS(dff[outcome], np.ones(len(dff)))
res = mod.fit()
mse = np.sum((res.resid ** 2) * 1.0 / propensities)
average_hajek = res.params[0]
if node_id == 1:
average_hajek_se = dff[outcome].std() / np.sqrt(len(dff)-1)
else:
average_hajek_se = self._hajek_se(dff, propensities, outcome)
est_result['hajek'] = average_hajek
est_result['hajek_se'] = average_hajek_se
est_result['mse'] = mse
est_result['N'] = len(dff)
return est_result
def _split_exposure_hajek_eht(self, node_id, df, probabilities, feature_set, max_attempt, eps, delta, outcome, rules, N, current_mse, criteria):
"""
        The recursive splitting implementation for non-separate trees.
"""
b_feature = ''
b_threshold = 0
b_left = None
b_right = None
b_average_left_hajek = 0
b_average_right_hajek = 0
b_mse = 10000000000.0
ranges = {}
for feature in feature_set:
gc.collect()
# find the more compact valid region
upper = 1.
lower = 0.
for rule in rules:
if rule[0] == feature:
if rule[1] == 0:
lower = np.maximum(rule[2], lower)
else:
upper = np.minimum(rule[2], upper)
if lower > upper:
continue
for k in range(max_attempt):
threshold = np.random.uniform(lower, upper)
cz_l_1 = self._contain_zero(probabilities, rules+[(feature, 0, threshold)], eps, delta, treated=1)
cz_r_1 = self._contain_zero(probabilities, rules+[(feature, 1, threshold)], eps, delta, treated=1)
cz_l_0 = self._contain_zero(probabilities, rules+[(feature, 0, threshold)], eps, delta, treated=0)
cz_r_0 = self._contain_zero(probabilities, rules+[(feature, 1, threshold)], eps, delta, treated=0)
                if np.mean(cz_l_1) > delta or np.mean(cz_r_1) > delta or np.mean(cz_l_0) > delta or np.mean(cz_r_0) > delta:
continue
idxs_left = np.product([df[key] <= th for key, sign, th in rules if sign == 0] + \
[df[key] > th for key, sign, th in rules if sign == 1] + \
[df[feature] <= threshold],
axis=0) > 0
idxs_right = np.product([df[key] <= th for key, sign, th in rules if sign == 0] + \
[df[key] > th for key, sign, th in rules if sign == 1] + \
[df[feature] > threshold],
axis=0) > 0
left = df[idxs_left]
right = df[idxs_right]
# propensity score for left partition + ego treated
propensities_left_1 = np.mean(np.product([probabilities[key][idxs_left] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_left] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_left] <= threshold] + \
[probabilities[self.treatment][idxs_left] == 1],
axis=0), axis=1)
# propensity score for left partition + ego non treated
propensities_left_0 = np.mean(np.product([probabilities[key][idxs_left] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_left] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_left] <= threshold] + \
[probabilities[self.treatment][idxs_left] == 0],
axis=0), axis=1)
propensities_right_1 = np.mean(np.product([probabilities[key][idxs_right] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_right] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_right] > threshold] + \
[probabilities[self.treatment][idxs_right] == 1],
axis=0), axis=1)
propensities_right_0 = np.mean(np.product([probabilities[key][idxs_right] <= th for key, sign, th in rules if sign == 0] + \
[probabilities[key][idxs_right] > th for key, sign, th in rules if sign == 1] + \
[probabilities[feature][idxs_right] > threshold] + \
[probabilities[self.treatment][idxs_right] == 0],
axis=0), axis=1)
# filter those whose propensities scores are very small (This may lead to lose observations)
idxs_left_filter = np.logical_and(propensities_left_1 > eps, propensities_left_0 > eps)
left = left[idxs_left_filter]
propensities_left_1 = propensities_left_1[idxs_left_filter]
propensities_left_0 = propensities_left_0[idxs_left_filter]
# filter those whose propensities scores are very small (This may lead to lose observations)
idxs_right_filter = np.logical_and(propensities_right_1 > eps, propensities_right_0 > eps)
right = right[idxs_right_filter]
propensities_right_1 = propensities_right_1[idxs_right_filter]
propensities_right_0 = propensities_right_0[idxs_right_filter]
if np.mean(left[self.treatment]) == 0 or np.mean(left[self.treatment]) == 1 or \
np.mean(right[self.treatment]) == 0 or np.mean(right[self.treatment]) == 1:
continue
if len(left) == 0 or len(right) == 0:
continue
                # WLS of the outcome on the ego-treatment indicator, weighted by inverse exposure propensities
mod_left = sm.WLS(left[outcome], sm.add_constant(left[[self.treatment]]), \
weights=1.0 / propensities_left_1 * left[self.treatment] + 1.0 / propensities_left_0 * (1-left[self.treatment]))
res_left = mod_left.fit()
mod_right = sm.WLS(right[outcome], sm.add_constant(right[self.treatment]), \
weights=1.0 / propensities_right_1 * right[self.treatment] + 1.0 / propensities_right_0 * (1-right[self.treatment]))
res_right = mod_right.fit()
average_left_hajek = res_left.params[1]
average_right_hajek = res_right.params[1]
average_left_hajek_se = res_left.bse[1]
average_right_hajek_se = res_right.bse[1]
# need further improvement
mse_left = np.sum((1.0 / propensities_left_1 * left[self.treatment] + 1.0 / propensities_left_0 * (1-left[self.treatment])) *
((res_left.resid) ** 2))
mse_right = np.sum((1.0 / propensities_right_1 * right[self.treatment] + 1.0 / propensities_right_0 * (1-right[self.treatment])) *
((res_right.resid) ** 2))
mse = mse_left * 1.0 * len(left)/(len(left)+len(right)) + mse_right * 1.0 * len(right)/(len(left)+len(right))
if mse < b_mse:
flag = True
assert len(criteria) > 0
if 'non_trivial_reduction' in criteria:
if not (mse < current_mse - criteria['non_trivial_reduction']):
flag = False
if 'reasonable_propensity' in criteria:
if not (np.abs(np.sum(1.0 / propensities_left_1 * left[self.treatment])/len(df) - 1.0) <= criteria['reasonable_propensity'] \
and \
np.abs(np.sum(1.0 / propensities_right_1 * right[self.treatment])/len(df) - 1.0) <= criteria['reasonable_propensity'] \
and \
np.abs(np.sum(1.0 / propensities_left_0 * (1 - left[self.treatment]))/len(df) - 1.0) <= criteria['reasonable_propensity'] \
and \
np.abs(np.sum(1.0 / propensities_right_0 * (1 - right[self.treatment]))/len(df) - 1.0) <= criteria['reasonable_propensity']
):
flag = False
if 'separate_reduction' in criteria:
if not (mse_left < current_mse and mse_right < current_mse):
flag = False
if 'min_leaf_size' in criteria:
if not (len(left) >= criteria['min_leaf_size'] and len(right) >= criteria['min_leaf_size']):
flag = False
if flag:
b_feature = feature
b_mse = mse
b_mse_left = mse_left
b_mse_right = mse_right
b_threshold = threshold
b_average_left_hajek = average_left_hajek
b_average_right_hajek = average_right_hajek
b_average_left_hajek_se = average_left_hajek_se
b_average_right_hajek_se = average_right_hajek_se
b_left = left
b_right = right
b_left_rules = rules + [(feature, 0, threshold)]
b_right_rules = rules + [(feature, 1, threshold)]
result = {}
if b_feature != '':
# if find a valid partition
result_left = self._split_exposure_hajek_eht(node_id*2, df, probabilities, feature_set, max_attempt, eps, delta, outcome, b_left_rules, N, b_mse_left, criteria)
result_right = self._split_exposure_hajek_eht(node_id*2+1, df, probabilities, feature_set, max_attempt, eps, delta, outcome, b_right_rules, N, b_mse_right, criteria)
result['mse'] = result_left['mse'] * 1.0 * len(b_left)/(len(b_left)+len(b_right)) + \
result_right['mse'] * 1.0 * len(b_right)/(len(b_left)+len(b_right))
result['feature'] = b_feature
result['threshold'] = b_threshold
result_left['hajek'] = b_average_left_hajek
result_right['hajek'] = b_average_right_hajek
result_left['hajek_se'] = b_average_left_hajek_se
result_right['hajek_se'] = b_average_right_hajek_se
result_left['N'] = len(b_left)
result_right['N'] = len(b_right)
result['left_result'] = result_left
result['right_result'] = result_right
return result
else:
result['mse'] = current_mse
return result
def estimate_exposure_hajek(self, train_result_separate, indirect_space, outcome, eps=0.005, separate=True):
"""
        train_result_separate: tree structure returned by split_exposure_hajek
        indirect_space: feature space (consistent with the training input)
        outcome: outcome variable (consistent with the training input)
        eps: propensity floor (consistent with the training input)
        separate: True for separate trees; False for heterogeneous-effect trees.
"""
if separate:
df_est = self.df_est
probabilities = self.probabilities
probabilities_est = {}
for key in [self.treatment]+indirect_space:
probabilities_est[key] = self.probabilities[key][self.idx_est]
est_result_separate = {}
est_result_separate = self._split_exposure_validate(1, df_est, train_result_separate, probabilities_est, [], outcome, eps)
self.est_result_separate = est_result_separate
return est_result_separate
else:
# if find a valid partition for T == 1 or 0 separately
df_est = self.df_est
probabilities_est = {}
for key in indirect_space+[self.treatment]:
probabilities_est[key] = self.probabilities[key][self.idx_est.astype(bool)]
est_result_separate_eht = {}
est_result_separate_eht = self._split_exposure_validate_eht(1, df_est, train_result_separate, probabilities_est, [], outcome, eps)
self.est_result_separate_eht = est_result_separate_eht
return est_result_separate_eht
def split_exposure_hajek(self, separate, outcome, feature_set, max_attempt=30, eps=0.0, delta=0.0,
df_train=None, probabilities=None, criteria={'non_trivial_reduction': 0}):
"""
        The API for splitting.
        separate: True = separate trees
        outcome: outcome variable
        feature_set: a list of features used to partition (may include ``assignment'')
        criteria: stopping criteria, e.g. 'non_trivial_reduction' only splits when
        the variance reduction exceeds the given amount
        max_attempt: number of thresholds sampled per feature -- a larger value tends to overfit more
        eps: minimum per-observation probability of belonging to a partition
        delta: maximum fraction of observations allowed to violate the eps floor
        df_train: leave it as None
        probabilities: leave it as None
"""
if separate == True:
df_train = self.df_train # training set
probabilities = self.probabilities # probability tensor
probabilities_train = {}
for key in [self.treatment]+feature_set:
probabilities_train[key] = probabilities[key][self.idx_tr]
mod = sm.WLS(df_train[outcome], np.ones(len(df_train)))
res = mod.fit()
total_sse = np.sum(res.resid ** 2) # total sse
train_result = {}
train_result = self._split_exposure_hajek(1, df_train, probabilities_train, feature_set, max_attempt,
eps, delta, outcome, [],
len(df_train), total_sse, criteria)
train_result['N'] = len(df_train)
train_result['hajek'] = df_train[outcome].mean()
train_result['hajek_se'] = df_train[outcome].std() / np.sqrt(len(df_train[outcome])-1)
self.result_separate = train_result
return train_result
else:
df_train = self.df_train
probabilities = self.probabilities
probabilities_train = {}
for key in [self.treatment]+feature_set:
probabilities_train[key] = probabilities[key][self.idx_tr]
mod = sm.WLS(df_train[outcome], sm.add_constant(df_train[self.treatment]))
res = mod.fit()
total_sse = np.sum(res.resid ** 2) * 2
train_result_eht = {}
train_result_eht = self._split_exposure_hajek_eht(1, df_train, probabilities_train, feature_set, max_attempt,
eps, delta, outcome, [], len(df_train), total_sse, criteria)
train_result_eht['N'] = len(df_train)
train_result_eht['hajek'] = res.params[1]
train_result_eht['hajek_se'] = res.bse[1]
return train_result_eht
| CausalMotifs-master | causalPartition.py |
import os
import sys
import torch
import logging
import argparse
import numpy as np
import pandas as pd
from scipy.stats import mode
from maude import MaudeReport
from maude.labelers.gender import lfs
from maude.labelers import LabelingServer
logger = logging.getLogger(__name__)
os.environ['CUDA_VISIBLE_DEVICES']='0'
torch.backends.cudnn.deterministic = True
def main(args):
labeler = LabelingServer(num_workers=args.n_procs, verbose=True)
uid, unlabeled, num_docs = 0, 0, 0
for i, chunk in enumerate(pd.read_csv(args.train, sep='\t', quotechar='"', chunksize=args.chunksize)):
# load chunk
documents = []
for _, row in chunk.iterrows():
documents.append(MaudeReport(row, uid))
uid += 1
# apply labeling functions
Ls = labeler.apply(lfs, [documents], block_size='auto')
Ys = []
for L in Ls:
L = L.toarray()
for row in L:
# unique, counts = np.unique(row[row.nonzero()], return_counts=True)
pred, _ = mode(row[row.nonzero()])
Ys.append(pred[0] if np.any(pred) else -1)
unlabeled += Ys.count(-1)
num_docs += len(Ys)
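        # Majority vote over each row's nonzero LF outputs: scipy.stats.mode
        # breaks ties toward the smaller label, and rows where no LF fired are
        # marked -1 and tallied as unlabeled.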
# write predictions
outfpath = f'{args.outdir}{i}.mv.num_lfs_{len(lfs)}.tsv'
with open(outfpath, 'w') as fp:
header = ['MDR_REPORT_KEY',
'MDR_TEXT_KEY',
'PATIENT_SEQUENCE_NUMBER',
'foi_text_md5',
'Y_PRED']
fp.write('\t'.join(header) + '\n')
for doc, y_pred in zip(documents, Ys):
row = list(doc.key) + [y_pred]
fp.write('\t'.join(map(str, row)) + '\n')
logger.info(f'Wrote {len(documents)} to {outfpath}')
    logger.info(f'Done: {unlabeled}/{num_docs} ({unlabeled/num_docs*100:2.1f}%) have no LF coverage')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--train", type=str, default=None)
parser.add_argument("--outdir", type=str, default=None)
parser.add_argument("--chunksize", type=int, default=1000000)
parser.add_argument("--n_procs", type=int, default=4)
parser.add_argument("--device", type=str, default="gpu")
args = parser.parse_args()
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
if not torch.cuda.is_available() and args.device.lower() == 'gpu':
logger.error("Warning! CUDA not available, defaulting to CPU")
args.device = "cpu"
if torch.cuda.is_available():
logger.info("CUDA PyTorch Backends")
logger.info("torch.backends.cudnn.deterministic={}".format(torch.backends.cudnn.deterministic))
logger.info(f'PyTorch: {torch.__version__}')
logger.info(f'Python: {sys.version}')
main(args)
| icij-maude-master | preprocess.py |
from .core import MaudeReport | icij-maude-master | maude/__init__.py |
import numpy as np
class MaudeReport(object):
def __init__(self, row, unique_id):
if type(row.foi_text) is float:
row.foi_text = 'NONE'
for key in row.keys():
setattr(self, key, getattr(row, key))
self.unique_id = unique_id
@property
def key(self):
return (
int(self.MDR_REPORT_KEY) if not np.isnan(self.MDR_REPORT_KEY) else 'NaN',
int(self.MDR_TEXT_KEY) if not np.isnan(self.MDR_TEXT_KEY) else 'NaN',
int(self.PATIENT_SEQUENCE_NUMBER) if not np.isnan(self.PATIENT_SEQUENCE_NUMBER) else 'NaN',
self.foi_text_md5
)
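    # `key` is the stable identity of a report text (MDR report/text keys,
    # patient sequence number, and the MD5 of the narrative), matching the
    # header columns written by preprocess.py.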
@property
def tokens(self):
return self.foi_text.split()
def __len__(self):
"""Token length"""
return len(self.tokens)
| icij-maude-master | maude/core.py |
import re
ABSTAIN = 0
MALE = 1
FEMALE = 2
UNKNOWN = 3
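# Each labeling function below votes for one of these labels, with ABSTAIN (0)
# meaning "no vote"; downstream aggregation (see preprocess.py) keeps only
# nonzero votes when taking the majority.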
# ================================================================================
#
# Mandy's LFs
#
# ================================================================================
patient_rgx = '(patient| pt | pts |patients|consumer|customer|client)'
device_part_regex = r'''(female|male) ( part | parts |connector|connectors|luer|luers|adapt| end | ends|adapter|adapters)'''
device_parts = 'connector|connectors|luer|luers|adapt|adapter|adapters'
def LF_female(report):
r_pos = re.compile('female|woman|girl', re.IGNORECASE)
electronic_part_1 = re.search(device_part_regex, report.foi_text, re.I)
electronic_part_2 = re.search(device_parts, report.foi_text, re.I)
return FEMALE if r_pos.search(
report.foi_text) and electronic_part_1 is None and electronic_part_2 is None else ABSTAIN
def LF_male(report):
r_pos = re.compile(' male | man |boy', re.IGNORECASE)
electronic_part_1 = re.search(device_part_regex, report.foi_text, re.I)
electronic_part_2 = re.search(device_parts, report.foi_text, re.I)
return MALE if r_pos.search(
report.foi_text) and electronic_part_1 is None and electronic_part_2 is None else ABSTAIN
female_pronouns = {'she', 'her', 'hers'}
female_pronouns_str = " she | her | hers "
def LF_female_pronouns(doc):
v = re.search(female_pronouns_str, doc.foi_text, re.I)
return FEMALE if v else ABSTAIN
male_pronouns = {'he', 'him', 'his'}
male_pronouns_str = " he | him | his "
def LF_male_pronouns(doc):
v = re.search(male_pronouns_str, doc.foi_text,
re.I) is not None
v &= re.search(
'physician|doctor|husband| son |father|engineer|representative|luer|connector|adapter|assembly|male part',
doc.foi_text, re.I) is None
v &= re.search(' she | her | hers ', doc.foi_text, re.I) is None # COMMENT
# v &= re.search(patient_rgx, report.foi_text, re.I) is not None
# REMOVE physician|doctor|husband| son |father|
return MALE if v else ABSTAIN
female_rgx = r'''\bshe\b'''
female_pronouns = ' she | her | hers '
male_pronouns = ' he | him | his '
def LF_female_patient_gender(doc):
v = re.search(female_rgx, doc.foi_text, re.I) is not None
v &= re.search(patient_rgx, doc.foi_text, re.I) is not None
v &= re.search(' nurse |mother|daughter|wife|luer|connector|adapter|assembly|male part', doc.foi_text, re.I) is None
# v &= re.search(male_pronouns, doc.foi_text, re.I) is None
return FEMALE if v else ABSTAIN
male_rgx = r'''\bhe\b'''
def LF_male_patient_gender(doc):
v = re.search(male_pronouns, doc.foi_text, re.I) is not None
v &= re.search(patient_rgx, doc.foi_text, re.I) is not None
v &= re.search(
'physician|doctor|husband| son |father|engineer|representative|luer|connector|adapter|assembly|male part',
doc.foi_text, re.I) is None
v &= re.search(' she | her | hers ', doc.foi_text, re.I) is None # COMMENT FOR 2M
# REMOVE FOR 2M physician|doctor|husband| son |father|
return MALE if v else ABSTAIN
female_patient_rgx = "female .? .? (patient|pt|pts|client|customer|consumer)"
male_patient_rgx = " male .? .? (patient|pt|pts|client|customer|consumer)"
def LF_female_patient_rgx(doc):
return FEMALE if re.search(female_patient_rgx, doc.foi_text, re.I) else ABSTAIN
def LF_male_patient_rgx(doc):
return MALE if re.search(male_patient_rgx, doc.foi_text, re.I) else ABSTAIN
pronouns = {'she', 'her', 'he', 'his', 'hers', 'him', 'wife', 'daughter', 'female', 'male', 'husband', 'son', 'boy',
'girl'}
female_pronouns = {'she', 'her', 'hers', 'wife', 'daughter', 'female', 'male', 'husband', 'son', 'boy', 'girl',
'father'}
# JASON - replaced with regexs
#
# def LF_patient_unknown(doc):
# v = re.search(patient_rgx, doc.foi_text, re.I) is not None
# v &= not set(doc.tokens).intersection(pronouns)
# return UNKNOWN if v else ABSTAIN
#
#
# def LF_malfunction(doc):
# v = re.search("M", doc.EVENT_TYPE, re.I) is not None
# v &= not set(doc.tokens).intersection(female_pronouns)
# return UNKNOWN if v else ABSTAIN
female_linked_terms = {'vagina', 'uterus', 'gynecological', 'breast', 'vaginal', 'pregnant', 'tampon', 'pregnancy',
'uterine', 'menstrual', 'menstruation', 'caesarean', 'gynecology', 'feminine', 'breasts',
'essure', 'hysterectomy'}
male_linked_terms_rgx = r'''\b(prostate|vasect|vas deferens|testes|penis|penile|testosterone|semen|vasovasostomy)\b'''
female_linked_terms_rgx = r'''\b(vagina|uter(ine|us)|gynecological|breast|vaginal|pregnant|tampon|pregnancy|menstrual|menstruation|caesarean|gynecology|feminine)\b''' # uter|pregn|gyn
# replaced with regexes
#
# def LF_female_terms(report):
# return FEMALE if set(report.tokens).intersection(female_linked_terms) or re.search(female_linked_terms_rgx,
# report.foi_text,
# re.I) is not None else ABSTAIN
def LF_patient_unknown(doc):
rgx = r'''\b(daughter|husband|female|male|hers|wife|girl|him|his|boy|she|son|her|he)\b'''
v = re.search(patient_rgx, doc.foi_text, re.I) is not None
v &= not re.search(rgx, doc.foi_text, re.I)
return UNKNOWN if v else ABSTAIN
def LF_malfunction(doc):
rgx = r'''\b(daughter|husband|female|father|male|hers|wife|girl|boy|she|son|her)\b'''
v = re.search("M", doc.EVENT_TYPE, re.I) is not None
v &= not re.search(rgx, doc.foi_text, re.I)
return UNKNOWN if v else ABSTAIN
def LF_female_terms(doc):
rgx = r'''\b(vagina|uter(ine|us)|gynecological|breast|vaginal|pregnant|tampon|pregnancy|menstrual|menstruation|caesarean|gynecology|feminine)\b'''
return FEMALE if re.search(rgx, doc.foi_text, re.I) else ABSTAIN
def LF_male_terms(report):
return MALE if re.search(male_linked_terms_rgx, report.foi_text, re.I) else ABSTAIN
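# Connector-gender disambiguation: "male"/"female" also describe luer fittings
# and other device parts, so such reports are pushed toward UNKNOWN instead of
# letting the gender LFs mislabel them.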
device_part_regex = r'''(female|male) ( part | parts |connector|connectors|luer|luers|adapt| end | ends|adapter|adapters)'''
device_parts = 'connector|connectors|luer|luers|adapt|adapter|adapters'
def LF_device_part(doc):
    v = re.search(device_part_regex, doc.foi_text, re.I) is not None
    v |= re.search(device_parts, doc.foi_text, re.I) is not None
    return UNKNOWN if v else ABSTAIN
def LF_female_devices_terms(doc):
try:
v1 = re.search(female_linked_terms_rgx + '|female|feminine', doc.DEVICE, re.I) is not None
# v2 = re.search("female", doc.DEVICE, re.I) is not None
return FEMALE if v1 else ABSTAIN
    except Exception:  # don't swallow KeyboardInterrupt; DEVICE can be missing on some reports
        return ABSTAIN
def LF_male_devices_terms(report):
    try:
        return MALE if re.search(" prost|vasect|vas deferens|testes|penis|penile|testosterone|semen| male|masculine",
                                 report.DEVICE, re.I) else ABSTAIN
    except Exception:  # mirror the guard in LF_female_devices_terms: DEVICE can be missing
        return ABSTAIN
multiple_pt_regex = r'''(multiple|several|many) (patient| pt|consumer|customer|client)'''
def LF_multiple_patients(doc):
return UNKNOWN if re.search(multiple_pt_regex, doc.foi_text, re.I) else ABSTAIN
female_sentence_rgx = r'''(is|was)\s?a? female''' # r'''(patient|consumer|customer|pt|client) (is|was)\s?a? female'''
def LF_female_phrases(doc):
v = re.search(female_sentence_rgx, doc.foi_text, re.I) is not None
return FEMALE if v else ABSTAIN
def LF_wife_of(doc):
v = re.search("wife of", doc.foi_text, re.I)
return MALE if v else ABSTAIN
def LF_husband_of(doc):
v = re.search("husband of", doc.foi_text, re.I)
return FEMALE if v else ABSTAIN
def LF_genitive_wife(doc):
v = re.search("(his|her) wife", doc.foi_text, re.I)
return FEMALE if v else ABSTAIN
def LF_genitive_husband(doc):
v = re.search("(his|her) husband", doc.foi_text, re.I)
return MALE if v else ABSTAIN
def LF_wife(doc):
v = re.search("wife", doc.foi_text, re.I)
return MALE if v else ABSTAIN
def LF_husband(doc):
v = re.search("husband", doc.foi_text, re.I)
return FEMALE if v else ABSTAIN
male_sentence_rgx = r'''(is|was)\s?a? male''' # r'''(patient|consumer|customer|pt|client) (is|was)\s?a? male'''
def LF_male_phrases(doc):
v = re.search(male_sentence_rgx, doc.foi_text, re.I) is not None
return MALE if v else ABSTAIN
# NO PATIENT
# NO REPORT OF PATIENT INVOLVEMENT
no_pt_regexes = [
r'\bNO (PATIENT|PT) INVOLVEMENT',
r'\bNO REPORT OF (PATIENT|PT)',
    r'\bNO (FURTHER )?(PATIENT|PT) COMPLICATION'  # was "NO [further]": a character class matching one letter, never the word
]
# no_pt_rgx_combined = "(" + ")|(".join(no_pt_regexes) + ")"
no_pt_rgx_combined = '|'.join(no_pt_regexes)
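# Matches phrases such as "NO PATIENT INVOLVEMENT", "NO REPORT OF PT",
# and "NO FURTHER PATIENT COMPLICATION".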
# print(no_pt_rgx_combined)
def LF_no_patient(doc):
return UNKNOWN if re.search(no_pt_rgx_combined, doc.foi_text, re.I) is not None else ABSTAIN
pre_xmale_words = ['old', 'elderly', 'adult', 'sick', 'involves a', 'involved a', 'procedure on', 'stated that a',
'concerns a', 'performed on a', 'reported that a']
pre_male_words_rgx = '(' + '|'.join(pre_xmale_words) + ') ' + r'(male|man|boy)'
# print(pre_male_words_rgx)
def LF_pre_male_words(doc):
return MALE if re.search(pre_male_words_rgx, doc.foi_text, re.I) is not None else ABSTAIN
post_male_words = ['admitted', 'premature', 'reported experiencing', 'experienced', 'minor', 'aged', 'age',
'was ambulating', 'who was exposed', 'who was treated', 'with history of',
'over [1-9][0-9] years old', 'cardiac', 'weight']
post_male_words_rgx = r'(\bmale|\bman|\bboy) ' + '(' + '|'.join(post_male_words) + ')'
# print(post_male_words_rgx)
def LF_post_male_words(doc):
return MALE if re.search(post_male_words_rgx, doc.foi_text, re.I) is not None else ABSTAIN
pre_female_words_rgx = '(' + '|'.join(pre_xmale_words) + ') ' + r'(female|woman|girl)'
# print(pre_female_words_rgx)
def LF_pre_female_words(doc):
return FEMALE if re.search(pre_female_words_rgx, doc.foi_text, re.I) is not None else ABSTAIN
post_female_words = post_male_words + ['gravida']  # copy rather than alias: .append() on an alias would also mutate post_male_words
post_female_words_rgx = r'(female|woman|girl) ' + '(' + '|'.join(post_female_words) + ')'
def LF_post_female_words(doc):
return FEMALE if re.search(post_female_words_rgx, doc.foi_text, re.I) is not None else ABSTAIN
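# Reporter-vs-patient LFs: when a physician or company rep files the report,
# pronouns may refer to the reporter rather than the patient, so these LFs
# cross-check patient cues before voting.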
def LF_physician_m(doc):
v = re.search(r'''\b(him|his|he)\b''', doc.foi_text, re.I)
return MALE if v is not None else ABSTAIN
def LF_physician_f(doc):
v1 = re.search("physician|doctor| rep | representative |surgeon", doc.foi_text, re.I)
v2 = re.search(female_patient_rgx, doc.foi_text, re.I)
v3 = re.search(' she | her | hers ', doc.foi_text, re.I)
v4 = re.search(male_patient_rgx, doc.foi_text, re.I)
return FEMALE if v1 is not None and (v2 is not None or v3 is not None) and v4 is None else ABSTAIN
patient_only_rgx = r'''(patient| pt | pts )'''
def LF_physician_u(doc):
# v1 = re.search("physician|doctor|rep| representative |surgeon", doc.foi_text, re.I)
v2 = re.search(' she | her | hers |woman|girl|mother|wife|daughter', doc.foi_text, re.I)
v3 = re.search(' he | him| his| man |boy|father|husband| son ', doc.foi_text, re.I)
v4 = re.search(female_linked_terms_rgx, doc.foi_text, re.I)
v5 = re.search(patient_only_rgx, doc.foi_text, re.I)
return UNKNOWN if v2 is None and v3 is None and v4 is None and v5 is None else ABSTAIN
def LF_luer(doc):
v = re.search("luer", doc.foi_text, re.I)
return UNKNOWN if v is not None else ABSTAIN
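# Possessive-pronoun LFs: "his <noun>"/"her <noun>" over patient-context nouns
# (symptoms, devices, anatomy) is a strong cue for the patient's gender.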
patient_stuff = '(blood|infusion|insulin|diabetes|symptom(s)?|device|reading(s)?|physician|doctor|basal|back|body|skin|eye(s)?|medication(s)?|abdomen|head|face|glucose|vision|surgeon|battery)'
his_stuff_rgx = r'\bhis '+patient_stuff
her_stuff_rgx = r'\bher '+patient_stuff
def LF_his_stuff(doc):
return MALE if re.search(his_stuff_rgx, doc.foi_text, re.I) is not None else ABSTAIN
def LF_her_stuff(doc):
return FEMALE if re.search(her_stuff_rgx, doc.foi_text, re.I) is not None else ABSTAIN
# Note the trailing empty alternative below: the action verb is effectively
# optional, so these regexes also match a bare " him"/" her".
on_patient_actions_rgx = r'''(treated|treat|transported|diagnosed|implanted in|tested|wake|woke|advised|caused|assisted|found|causing|)'''
object_male_rgx = on_patient_actions_rgx + r''' him\b'''
object_female_rgx = on_patient_actions_rgx + r''' her\b'''
def LF_male_object_pronouns(report):
v = re.search(object_male_rgx, report.foi_text, re.I) is not None
v &= re.search(' female|woman|girl', report.foi_text, re.I) is None
v &= re.search(female_linked_terms_rgx, report.foi_text, re.I) is None
return MALE if v else ABSTAIN
def LF_female_object_pronouns(report):
v = re.search(object_female_rgx, report.foi_text, re.I) is not None
v &= re.search(' male | man | boy', report.foi_text, re.I) is None
return FEMALE if v else ABSTAIN
patient_actions_rgx = r'''(reported|reported that|felt|experienced|tests|tested|stated that|stated|manages|had|said|contacted)'''
female_pt_actions_rgx = r'\bshe ' + patient_actions_rgx
def LF_female_patient_actions(doc):
v = re.search(female_pt_actions_rgx, doc.foi_text, re.I) is not None
v &= re.search(' he | his | him | male | man | boy ', doc.foi_text, re.I) is None
return FEMALE if v else ABSTAIN
#male_patient_actions_rgx= r'''(reported|reported that|felt|experienced|tests|tested|stated that|stated|manages|had|said|contacted)'''
male_patient_actions_rgx = r'''(stated|stated that|felt|experienced|said|tested|contacted)'''
male_pt_actions_rgx = r'\bhe ' + male_patient_actions_rgx
def LF_male_patient_actions(doc):
v = re.search(male_pt_actions_rgx, doc.foi_text, re.I) is not None
v &= re.search(female_linked_terms_rgx, doc.foi_text, re.I) is None
return MALE if v else ABSTAIN
patient_only_rgx = r'''(patient| pt)'''  # rebinds the broader pattern above; LF_physician_u sees this value at call time
patient_reflexive_actions_rgx = r''' (reported|reported that|felt|experienced|tests|tested|stated that|stated|manages|had|said|contacted)'''
female_reflexive_actions_rgx = patient_reflexive_actions_rgx + r''' (she|her) '''
def LF_female_patient_reflexive_action(doc):
v = re.search(female_reflexive_actions_rgx, doc.foi_text, re.I) is not None
#v &= re.search(' he | his | him | male | man | boy ', doc.foi_text, re.I) is None
return FEMALE if v else ABSTAIN
male_reflexive_patient_actions_rgx = r''' (reported|reported that|felt|experienced|tests|tested|stated that|stated|manages|had|said|contacted)'''
male_reflexive_actions_rgx = male_reflexive_patient_actions_rgx + r''' (he|him|his) '''
def LF_male_patient_reflexive_action(doc):
v = re.search(male_reflexive_actions_rgx, doc.foi_text, re.I) is not None
v &= re.search(' female|woman|girl', doc.foi_text, re.I) is None
v &= re.search(female_linked_terms_rgx, doc.foi_text, re.I) is None
return MALE if v else ABSTAIN
def LF_male_patient_reflexive_action_string(doc):
v = re.search(male_reflexive_actions_rgx, doc, re.I) is not None
v &= re.search(' she | her | hers |female|woman|girl', doc, re.I) is None
v &= re.search(female_linked_terms_rgx, doc, re.I) is None
return MALE if v else ABSTAIN
# print(LF_male_patient_reflexive_action_string(" reported hist"))
female_patient_rgx = "female (patient|pt|pts|client|customer|consumer)"  # tighter rebinding of the earlier pattern; later LFs use this value
male_patient_rgx = " male (patient|pt|pts|client|customer|consumer)"
def LF_female_patient_rgx_string(doc):
return FEMALE if re.search(female_patient_rgx, doc, re.I) else ABSTAIN
def LF_male_patient_rgx_string(doc):
return MALE if re.search(male_patient_rgx, doc, re.I) else ABSTAIN
female_sentence_rgx = r'''(is|was)\s?a? female''' # r'''(patient|consumer|customer|pt|client) (is|was)\s?a? female'''
def LF_physician_m_string(doc):
# v1 = re.search("physician|doctor|nurse|rep", doc.foi_text, re.I)
# v2 = re.search(female_patient_rgx, doc.foi_text, re.I)
# v3 = re.search(male_patient_rgx, doc.foi_text, re.I)
v4 = re.search(" he | him | his ", doc, re.I)
return MALE if v4 is not None else ABSTAIN
# return MALE if v1 is not None and v2 is None and (v3 is not None or v4 is not None) else ABSTAIN
# ================================================================================
#
# Jason's LFs
#
# ================================================================================
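# The next three LFs share one template: possessive pronoun + a term set,
# voting MALE on "his <term>" and FEMALE on "her <term>".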
def LF_healthcare_worker(doc):
devices = {'DOCTOR', 'SURGEON', 'PREVIOUS SURGERY', 'NURSE', 'UROLOGIST'}
rgx = f"his ({'|'.join(devices).lower()})"
if re.search(r'''\b''' + rgx, doc.foi_text, re.I):
return MALE
rgx = f"her ({'|'.join(devices).lower()})"
if re.search(r'''\b''' + rgx, doc.foi_text, re.I):
return FEMALE
return ABSTAIN
def LF_devices(doc):
devices = {'INCIDENT', 'IMPLANTABLE', 'HANDHELD', 'BED', 'VNS',
'BLOOD GLUCOSE', 'DIABETES', 'ONE TOUCH VITA METER',
'CATHETER', 'HOMECHOICE', 'INSULIN', 'DEVICE', 'PUMP',
'INFUSION'}
rgx = f"his ({'|'.join(devices).lower()})"
if re.search(r'''\b''' + rgx, doc.foi_text, re.I):
return MALE
rgx = f"her ({'|'.join(devices).lower()})"
if re.search(r'''\b''' + rgx, doc.foi_text, re.I):
return FEMALE
return ABSTAIN
def LF_anatomy(doc):
devices = {'FINGER', 'CHEST', 'FOOT', 'ARM', 'ABDOMEN',
'BODY', 'CHEST', 'EYES', 'INTERNAL ORGANS'}
rgx = f"his ({'|'.join(devices).lower()})"
if re.search(r'''\b''' + rgx, doc.foi_text, re.I):
return MALE
rgx = f"her ({'|'.join(devices).lower()})"
if re.search(r'''\b''' + rgx, doc.foi_text, re.I):
return FEMALE
return ABSTAIN
def LF_spouse(doc):
rgx = r"""((pt[.]*|patient)\s*('s)*) (wife|husband)|(wife|husband) of (pt[.]*|patient)"""
m = re.search(rgx, doc.foi_text, re.I)
if not m:
return ABSTAIN
    return MALE if 'WIFE' in m.group().upper() else FEMALE  # upper(): the case-insensitive match may be lowercase
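# The LF ensemble handed to the downstream label model; the commented-out
# entries appear to have been disabled during development.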
lfs = [
LF_female,
LF_male,
LF_female_pronouns,
LF_male_pronouns,
LF_female_patient_gender,
LF_male_patient_gender,
LF_female_patient_rgx,
LF_male_patient_rgx,
LF_patient_unknown,
LF_female_terms,
LF_male_terms,
LF_device_part,
LF_multiple_patients,
LF_female_devices_terms,
LF_female_phrases,
LF_male_phrases,
LF_no_patient,
LF_pre_male_words,
LF_post_male_words,
LF_pre_female_words,
LF_post_female_words,
LF_malfunction,
#LF_wife,
#LF_husband,
LF_wife_of,
LF_husband_of,
LF_genitive_wife,
LF_genitive_husband,
LF_physician_u,
LF_physician_f,
#LF_physician_m,
LF_luer,
LF_her_stuff,
LF_his_stuff,
LF_male_object_pronouns,
LF_female_object_pronouns,
#LF_female_patient_actions,
#LF_male_patient_actions,
LF_female_patient_reflexive_action,
#LF_male_patient_reflexive_action
# NEW 6-16
LF_devices,
LF_anatomy,
LF_healthcare_worker,
LF_spouse
] | icij-maude-master | maude/labelers/gender.py |
from .core import LabelingServer | icij-maude-master | maude/labelers/__init__.py |
import itertools
import numpy as np
from scipy import sparse
from functools import partial
from toolz import partition_all
from joblib import Parallel, delayed
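# Distributed wraps a joblib Parallel client; LabelingServer fans labeling
# functions out over blocks of documents and stitches the per-block sparse
# label matrices back together in the original order.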
class Distributed(object):
def __init__(self,
num_workers=1,
backend='multiprocessing',
verbose=False):
        self.client = Parallel(n_jobs=num_workers,
                               backend=backend,
                               prefer="processes")
self.num_workers = num_workers
self.verbose = verbose
if self.verbose:
print(self.client)
class LabelingServer(Distributed):
def __init__(self, num_workers=1, backend='multiprocessing', verbose=False):
super().__init__(num_workers, backend, verbose)
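    # Each worker applies every LF to its block of documents, yielding a
    # sparse (n_docs x n_lfs) matrix of votes.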
@staticmethod
def worker(lfs, data):
return sparse.csr_matrix(np.vstack([[lf(x) for lf in lfs] for x in data]))
    def apply(self, lfs, Xs, block_size=None):
        """Apply every labeling function to every document, in parallel.

        :param lfs: list of labeling functions, each mapping a document to a label
        :param Xs: list of document collections (e.g. one per data split)
        :param block_size: documents per worker block; an int, None (keep Xs as-is),
            or 'auto' to split work evenly across workers
        :return: list of scipy.sparse label matrices, one per collection in Xs
        """
blocks = Xs
if block_size == 'auto':
block_size = int(np.ceil(np.sum([len(x) for x in Xs]) / self.num_workers))
if self.verbose:
print(f'auto block size={block_size}')
if block_size:
blocks = list(partition_all(block_size, itertools.chain.from_iterable(Xs)))
if self.verbose:
print(f"Partitioned into {len(blocks)} blocks, {np.unique([len(x) for x in blocks])} sizes")
do = delayed(partial(LabelingServer.worker, lfs))
jobs = (do(batch) for batch in blocks)
L = sparse.vstack(self.client(jobs))
# merge matrix blocks
Ls = []
i = 0
for n in [len(x) for x in Xs]:
Ls.append(L[i:i + n].copy())
i += n
return Ls
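
# Hedged usage sketch (illustrative, not part of the original file): it assumes
# the `lfs` ensemble from gender.py and a hypothetical pickled list of report
# collections; the file name and worker count are made up.
if __name__ == "__main__":
    import pickle
    from maude.labelers.gender import lfs

    with open("reports.pkl", "rb") as fh:  # hypothetical input
        Xs = pickle.load(fh)  # e.g. [train_docs, dev_docs, test_docs]

    server = LabelingServer(num_workers=4, verbose=True)
    Ls = server.apply(lfs, Xs, block_size="auto")
    # Ls[i] is a scipy.sparse matrix of shape (len(Xs[i]), len(lfs))
    print([L.shape for L in Ls])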
| icij-maude-master | maude/labelers/core.py |